diff --git a/docs/_snippets/_users-and-roles-common.md b/docs/_snippets/_users-and-roles-common.md
index 29726229be9..964f9a8b40d 100644
--- a/docs/_snippets/_users-and-roles-common.md
+++ b/docs/_snippets/_users-and-roles-common.md
@@ -42,64 +42,73 @@ Create these tables and users to be used in the examples.
#### Creating a sample database, table, and rows {#creating-a-sample-database-table-and-rows}
-1. Create a test database
+
- ```sql
- CREATE DATABASE db1;
- ```
+##### Create a test database {#create-a-test-database}
-2. Create a table
+```sql
+CREATE DATABASE db1;
+```
- ```sql
- CREATE TABLE db1.table1 (
- id UInt64,
- column1 String,
- column2 String
- )
- ENGINE MergeTree
- ORDER BY id;
- ```
+##### Create a table {#create-a-table}
-3. Populate the table with sample rows
+```sql
+CREATE TABLE db1.table1 (
+ id UInt64,
+ column1 String,
+ column2 String
+)
+ENGINE MergeTree
+ORDER BY id;
+```
- ```sql
- INSERT INTO db1.table1
- (id, column1, column2)
- VALUES
- (1, 'A', 'abc'),
- (2, 'A', 'def'),
- (3, 'B', 'abc'),
- (4, 'B', 'def');
- ```
+##### Populate the table with sample rows {#populate}
-4. Verify the table:
+```sql
+INSERT INTO db1.table1
+ (id, column1, column2)
+VALUES
+ (1, 'A', 'abc'),
+ (2, 'A', 'def'),
+ (3, 'B', 'abc'),
+ (4, 'B', 'def');
+```
- ```sql
- SELECT *
- FROM db1.table1
- ```
+##### Verify the table {#verify}
- ```response
- Query id: 475015cc-6f51-4b20-bda2-3c9c41404e49
+```sql title="Query"
+SELECT *
+FROM db1.table1
+```
- ┌─id─┬─column1─┬─column2─┐
- │ 1 │ A │ abc │
- │ 2 │ A │ def │
- │ 3 │ B │ abc │
- │ 4 │ B │ def │
- └────┴─────────┴─────────┘
- ```
+```response title="Response"
+Query id: 475015cc-6f51-4b20-bda2-3c9c41404e49
-5. Create a regular user that will be used to demonstrate restrict access to certain columns:
+┌─id─┬─column1─┬─column2─┐
+│ 1 │ A │ abc │
+│ 2 │ A │ def │
+│ 3 │ B │ abc │
+│ 4 │ B │ def │
+└────┴─────────┴─────────┘
+```
- ```sql
- CREATE USER column_user IDENTIFIED BY 'password';
- ```
+##### Create `column_user` {#create-a-user-with-restricted-access-to-columns}
-6. Create a regular user that will be used to demonstrate restricting access to rows with certain values:
- ```sql
- CREATE USER row_user IDENTIFIED BY 'password';
- ```
+Create a regular user that will be used to demonstrate restricting access to certain columns:
+
+```sql
+CREATE USER column_user IDENTIFIED BY 'password';
+```
+
+##### Create `row_user` {#create-a-user-with-restricted-access-to-rows-with-certain-values}
+
+Create a regular user that will be used to demonstrate restricting access to rows with certain values:
+
+```sql
+CREATE USER row_user IDENTIFIED BY 'password';
+```
+
+
#### Creating roles {#creating-roles}
diff --git a/docs/best-practices/_snippets/_table_of_contents.md b/docs/best-practices/_snippets/_table_of_contents.md
new file mode 100644
index 00000000000..9e0d34ef2d1
--- /dev/null
+++ b/docs/best-practices/_snippets/_table_of_contents.md
@@ -0,0 +1,12 @@
+| Page | Description |
+|--------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------|
+| [Choosing a Primary Key](/best-practices/choosing-a-primary-key) | How to select primary keys that maximize query performance and minimize storage overhead. |
+| [Select Data Types](/best-practices/select-data-types) | Choose optimal data types to reduce memory usage, improve compression, and accelerate queries. |
+| [Use Materialized Views](/best-practices/use-materialized-views) | Leverage materialized views to pre-aggregate data and dramatically speed up analytical queries. |
+| [Minimize and Optimize JOINs](/best-practices/minimize-optimize-joins) | Best practices for using ClickHouse's `JOIN` capabilities efficiently. |
+| [Choosing a Partitioning Key](/best-practices/choosing-a-partitioning-key) | Select partitioning strategies that enable efficient data pruning and faster query execution. |
+| [Selecting an Insert Strategy](/best-practices/selecting-an-insert-strategy) | Optimize data ingestion throughput and reduce resource consumption with proper insert patterns. |
+| [Data Skipping Indices](/best-practices/use-data-skipping-indices-where-appropriate) | Apply secondary indices strategically to skip irrelevant data blocks and accelerate filtered queries. |
+| [Avoid Mutations](/best-practices/avoid-mutations) | Design schemas and workflows that eliminate costly `UPDATE`/`DELETE` operations for better performance. |
+| [Avoid OPTIMIZE FINAL](/best-practices/avoid-optimize-final) | Prevent performance bottlenecks by understanding when `OPTIMIZE FINAL` hurts more than it helps. |
+| [Use JSON where appropriate](/best-practices/use-json-where-appropriate) | Balance flexibility and performance when working with semi-structured JSON data in ClickHouse. |
\ No newline at end of file
diff --git a/docs/best-practices/index.md b/docs/best-practices/index.md
index 5a3ae78ab5f..b4721106510 100644
--- a/docs/best-practices/index.md
+++ b/docs/best-practices/index.md
@@ -6,19 +6,10 @@ hide_title: true
description: 'Landing page for Best Practices section in ClickHouse'
---
+import TableOfContents from '@site/docs/best-practices/_snippets/_table_of_contents.md';
+
# Best Practices in ClickHouse {#best-practices-in-clickhouse}
This section provides the best practices you will want to follow to get the most out of ClickHouse.
-| Page | Description |
-|----------------------------------------------------------------------|--------------------------------------------------------------------------|
-| [Choosing a Primary Key](/best-practices/choosing-a-primary-key) | Guidance on selecting an effective Primary Key in ClickHouse. |
-| [Select Data Types](/best-practices/select-data-types) | Recommendations for choosing appropriate data types. |
-| [Use Materialized Views](/best-practices/use-materialized-views) | When and how to benefit from materialized views. |
-| [Minimize and Optimize JOINs](/best-practices/minimize-optimize-joins)| Best practices for minimizing and optimizing JOIN operations. |
-| [Choosing a Partitioning Key](/best-practices/choosing-a-partitioning-key) | How to choose and apply partitioning keys effectively. |
-| [Selecting an Insert Strategy](/best-practices/selecting-an-insert-strategy) | Strategies for efficient data insertion in ClickHouse. |
-| [Data Skipping Indices](/best-practices/use-data-skipping-indices-where-appropriate) | When to apply data skipping indices for performance gains. |
-| [Avoid Mutations](/best-practices/avoid-mutations) | Reasons to avoid mutations and how to design without them. |
-| [Avoid OPTIMIZE FINAL](/best-practices/avoid-optimize-final) | Why `OPTIMIZE FINAL` can be costly and how to work around it. |
-| [Use JSON where appropriate](/best-practices/use-json-where-appropriate) | Considerations for using JSON columns in ClickHouse. |
+<TableOfContents/>
\ No newline at end of file
diff --git a/docs/cloud-index.md b/docs/cloud-index.md
deleted file mode 100644
index 911b6d139ff..00000000000
--- a/docs/cloud-index.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-slug: /cloud/overview
-keywords: ['AWS', 'Cloud', 'serverless']
-title: 'Overview'
-hide_title: true
-description: 'Overview page for Cloud'
----
-
-import Content from '@site/docs/about-us/cloud.md';
-
-
diff --git a/docs/cloud/_snippets/_security_table_of_contents.md b/docs/cloud/_snippets/_security_table_of_contents.md
new file mode 100644
index 00000000000..45aa2a68290
--- /dev/null
+++ b/docs/cloud/_snippets/_security_table_of_contents.md
@@ -0,0 +1,8 @@
+| Page | Description |
+|---------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------|
+| [Shared Responsibility Model](shared-responsibility-model.md) | Understand how security responsibilities are divided between ClickHouse Cloud and your organization for different service types. |
+| [Cloud Access Management](cloud-access-management/index.md) | Manage user access with authentication, single sign-on (SSO), role-based permissions, and team invitations. |
+| [Connectivity](connectivity-overview.md) | Configure secure network access including IP allow-lists, private networking, S3 data access, and Cloud IP address management. |
+| [Enhanced Encryption](cmek.md) | Learn about default AES 256 encryption and how to enable Transparent Data Encryption (TDE) for additional data protection at rest. |
+| [Audit Logging](audit-logging.md) | Set up and use audit logging to track and monitor activities in your ClickHouse Cloud environment. |
+| [Privacy and Compliance](privacy-compliance-overview.md) | Review security certifications, compliance standards, and learn how to manage your personal information and data rights. |
\ No newline at end of file
diff --git a/docs/cloud/manage/api/api-overview.md b/docs/cloud/api/api-overview.md
similarity index 98%
rename from docs/cloud/manage/api/api-overview.md
rename to docs/cloud/api/api-overview.md
index ab0484d0c5c..5e81632b5e8 100644
--- a/docs/cloud/manage/api/api-overview.md
+++ b/docs/cloud/api/api-overview.md
@@ -56,7 +56,8 @@ If your organization has been migrated to one of the [new pricing plans](https:/
You will now also be able to specify the `num_replicas` field as a property of the service resource.
:::
-## Terraform and OpenAPI New Pricing: Replica Settings Explained
+## Terraform and OpenAPI New Pricing: Replica Settings Explained {#terraform-and-openapi-new-pricing---replica-settings-explained}
+
The number of replicas each service will be created with defaults to 3 for the Scale and Enterprise tiers, while it defaults to 1 for the Basic tier.
For the Scale and the Enterprise tiers it is possible to adjust it by passing a `numReplicas` field in the service creation request.
The value of the `numReplicas` field must be between 2 and 20 for the first service in a warehouse. Services that are created in an existing warehouse can have a number of replicas as low as 1.
diff --git a/docs/cloud/manage/api/index.md b/docs/cloud/api/index.md
similarity index 100%
rename from docs/cloud/manage/api/index.md
rename to docs/cloud/api/index.md
diff --git a/docs/cloud/manage/openapi.md b/docs/cloud/api/openapi.md
similarity index 100%
rename from docs/cloud/manage/openapi.md
rename to docs/cloud/api/openapi.md
diff --git a/docs/cloud/manage/postman.md b/docs/cloud/api/postman.md
similarity index 100%
rename from docs/cloud/manage/postman.md
rename to docs/cloud/api/postman.md
diff --git a/docs/cloud/bestpractices/index.md b/docs/cloud/bestpractices/index.md
deleted file mode 100644
index 550f2901bc4..00000000000
--- a/docs/cloud/bestpractices/index.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-slug: /cloud/bestpractices
-keywords: ['Cloud', 'Best Practices', 'Bulk Inserts', 'Asynchronous Inserts', 'Avoid mutations', 'Avoid nullable columns', 'Avoid Optimize Final', 'Low Cardinality Partitioning Key', 'Multi Tenancy', 'Usage Limits']
-title: 'Overview'
-hide_title: true
-description: 'Landing page for Best Practices section in ClickHouse Cloud'
----
-
-# Best Practices in ClickHouse Cloud {#best-practices-in-clickhouse-cloud}
-
-This section provides best practices you will want to follow to get the most out of ClickHouse Cloud.
-
-| Page | Description |
-|----------------------------------------------------------|----------------------------------------------------------------------------|
-| [Usage Limits](/cloud/bestpractices/usage-limits)| Explore the limits of ClickHouse. |
-| [Multi tenancy](/cloud/bestpractices/multi-tenancy)| Learn about different strategies to implement multi-tenancy. |
-
-These are in addition to the standard best practices which apply to all deployments of ClickHouse.
-
-| Page | Description |
-|----------------------------------------------------------------------|--------------------------------------------------------------------------|
-| [Choosing a Primary Key](/best-practices/choosing-a-primary-key) | Guidance on selecting an effective Primary Key in ClickHouse. |
-| [Select Data Types](/best-practices/select-data-types) | Recommendations for choosing appropriate data types. |
-| [Use Materialized Views](/best-practices/use-materialized-views) | When and how to benefit from materialized views. |
-| [Minimize and Optimize JOINs](/best-practices/minimize-optimize-joins)| Best practices for minimizing and optimizing JOIN operations. |
-| [Choosing a Partitioning Key](/best-practices/choosing-a-partitioning-key) | How to choose and apply partitioning keys effectively. |
-| [Selecting an Insert Strategy](/best-practices/selecting-an-insert-strategy) | Strategies for efficient data insertion in ClickHouse. |
-| [Data Skipping Indices](/best-practices/use-data-skipping-indices-where-appropriate) | When to apply data skipping indices for performance gains. |
-| [Avoid Mutations](/best-practices/avoid-mutations) | Reasons to avoid mutations and how to design without them. |
-| [Avoid `OPTIMIZE FINAL`](/best-practices/avoid-optimize-final) | Why `OPTIMIZE FINAL` can be costly and how to work around it. |
-| [Use JSON where appropriate](/best-practices/use-json-where-appropriate) | Considerations for using JSON columns in ClickHouse. |
diff --git a/docs/cloud/bestpractices/usagelimits.md b/docs/cloud/bestpractices/usagelimits.md
deleted file mode 100644
index 37ab67b542c..00000000000
--- a/docs/cloud/bestpractices/usagelimits.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-slug: /cloud/bestpractices/usage-limits
-sidebar_label: 'Usage Limits'
-title: 'Usage limits'
-description: 'Describes the recommended usage limits in ClickHouse Cloud'
----
-
-While ClickHouse is known for its speed and reliability, optimal performance is achieved within certain operating parameters. For example, having too many tables, databases or parts could negatively impact performance. To avoid this, Clickhouse Cloud has guardrails set up for several types of items. You can find details of these guardrails below.
-
-:::tip
-If you've run up against one of these guardrails, it's possible that you are implementing your use case in an unoptimized way. Contact our support team and we will gladly help you refine your use case to avoid exceeding the guardrails or look together at how we can increase them in a controlled manner.
-:::
-
-| Dimension | Limit |
-|-----------|-------|
-|**Databases**| 1000|
-|**Tables**| 5000|
-|**Columns**| ∼1000 (wide format is preferred to compact)|
-|**Partitions**| 50k|
-|**Parts**| 100k across the entire instance|
-|**Part size**| 150gb|
-|**Services per organization**| 20 (soft)|
-|**Services per warehouse**| 5 (soft)|
-|**Low cardinality**| 10k or less|
-|**Primary keys in a table**| 4-5 that sufficiently filter down the data|
-|**Query concurrency**| 1000|
-|**Batch ingest**| anything > 1M will be split by the system in 1M row blocks|
-
-:::note
-For Single Replica Services, the maximum number of databases is restricted to 100, and the maximum number of tables is restricted to 500. In addition, storage for Basic Tier Services is limited to 1 TB.
-:::
diff --git a/docs/cloud/manage/scaling.md b/docs/cloud/features/01_automatic_scaling.md
similarity index 100%
rename from docs/cloud/manage/scaling.md
rename to docs/cloud/features/01_automatic_scaling.md
diff --git a/docs/cloud/get-started/sql-console.md b/docs/cloud/features/01_cloud_console_features/01_sql-console.md
similarity index 100%
rename from docs/cloud/get-started/sql-console.md
rename to docs/cloud/features/01_cloud_console_features/01_sql-console.md
diff --git a/docs/cloud/get-started/query-insights.md b/docs/cloud/features/01_cloud_console_features/02_query-insights.md
similarity index 100%
rename from docs/cloud/get-started/query-insights.md
rename to docs/cloud/features/01_cloud_console_features/02_query-insights.md
diff --git a/docs/cloud/get-started/query-endpoints.md b/docs/cloud/features/01_cloud_console_features/03_query-endpoints.md
similarity index 100%
rename from docs/cloud/get-started/query-endpoints.md
rename to docs/cloud/features/01_cloud_console_features/03_query-endpoints.md
diff --git a/docs/cloud/manage/dashboards.md b/docs/cloud/features/01_cloud_console_features/04_dashboards.md
similarity index 100%
rename from docs/cloud/manage/dashboards.md
rename to docs/cloud/features/01_cloud_console_features/04_dashboards.md
diff --git a/docs/cloud/features/01_cloud_console_features/_category_.json b/docs/cloud/features/01_cloud_console_features/_category_.json
new file mode 100644
index 00000000000..85ba09bce82
--- /dev/null
+++ b/docs/cloud/features/01_cloud_console_features/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Cloud console",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/features/02_infrastructure_and_deploy/_category_.json b/docs/cloud/features/02_infrastructure_and_deploy/_category_.json
new file mode 100644
index 00000000000..3e6367dd545
--- /dev/null
+++ b/docs/cloud/features/02_infrastructure_and_deploy/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Infrastructure and deploy",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/reference/byoc.md b/docs/cloud/features/02_infrastructure_and_deploy/byoc.md
similarity index 100%
rename from docs/cloud/reference/byoc.md
rename to docs/cloud/features/02_infrastructure_and_deploy/byoc.md
diff --git a/docs/cloud/reference/shared-catalog.md b/docs/cloud/features/02_infrastructure_and_deploy/shared-catalog.md
similarity index 100%
rename from docs/cloud/reference/shared-catalog.md
rename to docs/cloud/features/02_infrastructure_and_deploy/shared-catalog.md
diff --git a/docs/cloud/reference/shared-merge-tree.md b/docs/cloud/features/02_infrastructure_and_deploy/shared-merge-tree.md
similarity index 100%
rename from docs/cloud/reference/shared-merge-tree.md
rename to docs/cloud/features/02_infrastructure_and_deploy/shared-merge-tree.md
diff --git a/docs/cloud/reference/warehouses.md b/docs/cloud/features/02_infrastructure_and_deploy/warehouses.md
similarity index 100%
rename from docs/cloud/reference/warehouses.md
rename to docs/cloud/features/02_infrastructure_and_deploy/warehouses.md
diff --git a/docs/cloud/features/03_monitoring/_category_.json b/docs/cloud/features/03_monitoring/_category_.json
new file mode 100644
index 00000000000..ef0bd973e2c
--- /dev/null
+++ b/docs/cloud/features/03_monitoring/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Monitoring",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/manage/monitoring/advanced_dashboard.md b/docs/cloud/features/03_monitoring/advanced_dashboard.md
similarity index 99%
rename from docs/cloud/manage/monitoring/advanced_dashboard.md
rename to docs/cloud/features/03_monitoring/advanced_dashboard.md
index ab320eb6ebe..578a412846e 100644
--- a/docs/cloud/manage/monitoring/advanced_dashboard.md
+++ b/docs/cloud/features/03_monitoring/advanced_dashboard.md
@@ -110,7 +110,7 @@ interface can help detect issues.
| Network receive bytes/sec | Tracks the current speed of outbound network traffic |
| Concurrent network connections | Tracks the number of current concurrent network connections |
-## Identifying issues with the Advanced dashboard {#identifying-issues-with-the-advanced-dashboard}
+## Identifying issues using the advanced dashboard {#identifying-issues-with-the-advanced-dashboard}
Having this real-time view of the health of your ClickHouse service greatly helps
mitigate issues before they impact your business or help solve them. Below are a
diff --git a/docs/cloud/manage/monitoring/prometheus.md b/docs/cloud/features/03_monitoring/prometheus.md
similarity index 100%
rename from docs/cloud/manage/monitoring/prometheus.md
rename to docs/cloud/features/03_monitoring/prometheus.md
diff --git a/docs/cloud/security/shared-responsibility-model.md b/docs/cloud/features/04_security/01_shared-responsibility-model.md
similarity index 100%
rename from docs/cloud/security/shared-responsibility-model.md
rename to docs/cloud/features/04_security/01_shared-responsibility-model.md
diff --git a/docs/cloud/features/04_security/_category_.json b/docs/cloud/features/04_security/_category_.json
new file mode 100644
index 00000000000..aed26fa7f7a
--- /dev/null
+++ b/docs/cloud/features/04_security/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Security",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/security/cloud-access-management/cloud-access-management.md b/docs/cloud/features/04_security/cloud-access-management/cloud-access-management.md
similarity index 76%
rename from docs/cloud/security/cloud-access-management/cloud-access-management.md
rename to docs/cloud/features/04_security/cloud-access-management/cloud-access-management.md
index b0794fccf84..cfab1faad61 100644
--- a/docs/cloud/security/cloud-access-management/cloud-access-management.md
+++ b/docs/cloud/features/04_security/cloud-access-management/cloud-access-management.md
@@ -32,23 +32,31 @@ Users must be assigned an organization level role and may optionally be assigned
| SQL console | Custom | Configure using SQL [`GRANT`](/sql-reference/statements/grant) statement; assign the role to a SQL console user by naming the role after the user |
To create a custom role for a SQL console user and grant it a general role, run the following commands. The email address must match the user's email address in the console.
+
+
+
+#### Create `database_developer` and grant permissions {#create-database_developer-and-grant-permissions}
+
+Create the `database_developer` role and grant `SHOW`, `CREATE`, `ALTER`, and `DELETE` permissions.
-1. Create the database_developer role and grant `SHOW`, `CREATE`, `ALTER`, and `DELETE` permissions.
-
- ```sql
- CREATE ROLE OR REPLACE database_developer;
- GRANT SHOW ON * TO database_developer;
- GRANT CREATE ON * TO database_developer;
- GRANT ALTER ON * TO database_developer;
- GRANT DELETE ON * TO database_developer;
- ```
-
-2. Create a role for the SQL console user my.user@domain.com and assign it the database_developer role.
+```sql
+CREATE ROLE OR REPLACE database_developer;
+GRANT SHOW ON * TO database_developer;
+GRANT CREATE ON * TO database_developer;
+GRANT ALTER ON * TO database_developer;
+GRANT DELETE ON * TO database_developer;
+```
+
+#### Create SQL console user role {#create-sql-console-user-role}
+
+Create a role for the SQL console user `my.user@domain.com` and assign it the `database_developer` role.
- ```sql
- CREATE ROLE OR REPLACE `sql-console-role:my.user@domain.com`;
- GRANT database_developer TO `sql-console-role:my.user@domain.com`;
- ```
+```sql
+CREATE ROLE OR REPLACE `sql-console-role:my.user@domain.com`;
+GRANT database_developer TO `sql-console-role:my.user@domain.com`;
+```
+
+
### SQL console passwordless authentication {#sql-console-passwordless-authentication}
SQL console users are created for each session and authenticated using X.509 certificates that are automatically rotated. The user is removed when the session is terminated. When generating access lists for audits, please navigate to the Settings tab for the service in the console and note the SQL console access in addition to the database users that exist in the database. If custom roles are configured, the user's access is listed in the role ending with the user's username.
@@ -88,38 +96,46 @@ Users can use a SHA256 hash generator or code function such as `hashlib` in Pyth
### Database access listings with SQL console users {#database-access-listings-with-sql-console-users}
The following process can be used to generate a complete access listing across the SQL console and databases in your organization.
-1. Run the following queries to get a list of all grants in the database.
-
- ```sql
- SELECT grants.user_name,
- grants.role_name,
- users.name AS role_member,
- grants.access_type,
- grants.database,
- grants.table
- FROM system.grants LEFT OUTER JOIN system.role_grants ON grants.role_name = role_grants.granted_role_name
- LEFT OUTER JOIN system.users ON role_grants.user_name = users.name
-
- UNION ALL
-
- SELECT grants.user_name,
- grants.role_name,
- role_grants.role_name AS role_member,
- grants.access_type,
- grants.database,
- grants.table
- FROM system.role_grants LEFT OUTER JOIN system.grants ON role_grants.granted_role_name = grants.role_name
- WHERE role_grants.user_name is null;
- ```
-
-2. Associate this list to Console users with access to SQL console.
+
+
+#### Get a list of all database grants {#get-a-list-of-all-database-grants}
+
+Run the following queries to get a list of all grants in the database.
+
+```sql
+SELECT grants.user_name,
+       grants.role_name,
+       users.name AS role_member,
+       grants.access_type,
+       grants.database,
+       grants.table
+FROM system.grants
+LEFT OUTER JOIN system.role_grants ON grants.role_name = role_grants.granted_role_name
+LEFT OUTER JOIN system.users ON role_grants.user_name = users.name
+
+UNION ALL
+
+SELECT grants.user_name,
+       grants.role_name,
+       role_grants.role_name AS role_member,
+       grants.access_type,
+       grants.database,
+       grants.table
+FROM system.role_grants
+LEFT OUTER JOIN system.grants ON role_grants.granted_role_name = grants.role_name
+WHERE role_grants.user_name IS NULL;
+```
+
+#### Associate the grant list with Console users who have access to SQL console {#associate-grant-list-to-console-users-with-access-to-sql-console}
+
+Associate this list with Console users that have access to SQL console.
- a. Go to the Console.
+a. Go to the Console.
+
+b. Select the relevant service.
- b. Select the relevant service.
+c. Select Settings on the left.
- c. Select Settings on the left.
+d. Scroll to the SQL console access section.
- d. Scroll to the SQL console access section.
+e. Click the link showing the number of users with access to the service (`There are # users with access to this service.`) to see the user listing.
- e. Click the link for the number of users with access to the database `There are # users with access to this service.` to see the user listing.
+
\ No newline at end of file
diff --git a/docs/cloud/security/cloud-access-management/cloud-authentication.md b/docs/cloud/features/04_security/cloud-access-management/cloud-authentication.md
similarity index 100%
rename from docs/cloud/security/cloud-access-management/cloud-authentication.md
rename to docs/cloud/features/04_security/cloud-access-management/cloud-authentication.md
diff --git a/docs/cloud/security/cloud-access-management/index.md b/docs/cloud/features/04_security/cloud-access-management/index.md
similarity index 100%
rename from docs/cloud/security/cloud-access-management/index.md
rename to docs/cloud/features/04_security/cloud-access-management/index.md
diff --git a/docs/cloud/security/inviting-new-users.md b/docs/cloud/features/04_security/cloud-access-management/inviting-new-users.md
similarity index 100%
rename from docs/cloud/security/inviting-new-users.md
rename to docs/cloud/features/04_security/cloud-access-management/inviting-new-users.md
diff --git a/docs/cloud/security/cmek.md b/docs/cloud/features/04_security/cmek.md
similarity index 100%
rename from docs/cloud/security/cmek.md
rename to docs/cloud/features/04_security/cmek.md
diff --git a/docs/cloud/security/cloud-endpoints-api.md b/docs/cloud/features/04_security/connectivity/cloud-endpoints-api.md
similarity index 100%
rename from docs/cloud/security/cloud-endpoints-api.md
rename to docs/cloud/features/04_security/connectivity/cloud-endpoints-api.md
diff --git a/docs/cloud/security/connectivity-overview.md b/docs/cloud/features/04_security/connectivity/connectivity-overview.md
similarity index 100%
rename from docs/cloud/security/connectivity-overview.md
rename to docs/cloud/features/04_security/connectivity/connectivity-overview.md
diff --git a/docs/cloud/security/aws-privatelink.md b/docs/cloud/features/04_security/connectivity/private_networking/aws-privatelink.md
similarity index 100%
rename from docs/cloud/security/aws-privatelink.md
rename to docs/cloud/features/04_security/connectivity/private_networking/aws-privatelink.md
diff --git a/docs/cloud/security/azure-privatelink.md b/docs/cloud/features/04_security/connectivity/private_networking/azure-privatelink.md
similarity index 100%
rename from docs/cloud/security/azure-privatelink.md
rename to docs/cloud/features/04_security/connectivity/private_networking/azure-privatelink.md
diff --git a/docs/cloud/security/gcp-private-service-connect.md b/docs/cloud/features/04_security/connectivity/private_networking/gcp-private-service-connect.md
similarity index 100%
rename from docs/cloud/security/gcp-private-service-connect.md
rename to docs/cloud/features/04_security/connectivity/private_networking/gcp-private-service-connect.md
diff --git a/docs/cloud/security/private-link-overview.md b/docs/cloud/features/04_security/connectivity/private_networking/private-link-overview.md
similarity index 100%
rename from docs/cloud/security/private-link-overview.md
rename to docs/cloud/features/04_security/connectivity/private_networking/private-link-overview.md
diff --git a/docs/cloud/security/setting-ip-filters.md b/docs/cloud/features/04_security/connectivity/setting-ip-filters.md
similarity index 100%
rename from docs/cloud/security/setting-ip-filters.md
rename to docs/cloud/features/04_security/connectivity/setting-ip-filters.md
diff --git a/docs/cloud/manage/notifications.md b/docs/cloud/features/05_notifications.md
similarity index 100%
rename from docs/cloud/manage/notifications.md
rename to docs/cloud/features/05_notifications.md
diff --git a/docs/cloud/support.md b/docs/cloud/features/06_support.md
similarity index 88%
rename from docs/cloud/support.md
rename to docs/cloud/features/06_support.md
index 836382cd3c5..e6b73fc87a0 100644
--- a/docs/cloud/support.md
+++ b/docs/cloud/features/06_support.md
@@ -1,6 +1,6 @@
---
sidebar_label: 'Cloud Support'
-title: 'Cloud Support'
+title: 'Support'
slug: /cloud/support
description: 'Learn about Cloud Support'
hide_title: true
diff --git a/docs/cloud/features/_category_.json b/docs/cloud/features/_category_.json
new file mode 100644
index 00000000000..383c8150644
--- /dev/null
+++ b/docs/cloud/features/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Features",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/manage/backups/configurable-backups.md b/docs/cloud/features/backups/configurable-backups.md
similarity index 100%
rename from docs/cloud/manage/backups/configurable-backups.md
rename to docs/cloud/features/backups/configurable-backups.md
diff --git a/docs/cloud/manage/backups/export-backups-to-own-cloud-account.md b/docs/cloud/features/backups/export-backups-to-own-cloud-account.md
similarity index 100%
rename from docs/cloud/manage/backups/export-backups-to-own-cloud-account.md
rename to docs/cloud/features/backups/export-backups-to-own-cloud-account.md
diff --git a/docs/cloud/manage/backups/index.md b/docs/cloud/features/backups/index.md
similarity index 100%
rename from docs/cloud/manage/backups/index.md
rename to docs/cloud/features/backups/index.md
diff --git a/docs/cloud/manage/backups/overview.md b/docs/cloud/features/backups/overview.md
similarity index 100%
rename from docs/cloud/manage/backups/overview.md
rename to docs/cloud/features/backups/overview.md
diff --git a/docs/cloud/manage/hyperdx.md b/docs/cloud/features/hyperdx.md
similarity index 98%
rename from docs/cloud/manage/hyperdx.md
rename to docs/cloud/features/hyperdx.md
index 7e56e90d279..71e5cee6102 100644
--- a/docs/cloud/manage/hyperdx.md
+++ b/docs/cloud/features/hyperdx.md
@@ -15,7 +15,7 @@ HyperDX is the user interface for [**ClickStack**](/use-cases/observability/clic
HyperDX is a purpose-built frontend for exploring and visualizing observability data, supporting both Lucene-style and SQL queries, interactive dashboards, alerting, trace exploration, and more—all optimized for ClickHouse as the backend.
-HyperDX in ClickHouse Cloud allows users to enjoy a more turnkey ClickStack experience - no infrastructure to manage, no separate authentication to configure.
+HyperDX in ClickHouse Cloud allows users to enjoy a more turnkey ClickStack experience - no infrastructure to manage, no separate authentication to configure.
HyperDX can be launched with a single click and connected to your data - fully integrated into the ClickHouse Cloud authentication system for seamless, secure access to your observability insights.
## Deployment {#main-concepts}
diff --git a/docs/cloud/features/index.md b/docs/cloud/features/index.md
new file mode 100644
index 00000000000..ce8d1500485
--- /dev/null
+++ b/docs/cloud/features/index.md
@@ -0,0 +1,7 @@
+---
+sidebar_label: 'Features'
+slug: /cloud/features
+title: 'Features'
+description: 'Table of contents page linking to Cloud features'
+---
+
diff --git a/docs/cloud/manage/integrations.md b/docs/cloud/features/integrations.md
similarity index 100%
rename from docs/cloud/manage/integrations.md
rename to docs/cloud/features/integrations.md
diff --git a/docs/cloud/manage/replica-aware-routing.md b/docs/cloud/features/replica-aware-routing.md
similarity index 100%
rename from docs/cloud/manage/replica-aware-routing.md
rename to docs/cloud/features/replica-aware-routing.md
diff --git a/docs/cloud/manage/upgrades.md b/docs/cloud/features/upgrades.md
similarity index 100%
rename from docs/cloud/manage/upgrades.md
rename to docs/cloud/features/upgrades.md
diff --git a/docs/cloud/get-started/index.md b/docs/cloud/get-started/index.md
deleted file mode 100644
index 3c30f63f149..00000000000
--- a/docs/cloud/get-started/index.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-slug: /cloud/get-started
-title: 'Get Started'
-description: 'Get Started Table Of Contents'
-keywords: ['Cloud Quick Start', 'SQL Console', 'Query Insights', 'Query API Endpoints', 'Dashboards', 'Cloud Support']
----
-
-Welcome to ClickHouse Cloud! Explore the pages below to learn more about what ClickHouse Cloud has to offer.
-
-| Page | Description |
-|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [Overview](/cloud/overview) | Overview of the benefits of using ClickHouse Cloud and what version of ClickHouse is used for it. |
-| [SQL Console](/cloud/get-started/sql-console) | Learn about the interactive SQL console available in Cloud |
-| [Query Insights](/cloud/get-started/query-insights) | Learn about how Cloud's Query Insights feature makes ClickHouse's built-in query log easier to use through various visualizations and tables. |
-| [Query Endpoints](/cloud/get-started/query-endpoints) | Learn about the Query API Endpoints feature which allows you to create an API endpoint directly from any saved SQL query in the ClickHouse Cloud console. |
-| [Dashboards](/cloud/manage/dashboards) | Learn about how SQL Console's dashboards feature allows you to collect and share visualizations from saved queries. |
-| [Cloud Support](/cloud/support) | Learn more about Support Services for ClickHouse Cloud users and customers. |
diff --git a/docs/cloud/bestpractices/_category_.yml b/docs/cloud/guides/_category_.yml
similarity index 83%
rename from docs/cloud/bestpractices/_category_.yml
rename to docs/cloud/guides/_category_.yml
index 1648e8a79cb..747e5fb1796 100644
--- a/docs/cloud/bestpractices/_category_.yml
+++ b/docs/cloud/guides/_category_.yml
@@ -1,4 +1,4 @@
-label: 'Best Practices'
+label: 'Guides'
collapsible: true
collapsed: true
link:
diff --git a/docs/cloud/guides/best_practices/_category_.json b/docs/cloud/guides/best_practices/_category_.json
new file mode 100644
index 00000000000..21f95c55bca
--- /dev/null
+++ b/docs/cloud/guides/best_practices/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Best practices",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/guides/best_practices/index.md b/docs/cloud/guides/best_practices/index.md
new file mode 100644
index 00000000000..4719ea2750a
--- /dev/null
+++ b/docs/cloud/guides/best_practices/index.md
@@ -0,0 +1,22 @@
+---
+slug: /cloud/bestpractices
+keywords: ['Cloud', 'Best Practices', 'Bulk Inserts', 'Asynchronous Inserts', 'Avoid Mutations', 'Avoid Nullable Columns', 'Avoid Optimize Final', 'Low Cardinality Partitioning Key', 'Multi Tenancy', 'Usage Limits']
+title: 'Overview'
+hide_title: true
+description: 'Landing page for Best Practices section in ClickHouse Cloud'
+---
+
+import TableOfContents from '@site/docs/best-practices/_snippets/_table_of_contents.md';
+
+# Best Practices in ClickHouse Cloud {#best-practices-in-clickhouse-cloud}
+
+This section provides best practices you will want to follow to get the most out of ClickHouse Cloud.
+
+| Page | Description |
+|----------------------------------------------------------|----------------------------------------------------------------------------|
+| [Usage Limits](/cloud/bestpractices/usage-limits)| Explore the usage limits of ClickHouse Cloud. |
+| [Multi tenancy](/cloud/bestpractices/multi-tenancy)| Learn about different strategies to implement multi-tenancy. |
+
+These are in addition to the standard best practices which apply to all deployments of ClickHouse.
+
+<TableOfContents/>
\ No newline at end of file
diff --git a/docs/cloud/bestpractices/multitenancy.md b/docs/cloud/guides/best_practices/multitenancy.md
similarity index 99%
rename from docs/cloud/bestpractices/multitenancy.md
rename to docs/cloud/guides/best_practices/multitenancy.md
index 5289a09b067..5f7df65427a 100644
--- a/docs/cloud/bestpractices/multitenancy.md
+++ b/docs/cloud/guides/best_practices/multitenancy.md
@@ -1,6 +1,6 @@
---
slug: /cloud/bestpractices/multi-tenancy
-sidebar_label: 'Implement multi tenancy'
+sidebar_label: 'Multi tenancy'
title: 'Multi tenancy'
description: 'Best practices to implement multi tenancy'
---
diff --git a/docs/cloud/guides/best_practices/usagelimits.md b/docs/cloud/guides/best_practices/usagelimits.md
new file mode 100644
index 00000000000..af49f5956be
--- /dev/null
+++ b/docs/cloud/guides/best_practices/usagelimits.md
@@ -0,0 +1,40 @@
+---
+slug: /cloud/bestpractices/usage-limits
+sidebar_label: 'Service limits'
+title: 'Usage limits'
+description: 'Describes the recommended usage limits in ClickHouse Cloud'
+---
+
+While ClickHouse is known for its speed and reliability, optimal performance is
+achieved within certain operating parameters. For example, having too many tables,
+databases, or parts could negatively impact performance. To avoid this, ClickHouse
+Cloud has guardrails set up for several types of items. You can find details of
+these guardrails below.
+
+:::tip
+If you've run up against one of these guardrails, it's possible that you are
+implementing your use case in an unoptimized way. Contact our support team and
+we will gladly help you refine your use case to avoid exceeding the guardrails
+or look together at how we can increase them in a controlled manner.
+:::
+
+| Dimension | Limit |
+|-------------------------------|------------------------------------------------------------|
+| **Databases** | 1000 |
+| **Tables** | 5000 |
+| **Columns** | ∼1000 (wide format is preferred to compact) |
+| **Partitions** | 50k |
+| **Parts** | 100k across the entire instance |
+| **Part size**                 | 150 GB                                                      |
+| **Services per organization** | 20 (soft) |
+| **Services per warehouse** | 5 (soft) |
+| **Low cardinality** | 10k or less |
+| **Primary keys in a table** | 4-5 that sufficiently filter down the data |
+| **Query concurrency** | 1000 |
+| **Batch ingest**              | anything > 1M rows will be split by the system into 1M-row blocks |
+
+:::note
+For Single Replica Services, the maximum number of databases is restricted to
+100, and the maximum number of tables is restricted to 500. In addition, storage
+for Basic Tier Services is limited to 1 TB.
+:::
diff --git a/docs/cloud/reference/cloud-compatibility.md b/docs/cloud/guides/cloud-compatibility.md
similarity index 99%
rename from docs/cloud/reference/cloud-compatibility.md
rename to docs/cloud/guides/cloud-compatibility.md
index 86dafbfefd5..59c238c9c08 100644
--- a/docs/cloud/reference/cloud-compatibility.md
+++ b/docs/cloud/guides/cloud-compatibility.md
@@ -1,6 +1,6 @@
---
slug: /whats-new/cloud-compatibility
-sidebar_label: 'Cloud Compatibility'
+sidebar_label: 'Cloud compatibility'
title: 'Cloud Compatibility'
description: 'This guide provides an overview of what to expect functionally and operationally in ClickHouse Cloud.'
---
diff --git a/docs/cloud/guides/index.md b/docs/cloud/guides/index.md
new file mode 100644
index 00000000000..2355ca4370c
--- /dev/null
+++ b/docs/cloud/guides/index.md
@@ -0,0 +1,6 @@
+---
+slug: /cloud/guides
+title: 'Guides'
+hide_title: true
+description: 'Table of contents page for the ClickHouse Cloud guides section'
+---
\ No newline at end of file
diff --git a/docs/cloud/guides/security/_category_.json b/docs/cloud/guides/security/_category_.json
new file mode 100644
index 00000000000..aed26fa7f7a
--- /dev/null
+++ b/docs/cloud/guides/security/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Security",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/guides/security/cloud_access_management/_category_.json b/docs/cloud/guides/security/cloud_access_management/_category_.json
new file mode 100644
index 00000000000..abfdcebed27
--- /dev/null
+++ b/docs/cloud/guides/security/cloud_access_management/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Cloud Access Management",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/security/common-access-management-queries.md b/docs/cloud/guides/security/cloud_access_management/common-access-management-queries.md
similarity index 100%
rename from docs/cloud/security/common-access-management-queries.md
rename to docs/cloud/guides/security/cloud_access_management/common-access-management-queries.md
diff --git a/docs/cloud/security/saml-sso-setup.md b/docs/cloud/guides/security/cloud_access_management/saml-sso-setup.md
similarity index 100%
rename from docs/cloud/security/saml-sso-setup.md
rename to docs/cloud/guides/security/cloud_access_management/saml-sso-setup.md
diff --git a/docs/cloud/guides/security/connectivity/_category_.json b/docs/cloud/guides/security/connectivity/_category_.json
new file mode 100644
index 00000000000..6e137e0592d
--- /dev/null
+++ b/docs/cloud/guides/security/connectivity/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Connectivity",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/security/accessing-s3-data-securely.md b/docs/cloud/guides/security/connectivity/accessing-s3-data-securely.md
similarity index 100%
rename from docs/cloud/security/accessing-s3-data-securely.md
rename to docs/cloud/guides/security/connectivity/accessing-s3-data-securely.md
diff --git a/docs/cloud/manage/_category_.yml b/docs/cloud/manage/_category_.yml
deleted file mode 100644
index 59089856c86..00000000000
--- a/docs/cloud/manage/_category_.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-label: 'Manage Cloud'
-collapsible: true
-collapsed: true
-link:
- type: generated-index
- title: Manage ClickHouse Cloud
diff --git a/docs/cloud/manage/index.md b/docs/cloud/manage/index.md
deleted file mode 100644
index 46c407d0c6b..00000000000
--- a/docs/cloud/manage/index.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-slug: /cloud/manage
-keywords: ['AWS', 'Cloud', 'serverless', 'management']
-title: 'Overview'
-hide_title: true
-description: 'Overview page for Managing Cloud'
----
-
-# Managing Cloud
-
-In this section of the docs you will find all the information you may need about managing ClickHouse cloud. This section contains the following pages:
-
-| Page | Description |
-|-----------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------|
-| [ClickHouse Cloud Tiers](/cloud/manage/cloud-tiers) | Describes the different cloud tiers, their features, and considerations for choosing the right one. |
-| [Integrations](/manage/integrations) | Covers ClickHouse Cloud's built-in integrations, custom integrations, and integrations that are not supported. |
-| [Backups](/cloud/manage/backups) | Describes how backups work in ClickHouse Cloud, what options you have to configure backups for your service, and how to restore from a backup. |
-| [Monitoring](/integrations/prometheus) | How to integrate Prometheus as a way to monitor ClickHouse cloud. |
-| [Billing](/cloud/manage/billing/overview) | Explains the pricing model for ClickHouse Cloud, including the factors that affect the cost of your service. |
-| [Configuring Settings](/manage/settings) | Describes how to configure settings for ClickHouse Cloud. |
-| [Replica-aware Routing](/manage/replica-aware-routing) | Explains what Replica-aware Routing in ClickHouse Cloud is, its limitations, and how to configure it. |
-| [Automatic Scaling](/manage/scaling) | Explains how ClickHouse Cloud services can be scaled manually or automatically based on your resource needs. |
-| [Service Uptime and SLA](/cloud/manage/service-uptime) | Information about service uptime and Service Level Agreements offered for production instances. |
-| [Notifications](/cloud/notifications) | Shows how ClickHouse Cloud notifications are received and how they can be customized. |
-| [Upgrades](/manage/updates) | Information on how upgrades are rolled out in ClickHouse Cloud. |
-| [Delete Account](/cloud/manage/close_account) | Information on how to close or delete your account when necessary. |
-| [Programmatic API Access with Postman](/cloud/manage/postman) | A guide to help you test the ClickHouse API using Postman. |
-| [Troubleshooting](/faq/troubleshooting) | A collection of commonly encountered issues and how to troubleshoot them. |
-| [Data Transfer](./network-data-transfer.mdx) | Learn more about how ClickHouse Cloud meters data transferred ingress and egress. |
-| [Jan 2025 Changes FAQ](./jan2025_faq/index.md) | Learn more about changes to Cloud introduced in Jan 2025. |
diff --git a/docs/cloud/manage/network-data-transfer.mdx b/docs/cloud/manage/network-data-transfer.mdx
deleted file mode 100644
index 92725e6015c..00000000000
--- a/docs/cloud/manage/network-data-transfer.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
----
-sidebar_label: 'Data Transfer'
-slug: /cloud/manage/network-data-transfer
-title: 'Data Transfer'
-description: 'Learn more about how ClickHouse Cloud meters data transferred ingress and egress'
----
-
-import NetworkPricing from '@site/docs/cloud/manage/_snippets/_network_transfer_rates.md';
-
-ClickHouse Cloud meters data transferred ingress and egress.
-This includes any data in and out of ClickHouse Cloud as well as any intra-region and cross-region data transfer.
-This usage is tracked at the service level. Based on this usage, customers incur data transfer charges that are then added to their monthly bill.
-
-ClickHouse Cloud charges for:
-- Data egress from ClickHouse Cloud to the public Internet, including to other regions of other cloud providers.
-- Data egress to another region in the same cloud provider.
-
-There are no charges for intra-region data transfer or Private Link/Private Service Connect use and data transfer.
-However, we reserve the right to implement additional data transfer pricing dimensions if we see usage patterns that impact our ability to charge users appropriately.
-
-Data transfer charges vary by Cloud Service Provider (CSP) and region, and prices will not be tiered as usage increases. Public internet egress pricing is based only on the origin region.
-Inter-region (or cross-region) pricing depends on both the origin and destination regions. Data transfer pricing does **not** vary between organizational tiers.
-
-**Best Practices to minimize Data Transfer Costs**
-
-There are some patterns to keep in mind when ingressing and egressing data to minimize data transfer costs.
-1. When ingressing or egressing data from Clickhouse Cloud, use compression where possible, to minimize the amount of data transferred and the associated cost.
-2. Be aware that when doing an INSERT over the native protocol with non-inlined values (e.g. INSERT INTO [TABLE] FROM INFILE [FILE] FORMAT NATIVE), ClickHouse clients pull metadata from servers to pack the data. If the metadata is larger than the INSERT payload, you might counterintuitively see more egress than there is ingress from the server perspective. If this is unacceptable, consider inlining data with VALUES syntax or using the HTTP protocol.
-
-The tables below shows how data transfer charges for egress vary across public internet or cross-region by cloud provider and region.
-
-:::note
-ClickHouse Cloud meters inter-region usage in terms of tiers, Tier 1 through Tier 4, depending on the origin and destination regions. The table below shows the tier for each combination of inter-region data transfer. In the Billing usage screen on ClickHouse Cloud you will see data transfer usage broken out by tiers.
-:::
-
-
diff --git a/docs/cloud/onboard/01_discover/01_what_is.md b/docs/cloud/onboard/01_discover/01_what_is.md
new file mode 100644
index 00000000000..4814300e577
--- /dev/null
+++ b/docs/cloud/onboard/01_discover/01_what_is.md
@@ -0,0 +1,46 @@
+---
+slug: /cloud/overview
+title: 'Introduction'
+keywords: ['clickhouse cloud', 'what is clickhouse cloud', 'clickhouse cloud overview', 'clickhouse cloud features']
+hide_title: true
+---
+
+## What is ClickHouse Cloud? {#what-is-clickhouse-cloud}
+
+ClickHouse Cloud is a fully managed cloud service from the original creators of
+ClickHouse, the fastest and most popular open-source columnar online analytical
+processing database.
+
+With Cloud, infrastructure, maintenance, scaling, and operations are taken care of
+for you, so that you can focus on what matters most: building value for your
+organization and your customers, faster.
+
+## Benefits of ClickHouse Cloud {#benefits-of-clickhouse-cloud}
+
+ClickHouse Cloud offers several major benefits over the open-source version:
+
+- **Fast time to value**: Start building instantly without having to size and scale your cluster.
+- **Seamless scaling**: Automatic scaling adjusts to variable workloads so you don't have to over-provision for peak usage.
+- **Serverless operations**: Sit back while we take care of sizing, scaling, security, reliability, and upgrades.
+- **Transparent pricing**: Pay only for what you use, with resource reservations and scaling controls.
+- **Total cost of ownership**: Best price / performance ratio and low administrative overhead.
+- **Broad ecosystem**: Bring your favorite data connectors, visualization tools, SQL and language clients with you.
+
+## OSS vs ClickHouse Cloud comparison {#oss-vs-clickhouse-cloud}
+
+| Feature | Benefits | OSS ClickHouse | ClickHouse Cloud |
+|--------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------|-------------------|
+| **Deployment modes** | ClickHouse provides the flexibility to self-manage with open-source or deploy in the cloud. Use ClickHouse local for local files without a server or chDB to embed ClickHouse directly into your application. | ✅ | ✅ |
+| **Storage** | As an open-source and cloud-hosted product, ClickHouse can be deployed in both shared-disk and shared-nothing architectures. | ✅ | ✅ |
+| **Monitoring and alerting** | Monitoring and alerting about the status of your services is critical to ensuring optimal performance and a proactive approach to detect and triage potential issues. | ✅ | ✅ |
+| **ClickPipes** | ClickPipes is ClickHouse's managed ingestion pipeline that allows you to seamlessly connect your external data sources like databases, APIs, and streaming services into ClickHouse Cloud, eliminating the need for managing pipelines, custom jobs, or ETL processes. It supports workloads of all sizes. | ❌ | ✅ |
+| **Pre-built integrations** | ClickHouse provides pre-built integrations that connect ClickHouse to popular tools and services such as data lakes, SQL and language clients, visualization libraries, and more. | ❌ | ✅ |
+| **SQL console**                | The SQL console offers a fast, intuitive way to connect, explore, and query ClickHouse databases, featuring a slick query interface, data import tools, visualizations, collaboration features, and GenAI-powered SQL assistance.                                                                           | ❌              | ✅                 |
+| **Compliance** | ClickHouse Cloud compliance includes CCPA, EU-US DPF, GDPR, HIPAA, ISO 27001, ISO 27001 SoA, PCI DSS, SOC2. ClickHouse Cloud's security, availability, processing integrity, and confidentiality processes are all independently audited. Details: trust.clickhouse.com. | ❌ | ✅ |
+| **Enterprise-grade security** | Support for advanced security features such as SSO, multi-factor authentication, role-based access control (RBAC), private and secure connections with support for Private Link and Private Service Connect, IP filtering, customer-managed encryption keys (CMEK), and more. | ❌ | ✅ |
+| **Scaling and optimization**   | Seamlessly scales up or down based on workload, supporting both horizontal and vertical scaling. With automated backups, replication, and high availability, ClickHouse provides users with optimal resource allocation.                                                                                    | ❌              | ✅                 |
+| **Support services** | Our best-in-class support services and open-source community resources provide coverage for whichever deployment model you choose. | ❌ | ✅ |
+| **Database upgrades** | Regular database upgrades are essential to establish a strong security posture and access the latest features and performance improvements. | ❌ | ✅ |
+| **Backups** | Backups and restore functionality ensures data durability and supports graceful recovery in the event of outages or other disruptions. | ❌ | ✅ |
+| **Compute-compute separation** | Users can scale compute resources independently of storage, so teams and workloads can share the same storage and maintain dedicated compute resources. This ensures that the performance of one workload doesn't interfere with another, enhancing flexibility, performance, and cost-efficiency. | ❌ | ✅ |
+| **Managed services** | With a cloud-managed service, teams can focus on business outcomes and accelerate time-to-market without having to worry about the operational overhead of sizing, setup, and maintenance of ClickHouse. | ❌ | ✅ |
diff --git a/docs/cloud/onboard/01_discover/02_use_cases/00_overview.md b/docs/cloud/onboard/01_discover/02_use_cases/00_overview.md
new file mode 100644
index 00000000000..623b8fc605f
--- /dev/null
+++ b/docs/cloud/onboard/01_discover/02_use_cases/00_overview.md
@@ -0,0 +1,20 @@
+---
+slug: /cloud/get-started/cloud/use-cases/overview
+title: 'Building on ClickHouse Cloud'
+keywords: ['use cases', 'Cloud']
+sidebar_label: 'Overview'
+---
+
+ClickHouse Cloud is suitable for use both as a **primary data store** and as an
+**analytics layer**.
+
+ClickHouse's columnar architecture, vectorized processing, and cloud-native design
+make it uniquely suited for analytical workloads that require both speed and scale.
+Broadly, the most common use cases for ClickHouse Cloud are:
+
+| Use case | Description |
+|----------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [Real-Time analytics](/cloud/get-started/cloud/use-cases/real-time-analytics) | ClickHouse Cloud excels at real-time analytics by delivering sub-second query responses on billions of rows through its columnar storage architecture and vectorized execution engine. The platform handles high-throughput data ingestion of millions of events per second while enabling direct queries on raw data without requiring pre-aggregation. Materialized Views provide real-time aggregations and pre-computed results, while approximate functions for quantiles and counts deliver instant insights perfect for interactive dashboards and real-time decision making.|
+| [Data Lake and Warehouse](/cloud/get-started/cloud/use-cases/data_lake_and_warehouse) | As a modern data warehouse solution, ClickHouse Cloud combines native cloud storage integration with S3, GCS, and Azure Blob for cost-effective storage with schema-on-read flexibility that supports semi-structured data like JSON and nested types. The platform achieves massive compression ratios of 10:1 or better, significantly reducing storage costs, while its compute-storage separation architecture allows independent scaling and cost optimization. Users benefit from a standard SQL interface enhanced with advanced analytics functions, making it easy to query and analyze data at any scale.|
+| [Observability](/cloud/get-started/cloud/use-cases/observability) | ClickHouse Cloud is purpose-built for observability workloads, featuring specialized engines and functions optimized for time-series data that can ingest and query terabytes of logs, metrics, and traces with ease. Through ClickStack, ClickHouse's comprehensive observability solution, organizations can break down the traditional three silos of logs, metrics, and traces by unifying all observability data in a single platform, enabling correlated analysis and eliminating the complexity of managing separate systems. This unified approach makes it ideal for application performance monitoring, infrastructure monitoring, and security event analysis at enterprise scale, with ClickStack providing the tools and integrations needed for complete observability workflows without data silos.|
+| [Machine Learning and GenAI](/cloud/get-started/cloud/use-cases/machine_learning_and_gen_ai) | ClickHouse Cloud powers modern AI applications through four key capabilities: native vector similarity search for RAG applications and embedding storage, comprehensive feature store functionality for real-time ML feature engineering and serving, specialized LLM observability for tracking model performance and usage patterns, and integrated MCP (Model Context Protocol) server support that enables AI agents and LLMs to directly query and analyze data. This unified platform eliminates the complexity of managing separate systems for vector databases, feature stores, and observability tools, providing a single solution for the entire AI/ML data pipeline with ClickHouse's signature performance and scalability.|
diff --git a/docs/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md b/docs/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md
new file mode 100644
index 00000000000..fe1e8eada64
--- /dev/null
+++ b/docs/cloud/onboard/01_discover/02_use_cases/01_real-time-analytics.md
@@ -0,0 +1,159 @@
+---
+slug: /cloud/get-started/cloud/use-cases/real-time-analytics
+title: 'Real-time analytics'
+keywords: ['use cases', 'real-time analytics']
+sidebar_label: 'Real-time analytics'
+---
+
+import Image from '@theme/IdealImage';
+import rta_0 from '@site/static/images/cloud/onboard/discover/use_cases/0_rta.png';
+import rta_1 from '@site/static/images/cloud/onboard/discover/use_cases/1_rta.png';
+import rta_2 from '@site/static/images/cloud/onboard/discover/use_cases/2_rta.png';
+import rta_3 from '@site/static/images/cloud/onboard/discover/use_cases/3_rta.png';
+
+
+
+## What is real-time analytics? {#what-is-real-time-analytics}
+
+Real-time analytics refers to data processing that delivers insights to end users
+and customers as soon as the data is generated. It differs from traditional or
+batch analytics, where data is collected in batches and processed, often a long
+time after it was generated.
+
+Real-time analytics systems are built on top of event streams, which consist of
+a series of events ordered in time. An event is something that’s already happened.
+It could be the addition of an item to the shopping cart on an e-commerce website,
+the emission of a reading from an Internet of Things (IoT) sensor, or a shot on
+goal in a football (soccer) match.
+
+An event (from an imaginary IoT sensor) is shown below, as an example:
+
+```json
+{
+ "deviceId": "sensor-001",
+ "timestamp": "2023-10-05T14:30:00Z",
+ "eventType": "temperatureAlert",
+ "data": {
+ "temperature": 28.5,
+ "unit": "Celsius",
+ "thresholdExceeded": true
+ }
+}
+```
+
+Organizations can discover insights about their customers by aggregating and
+analyzing events like this. This has traditionally been done using batch analytics,
+and in the next section, we’ll compare batch and real-time analytics.
+
+## Real-time analytics vs batch analytics {#real-time-analytics-vs-batch-analytics}
+
+The diagram below shows what a typical batch analytics system would look like
+from the perspective of an individual event:
+
+
+
+You can see that there’s quite a big gap from when the event happens until we
+process and gain some insight from it. Traditionally, this was the only means of
+data analysis, and we’d need to create artificial time boundaries to process
+the data in batches. For example, we might process all the data collected at the
+end of a day. This worked for many use cases, but for others, it’s sub-optimal
+because we’re working with stale data, and it doesn’t allow us to react to the
+data quickly enough.
+
+By contrast, in real-time analytics systems, we react to an event as soon as it
+happens, as shown in the following diagram:
+
+
+
+We can now derive insights from events almost as soon as they’re generated. But
+why is this useful?
+
+## Benefits of real-time analytics {#benefits-of-real-time-analytics}
+
+In today's fast-paced world, organizations rely on real-time analytics to stay
+agile and responsive to ever-changing conditions. A real-time analytics system
+can benefit a business in many ways.
+
+### Better decision-making {#better-decision-making}
+
+Decision-making can be improved by having access to actionable insights via
+real-time analytics. When business operators can see events as they’re happening,
+it makes it much easier to make timely interventions.
+
+For example, if we make changes to an application and want to know whether it’s
+having a detrimental effect on the user experience, we want to know this as
+quickly as possible so that we can revert the changes if necessary. With a less
+real-time approach, we might have to wait until the next day to do this
+analysis, by which time we’ll have a lot of unhappy users.
+
+### New products and revenue streams {#new-products-and-revenue-streams}
+
+Real-time analytics can help businesses generate new revenue streams. Organizations
+can develop new data-centered products and services that give users access to
+analytical querying capabilities. These products are often compelling enough for
+users to pay for access.
+
+In addition, existing applications can be made stickier, increasing user
+engagement and retention. This will result in more application use, creating more
+revenue for the organization.
+
+### Improved customer experience {#improved-customer-experience}
+
+With real-time analytics, businesses can gain instant insights into customer
+behavior, preferences, and needs. This lets businesses offer timely assistance,
+personalize interactions, and create more engaging experiences that keep
+customers returning.
+
+## Real-time analytics use cases {#real-time-analytics-use-cases}
+
+The actual value of real-time analytics becomes evident when we consider its
+practical applications. Let’s examine some of them.
+
+### Fraud detection {#fraud-detection}
+
+Fraud detection is about detecting fraudulent patterns, ranging from fake accounts
+to payment fraud. We want to detect this fraud as quickly as possible, flagging
+suspicious activities, blocking transactions, and disabling accounts when necessary.
+
+This use case stretches across industries: healthcare, digital banking, financial
+services, retail, and more.
+
+[Instacart](https://www.instacart.com/) is North America's leading online grocery
+company, with millions of active customers and shoppers. It uses ClickHouse as
+part of Yoda, its fraud detection platform. In addition to the general types of
+fraud described above, it also tries to detect collusion between customers and
+shoppers.
+
+
+
+They identified the following characteristics of ClickHouse that enable real-time
+fraud detection:
+
+> ClickHouse supports LSM-tree based MergeTree family engines.
+> These are optimized for writing which is suitable for ingesting large amounts
+> of data in real-time.
+
+> ClickHouse is designed and optimized explicitly for analytical queries. This
+> fits perfectly with the needs of applications where data is continuously
+> analyzed for patterns that might indicate fraud.
+
+### Time-sensitive decision making {#time-sensitive-decision-making}
+
+Time-sensitive decision-making refers to situations where users or organizations
+need to make informed choices quickly based on the most current information
+available. Real-time analytics empowers users to make informed choices in
+dynamic environments, whether they're traders reacting to market fluctuations,
+consumers making purchasing decisions, or professionals adapting to real-time
+operational changes.
+
+Coinhall provides its users with real-time insights into price movements over
+time via a candlestick chart, which shows the open, high, low, and close prices
+for each trading period. They needed to be able to run these types of queries
+quickly and with a large number of concurrent users.
+
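+As an illustration, candlestick queries map naturally onto ClickHouse's
+aggregate functions. The following is a minimal sketch, assuming a
+hypothetical `trades` table with `symbol`, `price`, and `timestamp` columns
+(the names are illustrative, not Coinhall's actual schema):
+
+```sql
+-- One-minute candlesticks: open, high, low, and close per trading period
+SELECT
+    toStartOfMinute(timestamp) AS period,
+    argMin(price, timestamp) AS open,   -- earliest price in the period
+    max(price) AS high,
+    min(price) AS low,
+    argMax(price, timestamp) AS close   -- latest price in the period
+FROM trades
+WHERE symbol = 'BTC-USD'
+GROUP BY period
+ORDER BY period;
+```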
+
+
+> In terms of performance, ClickHouse was the clear winner, executing candlestick queries in 20 milliseconds, compared
+> to 400 milliseconds or more for the other databases. It ran latest-price queries in 8 milliseconds, outpacing the
+> next-best performance (SingleStore) which came in at 45 milliseconds. Finally, it handled ASOF JOIN queries in
+> 50 milliseconds, while Snowflake took 20 minutes and Rockset timed out.
diff --git a/docs/cloud/onboard/01_discover/02_use_cases/02_observability.md b/docs/cloud/onboard/01_discover/02_use_cases/02_observability.md
new file mode 100644
index 00000000000..7ec9034824e
--- /dev/null
+++ b/docs/cloud/onboard/01_discover/02_use_cases/02_observability.md
@@ -0,0 +1,229 @@
+---
+slug: /cloud/get-started/cloud/use-cases/observability
+title: 'Observability'
+keywords: ['use cases', 'observability']
+sidebar_label: 'Observability'
+---
+
+
+
+Modern software systems are complex. Microservices, cloud infrastructure, and
+distributed systems have made it increasingly difficult to understand what's
+happening inside our applications. When something goes wrong, teams need to know
+where and why quickly.
+
+This is where observability comes in. It's evolved from simple system monitoring
+into a comprehensive approach to understanding system behavior. However,
+implementing effective observability isn't straightforward - it requires
+understanding technical concepts and organizational challenges.
+
+## What is observability? {#what-is-observability}
+
+Observability is understanding a system's internal state by examining its outputs.
+In software systems, this means understanding what's happening inside your
+applications and infrastructure through the data they generate.
+
+This field has evolved significantly and can be understood through two distinct
+generations of observability approaches.
+
+The first generation, often called Observability 1.0, was built around the
+traditional "three pillars" approach of metrics, logs, and traces. This approach
+required multiple tools and data stores for different types of telemetry. It
+often forced engineers to pre-define what they wanted to measure, making it
+costly and complex to maintain multiple systems.
+
+Modern observability, or Observability 2.0, takes a fundamentally different
+approach. It's based on collecting wide, structured events for each unit of work
+(e.g., an HTTP request and response) in our system. This approach captures
+high-cardinality data, such as user IDs, request IDs, Git commit hashes,
+instance IDs, Kubernetes pod names, specific route parameters, and vendor
+transaction IDs. A good rule of thumb is to add a piece of metadata whenever
+it could help us understand how the system behaves.
+
+This rich data collection enables dynamic slicing and dicing of data without
+pre-defining metrics. Teams can derive metrics, traces, and other visualizations
+from this base data, allowing them to answer complex questions about system
+behavior that weren't anticipated when the instrumentation was first added.
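+
+To make this concrete, a wide-event table in ClickHouse might look like the
+following minimal sketch (the table name and columns are illustrative
+assumptions rather than a prescribed schema):
+
+```sql
+-- One row per unit of work, with high-cardinality context columns
+CREATE TABLE wide_events
+(
+    timestamp DateTime64(3),
+    trace_id String,
+    request_id String,
+    user_id String,
+    git_commit String,
+    pod_name String,
+    route String,
+    status_code UInt16,
+    duration_ms Float64
+)
+ENGINE = MergeTree
+ORDER BY (route, timestamp);
+```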
+
+However, implementing modern observability capabilities presents its challenges.
+Organizations need reliable ways to collect, process, and export this rich
+telemetry data across diverse systems and technologies. While modern approaches
+have evolved beyond traditional boundaries, understanding the fundamental
+building blocks of observability remains crucial.
+
+## The three pillars of observability {#three-pillars-of-observability}
+
+To better understand how observability has evolved and works in practice, let's
+examine the three pillars of observability - logs, metrics, and traces.
+
+While modern observability has moved beyond treating these as separate concerns,
+they remain fundamental concepts for understanding different aspects of system
+behavior.
+
+1. **Logs** - Text-based records of discrete events that occur within a system.
+These provide detailed context about specific occurrences, errors, and state changes.
+2. **Metrics** - Numerical measurements collected over time. These include counters,
+gauges, and histograms that help track system performance, resource usage, and business KPIs.
+3. **Traces** - Records that track the journey of requests as they flow through distributed systems.
+These help understand the relationships between services and identify performance bottlenecks.
+
+These pillars enable teams to monitor, troubleshoot, and optimize their systems.
+However, the real power comes from understanding how to effectively collect,
+analyze, and correlate data across all three pillars to gain meaningful insights
+into system behavior.
+
+## The benefits of observability {#the-benefits-of-observability}
+
+While the technical aspects of observability - logs, metrics, and traces - are
+well understood, the business benefits are equally important to consider.
+
+In their book ["Observability Engineering"](https://clickhouse.com/engineering-resources/observability#:~:text=Observability%20Engineering)
+(O'Reilly, 2022), Charity Majors, Liz Fong-Jones, and George Miranda draw from
+industry research and anecdotal feedback to identify four key business benefits
+that organizations can expect from implementing proper observability practices.
+Let's examine these benefits:
+
+### Higher incremental revenue {#higher-incremental-revenue}
+
+The authors note that observability tools that help teams improve uptime and
+performance can lead to increased incremental revenue through improved code quality.
+This manifests in several ways:
+
+1. Improved customer experience: Fast problem resolution and prevention of service
+degradation leads to higher customer satisfaction and retention
+2. Increased system reliability: Better uptime means more successful transactions
+and fewer lost business opportunities
+3. Enhanced performance: The ability to identify and optimize performance bottlenecks
+helps maintain responsive services that keep customers engaged
+4. Competitive advantage: Organizations that can maintain high service quality
+through comprehensive monitoring and quick issue resolution often gain an edge
+over competitors
+
+### Cost savings from faster incident response {#cost-savings-from-faster-incident-response}
+
+One of the most immediate benefits of observability is reduced labor costs
+through faster detection and resolution of issues. This comes from:
+
+* Reduced Mean Time to Detect (MTTD) and Mean Time to Resolve (MTTR)
+* Improved query response times, enabling faster investigation
+* Quicker identification of performance bottlenecks
+* Reduced time spent on-call
+* Fewer resources wasted on unnecessary rollbacks
+
+We see this in practice - trip.com built their observability system with ClickHouse
+and achieved query speeds 4-30x faster than their previous solution, with 90% of
+queries completing in under 300ms, enabling rapid issue investigation.
+
+### Cost savings from incidents avoided {#cost-savings-from-incidents-avoided}
+
+Observability doesn't just help resolve issues faster - it helps prevent them entirely.
+The authors emphasize how teams can prevent critical issues by:
+
+* Identifying potential problems before they become critical
+* Analyzing patterns to prevent recurring issues
+* Understanding system behavior under different conditions
+* Proactively addressing performance bottlenecks
+* Making data-driven decisions about system improvements
+
+ClickHouse's [own observability platform, LogHouse](https://clickhouse.com/blog/building-a-logging-platform-with-clickhouse-and-saving-millions-over-datadog),
+demonstrates this. It enables our core engineers to search historical patterns across all clusters, helping prevent
+recurring issues.
+
+### Cost savings from decreased employee churn {#cost-savings-from-decreased-employee-churn}
+
+One of the most overlooked benefits is the impact on team satisfaction and retention.
+The authors highlight how observability leads to:
+
+* Improved job satisfaction through better tooling
+* Decreased developer burnout from fewer unresolved issues
+* Reduced alert fatigue through better signal-to-noise ratio
+* Lower on-call stress due to better incident management
+* Increased team confidence in system reliability
+
+We see this in practice - when [Fastly migrated to ClickHouse](https://clickhouse.com/videos/scaling-graphite-with-clickhouse),
+their engineers were amazed by the improvement in query performance, noting:
+
+> "I couldn't believe it. I actually had to go back a couple of times just to
+> make sure that I was querying it properly... this is coming back too fast.
+> This doesn't make sense."
+
+As the authors emphasize, while the specific measures of these benefits may vary
+depending on the tools and implementation, these fundamental improvements can be
+expected across organizations that adopt robust observability practices. The key
+is choosing and implementing the right tools effectively to maximize these benefits.
+
+Achieving these benefits requires overcoming several significant hurdles. Even
+organizations that understand the value of observability often find that
+implementation presents unexpected complexities and challenges that must be
+carefully navigated.
+
+## Challenges in implementing observability {#challenges-in-implementing-observability}
+
+Implementing observability within an organization is a transformative step toward
+gaining deeper insights into system performance and reliability. However, this
+journey is not without its challenges. As organizations strive to harness the
+full potential of observability, they encounter various obstacles that can impede
+progress. Let’s go through some of them.
+
+### Data volume and scalability {#data-volume-and-scalability}
+
+One of the primary hurdles in implementing observability is managing the sheer
+volume and scalability of telemetry data generated by modern systems. As
+organizations grow, so does the data they need to monitor, necessitating
+solutions that efficiently handle large-scale data ingestion and
+real-time analytics.
+
+### Integration with existing systems {#integration-with-existing-systems}
+
+Integration with existing systems poses another significant challenge. Many
+organizations operate in heterogeneous environments with diverse technologies,
+making it essential for observability tools to seamlessly integrate with current
+infrastructure. Open standards are crucial in facilitating this integration,
+ensuring interoperability and reducing the complexity of deploying observability
+solutions across varied tech stacks.
+
+### Skill gaps {#skill-gaps}
+
+Skill gaps can also impede the successful implementation of observability. The
+transition to advanced observability solutions often requires specialized
+knowledge of data analytics and specific tools. Teams may need to invest in
+training or hiring to bridge these gaps and fully leverage the capabilities of
+their observability platforms.
+
+### Cost management {#cost-management}
+
+Cost management is critical, as observability solutions can become expensive,
+particularly at scale. Organizations must balance the costs of these tools with
+the value they provide, seeking cost-effective solutions that offer significant
+savings compared to traditional approaches.
+
+### Data retention and storage {#data-retention-and-storage}
+
+Data retention and storage management present additional challenges. Deciding
+how long to retain observability data without compromising performance or
+insights requires careful planning and efficient storage solutions that reduce
+storage requirements while maintaining data accessibility.
+
+### Standardization and vendor lock-in {#standardization-and-vendor-lock-in}
+
+Ensuring standardization and avoiding vendor lock-in are vital for maintaining
+flexibility and adaptability in observability solutions. By adhering to open
+standards, organizations can prevent being tied to specific vendors and ensure
+their observability stack can evolve with their needs.
+
+### Security and compliance {#security-and-compliance}
+
+Security and compliance considerations remain crucial, especially when handling
+sensitive data within observability systems. Organizations must ensure that their
+observability solutions adhere to relevant regulations and effectively protect
+sensitive information.
+
+These challenges underscore the importance of strategic planning and informed
+decision-making in implementing observability solutions that effectively meet
+organizational needs.
+
+To address these challenges, organizations need a well-structured approach to
+implementing observability. The standard observability pipeline has evolved to
+provide a framework for effectively collecting, processing, and analyzing
+telemetry data. One of the earliest and most influential examples of this
+evolution comes from Twitter's experience in 2013.
diff --git a/docs/cloud/onboard/01_discover/02_use_cases/03_data_lake_and_warehouse.md b/docs/cloud/onboard/01_discover/02_use_cases/03_data_lake_and_warehouse.md
new file mode 100644
index 00000000000..acb44128b30
--- /dev/null
+++ b/docs/cloud/onboard/01_discover/02_use_cases/03_data_lake_and_warehouse.md
@@ -0,0 +1,118 @@
+---
+slug: /cloud/get-started/cloud/use-cases/data_lake_and_warehouse
+title: 'Data Lakehouse'
+keywords: ['use cases', 'data lake and warehouse']
+sidebar_label: 'Data Lakehouse'
+---
+
+import Image from '@theme/IdealImage';
+import datalakehouse_01 from '@site/static/images/cloud/onboard/discover/use_cases/datalakehouse_01.png';
+
+
+
+The data lakehouse is a convergent architecture that applies database principles
+to data lake infrastructure while maintaining the flexibility and scale of cloud storage systems.
+
+The lakehouse does not just take a database apart; it builds database-like
+capabilities onto a fundamentally different foundation (cloud object storage),
+with a focus on supporting traditional analytics and modern AI/ML workloads in
+a unified platform.
+
+## What are the components of the data lakehouse? {#components-of-the-data-lakehouse}
+
+The modern data lakehouse architecture represents a convergence of data warehouse
+and data lake technologies, combining the best aspects of both approaches. This
+architecture comprises several distinct but interconnected layers providing a
+flexible, robust data storage, management, and analysis platform.
+
+Understanding these components is essential for organizations looking to
+implement or optimize their data lakehouse strategy. The layered approach allows
+for component substitution and independent evolution of each layer, providing
+architectural flexibility and future-proofing.
+
+Let's explore the core building blocks of a typical data lakehouse architecture
+and how they interact to create a cohesive data management platform.
+
+
+
+| Component | Description |
+|-------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **Data sources** | Lakehouse data sources include operational databases, streaming platforms, IoT devices, application logs, and external providers. |
+| **Query engine** | Processes analytical queries against the data stored in the object storage, leveraging the metadata and optimizations provided by the table format layer. Supports SQL and potentially other query languages to analyze large volumes of data efficiently. |
+| **Metadata catalog** | The [data catalog](https://clickhouse.com/engineering-resources/data-catalog) acts as a central repository for metadata, storing and managing table definitions and schemas, partitioning information, and access control policies. Enables data discovery, lineage tracking, and governance across the lakehouse. |
+| **Table format layer** | The [table format layer](https://clickhouse.com/engineering-resources/open-table-formats) manages the logical organization of data files into tables, providing database-like features such as ACID transactions, schema enforcement and evolution, time travel capabilities, and performance optimizations like data skipping and clustering. |
+| **Object storage** | This layer provides scalable, durable, cost-effective storage for all data files and metadata. It handles the physical persistence of data in an open format, enabling direct access from multiple tools and systems. |
+| **Client applications** | Various tools and applications that connect to the lakehouse to query data, visualize insights, or build data products. These can include BI tools, data science notebooks, custom applications, and ETL/ELT tools. |
+
+## What are the benefits of the data lakehouse? {#benefits-of-the-data-lakehouse}
+
+The data lakehouse architecture offers several significant advantages when compared
+directly to both traditional data warehouses and data lakes:
+
+### Compared to traditional data warehouses {#compared-to-traditional-data-warehouses}
+
+| # | Benefit | Description |
+|---|--------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| 1 | **Cost efficiency** | Lakehouses leverage inexpensive object storage rather than proprietary storage formats, significantly reducing storage costs compared to data warehouses that charge premium prices for their integrated storage. |
+| 2 | **Component flexibility and interchangeability** | The lakehouse architecture allows organizations to substitute different components. Traditional systems require wholesale replacement when requirements change or technology advances, while lakehouses enable incremental evolution by swapping out individual components like query engines or table formats. This flexibility reduces vendor lock-in and allows organizations to adapt to changing needs without disruptive migrations. |
+| 3 | **Open format support** | Lakehouses store data in open file formats like Parquet, allowing direct access from various tools without vendor lock-in, unlike proprietary data warehouse formats that restrict access to their ecosystem. |
+| 4 | **AI/ML integration** | Lakehouses provide direct access to data for machine learning frameworks and Python/R libraries, whereas data warehouses typically require extracting data before using it for advanced analytics. |
+| 5 | **Independent scaling** | Lakehouses separate storage from compute, allowing each to scale independently based on actual needs, unlike many data warehouses, where they scale together. |
+
+### Compared to data lakes {#compared-to-data-lakes}
+
+| # | Benefit | Description |
+|---|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| 1 | **Query performance** | Lakehouses implement indexing, statistics, and data layout optimizations that enable SQL queries to run at speeds comparable to data warehouses, overcoming the poor performance of raw data lakes. |
+| 2 | **Data consistency** | Through ACID transaction support, lakehouses ensure consistency during concurrent operations, solving a major limitation of traditional data lakes, where file conflicts can corrupt data. |
+| 3 | **Schema management** | Lakehouses enforce schema validation and track schema evolution, preventing the "data swamp" problem common in data lakes where data becomes unusable due to schema inconsistencies. |
+| 4 | **Governance capabilities** | Lakehouses provide fine-grained access control and auditing features at row/column levels, addressing the limited security controls in basic data lakes. |
+| 5 | **BI Tool support** | Lakehouses offer SQL interfaces and optimizations that make them compatible with standard BI tools, unlike raw data lakes that require additional processing layers before visualization. |
+
+## Where does ClickHouse fit in the data lakehouse architecture? {#where-does-clickhouse-fit-in-the-data-lakehouse-architecture}
+
+ClickHouse is a powerful analytical query engine within the modern data lakehouse
+ecosystem, offering organizations a high-performance option for analyzing data
+at scale; its exceptional query speed and efficiency make it a compelling choice.
+
+Within the lakehouse architecture, ClickHouse functions as a specialized
+processing layer that can flexibly interact with the underlying data. It can
+directly query Parquet files stored in cloud object storage systems like S3,
+Azure Blob Storage, or Google Cloud Storage, leveraging its optimized columnar
+processing capabilities to deliver rapid results even on massive datasets.
+This direct query capability allows organizations to analyze their lake data
+without complex data movement or transformation processes.
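+
+For example, querying Parquet files in place on object storage can be a
+one-liner via the `s3` table function (the bucket path below is an
+illustrative placeholder):
+
+```sql
+-- Aggregate directly over Parquet files in S3, no ingestion required
+SELECT count(), avg(price)
+FROM s3('https://my-bucket.s3.amazonaws.com/data/*.parquet', 'Parquet');
+```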
+
+ClickHouse integrates with open table formats such as Apache Iceberg, Delta Lake,
+or Apache Hudi for more sophisticated data management needs. This integration
+enables ClickHouse to take advantage of these formats' advanced features, while
+still delivering the exceptional query performance it's known for. Organizations
+can integrate these table formats directly or connect through metadata catalogs
+like AWS Glue, Unity, or other catalog services.
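+
+As a brief sketch, an Iceberg table on object storage might be queried with
+the `iceberg` table function (the path is a placeholder):
+
+```sql
+-- Read an Iceberg table directly from object storage
+SELECT *
+FROM iceberg('https://my-bucket.s3.amazonaws.com/warehouse/sales/')
+LIMIT 10;
+```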
+
+By incorporating ClickHouse as a query engine in their lakehouse architecture,
+organizations can run lightning-fast analytical queries against their data lake
+while maintaining the flexibility and openness that define the lakehouse approach.
+This combination delivers the performance characteristics of a specialized
+analytical database without sacrificing the core benefits of the lakehouse model,
+including component interchangeability, open formats, and unified data management.
+
+## Hybrid architecture: The best of both worlds {#hybrid-architecture-the-best-of-both-worlds}
+
+While ClickHouse excels at querying lakehouse components, its highly optimized
+storage engine offers an additional advantage. For use cases demanding ultra-low
+latency queries - such as real-time dashboards, operational analytics, or
+interactive user experiences - organizations can selectively store
+performance-critical data directly in ClickHouse's native format. This hybrid
+approach delivers the best of both worlds: the unmatched query speed of
+ClickHouse's specialized storage for time-sensitive analytics and the flexibility
+to query the broader data lakehouse when needed.
+
+This dual capability allows organizations to implement tiered data strategies
+where hot, frequently accessed data resides in ClickHouse's optimized storage
+for sub-second query responses, while maintaining seamless access to the complete
+data history in the lakehouse. Teams can make architectural decisions based on
+performance requirements rather than technical limitations, using ClickHouse as
+a lightning-fast analytical database for critical workloads and a flexible query
+engine for the broader data ecosystem.
diff --git a/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/01_overview.md b/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/01_overview.md
new file mode 100644
index 00000000000..77e32109dd2
--- /dev/null
+++ b/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/01_overview.md
@@ -0,0 +1,100 @@
+---
+slug: /cloud/get-started/cloud/use-cases/AI_ML
+title: 'Machine learning and generative AI'
+keywords: ['use cases', 'Machine Learning', 'Generative AI']
+sidebar_label: 'Overview'
+---
+
+
+
+## The rapidly evolving data landscape for Machine Learning and Generative AI {#the-rapidly-evolving-data-landscape-for-machine-learning-and-generative-ai}
+
+Rapid advancements in Machine Learning and Generative AI are completely reshaping
+how business and society operate, driving an ever-increasing demand for data on
+an unparalleled scale.
+At the time of writing, the size of language-model training datasets is growing
+on average 3.7x per year, while it is projected that the largest training run
+will use all public human-generated text by 2028. At the same time, users of
+these applications increasingly expect real-time performance, and the success
+of AI- and ML-driven insights, like personalized recommendations, accurate
+forecasting, or chatbots, hinges on the ability to handle massive datasets in
+real time. Against the backdrop
+of these changes, traditional data architectures often face significant challenges
+when it comes to meeting the scale and real-time requirements that modern AI/ML
+workloads demand.
+
+## Challenges of traditional data stacks for AI/ML workloads {#challenges-of-traditional-data-stacks}
+
+Traditional database systems are often not designed for the massive analytical
+workloads and complex queries inherent in modern ML and GenAI applications.
+They frequently become bottlenecks as data volume grows and query complexity
+increases, hindering the rapid processing required for AI. In addition to this,
+machine learning architectures can become fragmented and challenging to handle
+due to a proliferation of specialized tools and components, which often leads to
+higher learning curves, increased points of failure, and escalating expenses.
+Real-time processing for ML faces significant challenges, including dealing with
+the sheer volume and velocity of incoming data, minimizing latency and response
+times, and continuously addressing issues like model drift and ensuring data
+quality. These systems, designed for structured data at much smaller scales, often
+take days or weeks to process terabytes or petabytes of data. Not only do
+they become a performance bottleneck, but also a cost bottleneck, often relying
+on expensive, tightly coupled storage that does not scale cost-effectively.
+
+## ClickHouse as a foundation for real-time AI/ML {#clickhouse-for-real-time-ai-ml}
+
+ClickHouse was designed and built from the ground up to tackle data at scale in
+real-time. As such, it is ideally positioned for handling the requirements of
+today’s AI and ML applications. Several core features enable it to ingest,
+process and query datasets on the petabyte scale with real-time performance:
+
+| Feature | Description |
+|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **Columnar Storage** | ClickHouse utilizes a columnar storage model. This means that data from each column of an inserted row is stored together on disk, which enables significantly more efficient compression and boosts query speed by allowing the system to read only the relevant columns required for a query, which drastically reduces disk I/O. This is particularly advantageous for analytical queries common in ML/GenAI that often involve aggregations or filtering on a subset of columns. |
+| **High Performance** | ClickHouse is known for its lightning-fast query processing, capable of querying billions of rows in milliseconds. It achieves this through a fully parallelized query pipeline and vectorized query execution engine, which processes multiple rows simultaneously at the CPU level, maximizing efficiency. |
+| **Scalability** | Designed for horizontal scalability, ClickHouse allows users to add more servers (nodes) to a cluster to handle increasing data volumes and query loads, distributing data and queries across them. Performance scales linearly with the addition of each new server, enabling it to easily handle petabytes of data. |
+| **Real-time data ingestion** | It is built for continuous data ingestion, supporting high rates of inserts and merges (billions of rows per second, gigabytes per second) without disrupting ongoing queries or analytics. This capability is crucial for environments where data arrives in a constant stream, such as from IoT devices or application logs, ensuring that ML models are fueled with the most up-to-date information. |
+| **Specialized data types & functions** | In addition to standard SQL data types, syntax, and functions, ClickHouse offers a host of additional specialized data types and functions suited to ML use cases. These include array functions that natively support vector operations, distance calculations, and array manipulations; native JSON support for efficient processing of the semi-structured data common to ML feature stores; approximate algorithms like HyperLogLog, quantiles, and sampling functions for large-scale statistical analysis; and numeric indexed vectors for vector aggregation and pointwise operations. |
+| **Extensive integration ecosystem** | ClickHouse's extensive integration ecosystem makes it exceptionally valuable for AI/ML applications by seamlessly connecting with every critical component of the ML toolchain—from Python/pandas and Jupyter for data science workflows, to Spark and Kafka for large-scale data processing, to Airflow for pipeline orchestration, and Grafana for model monitoring—eliminating the typical friction and data movement bottlenecks that plague multi-tool ML environments. |
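+
+As a brief illustration of these specialized functions, the following minimal
+sketch ranks stored embeddings by similarity to a query vector, assuming a
+hypothetical `embeddings` table with an `Array(Float32)` column (names are
+illustrative):
+
+```sql
+-- Rank stored embeddings by cosine distance to a query vector
+SELECT
+    id,
+    cosineDistance(embedding, [0.1, 0.2, 0.3]) AS score
+FROM embeddings
+ORDER BY score ASC
+LIMIT 10;
+```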
+
+## How ClickHouse helps simplify the AI/ML Data Stack {#simplify-the-ai-ml-data-stack}
+
+ClickHouse streamlines the traditionally fragmented AI/ML data infrastructure
+by serving as a unified platform that handles multiple data management
+functions within a single high-performance system. Rather than maintaining
+separate specialized data stores for different ML tasks, ClickHouse provides
+a consolidated foundation for analytics, machine learning workloads, and
+data preparation and exploration.
+
+ClickHouse natively integrates with object storage like S3, GCS, and Azure Blob Storage. It
+integrates with data lakes, enabling direct querying of data in popular formats
+like Iceberg, Delta Lake, and Hudi, positioning it as a comprehensive access and
+computation layer for ML operations. This unified approach tackles challenges
+faced in MLOps by reducing the complexity that typically stems from managing
+multiple systems.
+
+Data fragmentation across separate stores creates many operational pain
+points such as escalating costs, increased failure risks, and the need for
+duplicate transformation logic between training and inference pipelines.
+ClickHouse addresses these issues by consolidating all of this functionality
+into a single system, particularly for feature engineering where consistency
+between offline training and online serving is critical.
+
+Through its integration with data catalogs including Unity, AWS Glue, Polaris,
+and Hive Metastore, ClickHouse minimizes data movement and duplication. This
+architectural approach ensures that feature definitions remain consistent
+across models and experiments, reducing the risk of discrepancies that can
+undermine model performance. For MLOps teams, this
+translates to less time managing infrastructure complexity and more focus on
+core activities like model development and deployment, ultimately accelerating
+the ML lifecycle while improving the economic viability of AI initiatives at
+scale.
+
+## ClickHouse across the AI/ML Lifecycle {#clickhouse-across-the-ai-ml-lifecycle}
+
+ClickHouse's capabilities span the entire AI/ML lifecycle, providing a robust and
+efficient platform from the very first stages of data preparation all the way to
+model deployment and monitoring.
+
+| Area | Description |
+|----------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------|
+| [Data preparation and feature engineering](/cloud/get-started/cloud/use-cases/AI_ML/feature_engineering) | Learn how ClickHouse is used in the data preparation and feature engineering stages of the AI/ML pipeline |
+| [Agent-facing analytics](/cloud/get-started/cloud/use-cases/AI_ML/agent_facing_analytics) | Learn how ClickHouse enables agent-facing analytics |
diff --git a/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/02_data_prep_feature_engineering.md b/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/02_data_prep_feature_engineering.md
new file mode 100644
index 00000000000..6c1678ec2bb
--- /dev/null
+++ b/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/02_data_prep_feature_engineering.md
@@ -0,0 +1,240 @@
+---
+slug: /cloud/get-started/cloud/use-cases/AI_ML/feature_engineering
+title: 'Data preparation and feature engineering'
+keywords: ['use cases', 'Machine Learning', 'Generative AI']
+sidebar_label: 'Data preparation and feature engineering'
+---
+
+import Image from '@theme/IdealImage';
+import ml_ai_01 from '@site/static/images/cloud/onboard/discover/use_cases/ml_ai_01.png';
+import ml_ai_02 from '@site/static/images/cloud/onboard/discover/use_cases/ml_ai_02.png';
+import ml_ai_03 from '@site/static/images/cloud/onboard/discover/use_cases/ml_ai_03.png';
+import ml_ai_04 from '@site/static/images/cloud/onboard/discover/use_cases/ml_ai_04.png';
+
+## Data preparation and feature engineering {#data-preparation-and-feature-engineering}
+
+Data preparation bridges raw data and effective machine learning or AI
+models, typically consuming the majority of time in AI/ML projects and
+directly determining model success. It sits between initial data collection
+and model development in the lifecycle, transforming messy, inconsistent
+real-world data into clean, structured formats that algorithms can
+effectively learn from. `clickhouse-local`, `chDB` (an in-process version
+of ClickHouse for Python), open-source ClickHouse server or ClickHouse Cloud
+allow developers and data scientists to work with ever-growing amounts of
+data interactively and efficiently for ad-hoc querying, data cleaning, and
+feature engineering.
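+
+As a simple illustration, a quick data-cleaning pass over a local file might
+look like the following sketch, runnable with `clickhouse-local` or chDB (the
+file name and columns are illustrative assumptions):
+
+```sql
+-- Deduplicate and normalize a raw CSV before feature engineering
+SELECT DISTINCT
+    lower(trimBoth(email)) AS email_clean,
+    toDate(signup_date) AS signup_day
+FROM file('raw_users.csv', 'CSVWithNames')
+WHERE email != '';
+```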
+
+### What is a feature store? {#what-is-a-feature-store}
+
+In its simplest form, a feature store is a centralized repository for storing
+and managing feature data and acting as the source of truth. By providing
+APIs that allow the storage, versioning, and retrieval of features, feature
+stores aim to provide a consistent view of features for training and
+inference from development to production environments. Whether custom-built
+in-house or bought off the shelf, the capabilities provided by a feature store
+vary, with some offering a complete data platform capable of aggregating data
+into features and even providing a compute engine for the training of models.
+
+Irrespective of how many capabilities are inherent to the feature store, all
+provide abstractions over the underlying data with which data scientists and
+engineers will be familiar. As well as delivering data as versioned
+entities, features, and classes, most expose concepts of feature groups,
+training sets, batching, streaming, and point-in-time queries (such as the
+ability to identify the value of a feature at a specific point in time,
+e.g. its latest value).
+
+
+
+### Why might you use one? {#why-use-one}
+
+In theory, a feature store ties disparate systems and capabilities together to
+form a complete ML data layer, capable of acting both as the source of truth for
+training data and as a source of context when predictions are being made.
+
+While the exact capabilities they provide vary, the objectives remain the same:
+
+- **improve collaboration and reusability** between data scientists and data
+engineers by centralizing features and their transformation logic
+- **reduce model iteration time** during both experimentation and deployment by
+allowing feature re-use at both training and inference time
+- **governance and compliance** through rules and versioning which can restrict
+model access to sensitive data (and features)
+- **improve model performance and reliability** by abstracting the complexity of
+data engineering from data scientists and ensuring they work only with
+consistent, quality features delivered through an API.
+
+While these represent a very high-level overview of some of the problems a
+feature store solves, the predominant benefit here is the ability to share
+features across teams and utilize the same data for training and inference.
+
+Feature stores also address a number of other challenges present in MLOps,
+such as how to backfill feature data, handle incremental updates to the
+source data (to update features), or monitor new data for drift. More
+recently, they have also integrated vector databases to act as the
+orchestration layer for RAG pipelines or to help find similar features
+using embeddings - a useful capability during some model training.
+
+### Components of a feature store {#components-of-a-feature-store}
+
+Before we explore how ClickHouse might fit into a feature store, understanding
+the common components is helpful for context. Typically, a feature store will
+consist of up to four main components:
+
+
+
+- **Data source** - While this can be as simple as a CSV file, it is often a
+database or data lake with files in a format like Iceberg and accessible
+through a query engine.
+
+- **Transformation engine (optional)** - Raw data needs to be transformed into
+features. In a simple case, a feature can be correlated with a column's
+values. More likely, it is the result of a transformation process involving
+joins, aggregations, and expressions changing the structure and/or type of
+column values. Some feature stores (see Types of Feature Store) might
+provide built-in capabilities to achieve this; others may offload the work
+to local Python functions or, for larger datasets, the database (maybe even
+using dbt under the hood) via materializations, or a processing engine such
+as Spark. With ClickHouse, this is achievable through Materialized Views.
+Features that are continuously subject to update often require some form of
+streaming pipeline, typically implemented with tooling such as Flink or
+Spark Streaming. Normally, some form of directed acyclic graph (DAG) is
+required, if these transformations are chained, and dependencies need to be
+tracked.
+
+- **Offline (Training) Store** - The offline store holds the features
+resulting from the previous transformation pipeline. These features are
+typically grouped as entities and associated with a label (the target
+prediction). Usually, models need to consume these features selectively,
+either iteratively or through aggregations, potentially multiple times and
+in random order. Models often require more than one feature, requiring
+features to be grouped together in a "feature group" - usually by an entity
+ID and time dimension. This requires the offline store to be able to deliver
+the correct version of a feature and label for a specific point in time.
+This "point-in-time correctness" is often fundamental to models, which need
+to be trained incrementally.
+
+- **Online (Inference) Store** - Once a model has been trained, it can be
+deployed and used for making predictions. This inference process requires
+information that is only available at the moment of prediction, e.g. the
+user's ID for a transaction. However, it can also require features for the
+prediction, which may be precomputed, e.g. features representing historical
+purchases. These are often too expensive to compute at inference time, even
+for ClickHouse. These features need to be served in latency-sensitive
+situations, based on the most recent version of the data, especially in
+scenarios where predictions need to be made in real-time, such as fraud
+detection. Features may be materialized from the offline store to the online
+store for serving.
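+
+As a concrete (hypothetical) sketch of transformation at insert time, a
+materialized view can maintain per-user aggregate features as raw events
+arrive. The table and column names below are illustrative assumptions:
+
+```sql
+-- Compute features at insert time; the target table would typically use a
+-- SummingMergeTree or AggregatingMergeTree engine so that the partial
+-- aggregates produced per insert block merge correctly over time
+CREATE MATERIALIZED VIEW user_features_mv
+TO user_features
+AS
+SELECT
+    user_id,
+    toDate(timestamp) AS day,
+    count() AS purchases,
+    sum(amount) AS total_spend
+FROM events
+GROUP BY user_id, day;
+```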
+
+### Feature stores and ClickHouse {#feature-stores-and-clickhouse}
+
+As a real-time data warehouse, ClickHouse can fulfill the role of a number
+of the components - potentially significantly simplifying the feature store
+architecture.
+
+
+
+Specifically, ClickHouse can act as a:
+
+- **Data source** - With the ability to query or ingest data in over 70
+different file formats, including data lake formats such as Iceberg and
+Delta Lake, ClickHouse makes an ideal long-term store holding or querying
+data. By separating storage and compute using object storage, ClickHouse
+Cloud additionally allows data to be held indefinitely - with compute scaled
+down or made completely idle to minimize costs. Flexible codecs, coupled
+with column-oriented storage and ordering of data on disk, maximize
+compression rates, thus minimizing the required storage. Users can easily
+combine ClickHouse with data lakes, with built-in functions to query data in
+place on object storage.
+
+- **Transformation engine** - SQL provides a natural means of declaring data
+ transformations. When extended with ClickHouse's analytical and statistical
+ functions, these transformations become succinct and optimized. As well as
+ applying to either ClickHouse tables, in cases where ClickHouse is used as a
+ data store, table functions allow SQL queries to be written against data
+ stored in formats such as Parquet, on-disk or object storage, or even other
+ data stores such as Postgres and MySQL. A completely parallelized query
+ execution engine, combined with a column-oriented storage format, allows
+ ClickHouse to perform aggregations over PBs of data in seconds - unlike
+ transformations on in-memory data frames, users are not memory-bound.
+ Furthermore, materialized views allow data to be transformed at insert time,
+ thus shifting compute from query time to data load time. These views can
+ exploit the same range of analytical and statistical functions ideal for
+ data analysis and summarization. Should any of ClickHouse's existing
+ analytical functions be insufficient or custom libraries need to be
+ integrated, users can also utilize User Defined Functions (UDFs).
+
+ While users can transform data directly in ClickHouse or prior to insertion
+ using SQL queries, ClickHouse can also be used in programming environments
+ such as Python via chDB. This allows embedded ClickHouse to be exposed as a
+ Python module and used to transform and manipulate large data frames within
+ notebooks. This allows transformation work to be performed client-side by
+ data engineers, with results potentially materialized as feature tables in
+ a centralized ClickHouse instance.
+
+- **Offline store** - With the above capabilities to read data from multiple
+ sources and apply transformations via SQL, the results of these queries can
+ also be persisted in ClickHouse via `INSERT INTO SELECT` statements. With
+ transformations often grouped by an entity ID and returning a number of
+ columns as results, ClickHouse's schema inference can automatically detect
+ the required types from these results and produce an appropriate table
+ schema to store them. Functions for generating random numbers and
+ statistical sampling allow data to be efficiently iterated and streamed at
+ millions of rows per second for feeding to model training pipelines.
+
+ Often, features are represented in tables with a timestamp indicating the
+ value for an entity and feature at a specific point in time. As described
+ earlier, training pipelines often need the state of features at specific
+ points in time and in groups. ClickHouse's sparse indices allow fast
+ filtering of data to satisfy point-in-time queries and feature selection
+ filters. While other technologies such as Spark, Redshift, and BigQuery
+ rely on slow stateful windowed approaches to identify the state of features
+ at a specific point in time, ClickHouse supports the `ASOF` (as-of-this-time)
+ `LEFT JOIN` query and `argMax` function. As well as simplifying syntax, this
+ approach is highly performant on large datasets through the use of a sort
+ and merge algorithm. This allows feature groups to be built quickly,
+ reducing data preparation time prior to training.
+
+
+
+- **Online store** - As a real-time analytics database, ClickHouse can serve highly
+ concurrent query workloads at low latency. While this typically requires data
+ to be denormalized, this aligns with the storage of feature groups used at both training
+ and inference time. Importantly, ClickHouse is able to deliver this query
+ performance while being subject to high write workloads thanks to its log-structured
+ merge tree. These properties are required in an online store to keep features
+ up-to-date. Since the features are already available within the offline store,
+ they can easily be materialized to new tables within either the same ClickHouse
+ cluster or a different instance via existing capabilities, e.g., [`remoteSecure`](/sql-reference/table-functions/remote#parameters).
+
+ :::note
+ For use cases requiring very high request concurrency, i.e., thousands of requests per second,
+ and very low latency, we recommend users still consider a dedicated data store,
+ e.g., Redis, designed for these workloads.
+ :::
+
+- **Vector database** - ClickHouse has built-in support for vector embeddings
+ through floating point arrays. These can be searched and compared through
+ [distance functions](https://clickhouse.com/docs/en/sql-reference/functions/distance-functions#cosinedistance),
+ allowing ClickHouse to be used as a vector database. This linear comparison can
+ be easily scaled and parallelized for larger datasets. Additionally, ClickHouse
+ has maturing support for [Approximate Nearest Neighbour (ANN)](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/annindexes)
+ indices, as well as [hyperplane indexes using pure-SQL](https://clickhouse.com/blog/approximate-nearest-neighbour-ann-with-sql-powered-local-sensitive-hashing-lsh-random-projections),
+ as required for larger vector datasets.
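+
+The point-in-time query pattern referenced above can be sketched as follows,
+assuming hypothetical `features` and `labels` tables keyed by entity ID and
+timestamp (all names are illustrative):
+
+```sql
+-- For each label, pick the most recent feature value at or before its timestamp
+SELECT
+    l.entity_id,
+    l.label,
+    f.feature_value
+FROM labels AS l
+ASOF LEFT JOIN features AS f
+    ON l.entity_id = f.entity_id
+    AND l.label_time >= f.feature_time
+ORDER BY l.entity_id;
+```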
+
+By satisfying each of the above roles, ClickHouse can dramatically simplify
+the feature store architecture. Aside from the simplification of operations,
+this architecture allows features to be built and deployed faster. A single
+instance of ClickHouse can be scaled vertically to handle PBs of data, with
+additional instances simply added for high availability. This minimizes the
+movement of data between data stores, avoiding the typical network
+bottlenecks.
+single copy of the data in object storage and allowing nodes to be scaled
+vertically or horizontally dynamically in response to load as required.
+
+The above architecture still requires several key components not satisfied
+by ClickHouse: a streaming engine such as Kafka + Flink and a framework to
+provide compute for model training. A means of hosting models is also
+required. For simplicity, we assume the use of cloud-hosted solutions for
+these, such as Confluent and Amazon SageMaker.
diff --git a/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/03_agent_facing_analytics.md b/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/03_agent_facing_analytics.md
new file mode 100644
index 00000000000..0cb2e68a439
--- /dev/null
+++ b/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/03_agent_facing_analytics.md
@@ -0,0 +1,168 @@
+---
+slug: /cloud/get-started/cloud/use-cases/AI_ML/agent_facing_analytics
+title: 'Agent-facing analytics'
+keywords: ['use cases', 'Machine Learning', 'Generative AI', 'agent facing analytics', 'agents']
+sidebar_label: 'Agent-facing analytics'
+---
+
+import Image from '@theme/IdealImage';
+import ml_ai_05 from '@site/static/images/cloud/onboard/discover/use_cases/ml_ai_05.png';
+import ml_ai_06 from '@site/static/images/cloud/onboard/discover/use_cases/ml_ai_06.png';
+import ml_ai_07 from '@site/static/images/cloud/onboard/discover/use_cases/ml_ai_07.png';
+import ml_ai_08 from '@site/static/images/cloud/onboard/discover/use_cases/ml_ai_08.png';
+import ml_ai_09 from '@site/static/images/cloud/onboard/discover/use_cases/ml_ai_09.png';
+
+## Agent-facing analytics concepts {#agent-facing-analytics}
+
+### What are "agents"? {#agents}
+
+One can think of AI agents as digital assistants that have evolved beyond
+simple task execution (or function calling): they can understand context,
+make decisions, and take meaningful actions toward specific goals. They
+operate in a "sense-think-act" loop (see ReAct agents), processing various
+inputs (text, media, data), analyzing situations, and then doing something
+useful with that information. Most importantly, depending on the application
+domain, they can theoretically operate at various levels of autonomy, with
+or without human supervision.
+
+The game changer here has been the advent of Large Language Models (LLMs).
+While we have had the notion of AI agents for quite a while, LLMs like the GPT
+series have given them a massive upgrade in their ability to "understand"
+and communicate. It's as if they've suddenly become more fluent in "human",
+i.e., able to grasp requests and respond with relevant contextual information
+drawn from the model's training.
+
+### AI agents' superpowers: "tools" {#tools}
+
+These agents really shine through their access to "tools". Tools enhance AI agents
+by giving them the ability to perform tasks. Rather than just being conversational
+interfaces, they can now get things done, whether it's crunching numbers, searching
+for information, or managing customer communications. Think of it as the difference
+between having someone who can describe how to solve a problem and someone who
+can actually solve it.
+
+For example, ChatGPT is now shipped by default with a search tool. This
+integration with search providers allows the model to pull current information
+from the web during conversations. This means it can fact-check responses, access
+recent events and data, and provide up-to-date information rather than relying
+solely on its training data.
+
+
+
+Tools can also be used to simplify the implementation of Retrieval-Augmented
+Generation (RAG) pipelines. Instead of relying only on what an AI model
+learned during training, RAG lets the model pull in relevant information
+before formulating a response. Here's an example: using an AI assistant to
+help with customer support (e.g., Salesforce Agentforce, ServiceNow AI
+Agents). Without RAG, it would only use its general training to answer
+questions. But with RAG, when a customer asks about the latest product
+feature, the system retrieves the most recent documentation, release notes,
+and historical support tickets before crafting its response. This means that
+answers are now grounded in the latest information available to the AI
+model.
+
+### Reasoning models {#reasoning-models}
+
+Another development in the AI space, and perhaps one of the most
+interesting, is the emergence of reasoning models. Systems like OpenAI o1,
+Anthropic Claude, or DeepSeek-R1 take a more methodical approach by
+introducing a "thinking" step before responding to a prompt. Instead of
+generating the answer straightaway, reasoning models use prompting
+techniques like Chain-of-Thought (CoT) to analyze problems from multiple
+angles, break them down into steps, and use the tools available to them to
+gather contextual information when needed.
+
+This represents a shift toward more capable systems that can handle more
+complex tasks through a combination of reasoning and practical tools. One of
+the latest examples in this area is the introduction of OpenAI's deep
+research, an agent that can autonomously conduct complex multi-step research
+tasks online. It processes and synthesizes information from various sources,
+including text, images, and PDFs, to generate comprehensive reports within five
+to thirty minutes, a task that would traditionally take a human several hours.
+
+
+
+## Real-time analytics for AI agents {#real-time-analytics-for-ai-agents}
+
+Let's take the case of an agentic AI assistant with access to a
+real-time analytics database containing the company's CRM data. When a user asks
+about the latest (up-to-the-minute) sales trends, the AI assistant queries the
+connected data source. It iteratively analyzes the data to identify meaningful
+patterns and trends, such as month-over-month growth, seasonal variations, or
+emerging product categories. Finally, it generates a natural language response
+explaining key findings, often with supporting visualizations. When the main
+interface is chat-based, as in this case, performance matters, since these
+iterative explorations trigger a series of queries that can scan large
+amounts of data to extract relevant insights.
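+
+As a sketch, an early query in such an exploration might be a simple monthly
+revenue roll-up, which the agent can then iterate on (month-over-month growth,
+seasonality, per-category breakdowns). The `sales` table and its columns are
+hypothetical:
+
+```sql
+-- Monthly revenue for the trailing twelve months
+SELECT
+    toStartOfMonth(order_date) AS month,
+    sum(amount) AS revenue
+FROM sales
+WHERE order_date >= now() - INTERVAL 12 MONTH
+GROUP BY month
+ORDER BY month;
+```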
+
+Some properties make real-time databases especially suitable for such
+workloads. For example, real-time analytics databases are designed to work
+with near real-time data, allowing them to process and deliver insights
+almost immediately as new data arrives. This is crucial for AI agents, as
+they can require up-to-date information to make (or help make) timely and
+relevant decisions.
+
+The core analytical capabilities are also important. Real-time analytics
+databases shine in performing complex aggregations and pattern detection
+across large datasets. Unlike operational databases focusing primarily on
+raw data storage or retrieval, these systems are optimized for analyzing
+vast amounts of information. This makes them particularly well-suited for AI
+agents that need to uncover trends, detect anomalies, and derive actionable
+insights.
+
+Real-time analytics databases are also expected to deliver fast
+performance for interactive querying, essential for chat-based interaction
+and high-frequency explorative workloads. They ensure consistent performance
+even with large data volumes and high query concurrency, enabling responsive
+dialogues and a smoother user experience.
+
+Finally, real-time analytics databases often serve as the ultimate "data
+sinks", effectively consolidating valuable domain-specific data in a single
+location. By co-locating essential data across different sources and formats
+under the same tent, these databases ensure that AI agents have access to a
+unified view of the domain information, decoupled from operational systems.
+
+
+
+
+
+These properties already empower real-time databases to play a vital role
+in serving AI data retrieval use cases at scale (e.g. OpenAI's acquisition
+of Rockset). They can also enable AI agents to provide fast data-driven
+responses while offloading the heavy computational work.
+
+This positions the real-time analytics database as a preferred "context
+provider" for AI agents when it comes to insights.
+
+## AI agents as an emerging user persona {#ai-agents-as-an-emerging-user-persona}
+
+A useful way to think about AI agents leveraging real-time analytics databases
+is to perceive them as a new category of users, or in product manager speak:
+a user persona.
+
+
+
+From the database perspective, we can expect a potentially unlimited number of
+AI agents concurrently running a large number of queries on behalf of users,
+or autonomously, to perform investigations, refine iterative research and
+insights, and execute tasks.
+
+Over the years, real-time databases have had time to adapt to human
+interactive users, whether directly connected to the system or via a middleware
+application layer. Classic persona examples include database administrators,
+business analysts, data scientists, and software developers building applications
+on top of the database. The industry has progressively learned their usage
+patterns and requirements and, organically, provided the interfaces, operators,
+UIs, formats, clients, and performance to satisfy their various use cases.
+
+The question now becomes: are we ready to accommodate AI agent workloads?
+What specific features do we need to rethink or create from scratch for these
+usage patterns?
+
+ClickHouse is rapidly providing answers to some of these questions through a host
+of features aimed at providing a feature-complete AI experience.
+
+## ClickHouse.ai {#clickhouse-ai}
+
+For more information about features coming soon to ClickHouse Cloud, see [ClickHouse.ai](https://clickhouse.com/clickhouse-ai/).
diff --git a/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/_category_.json b/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/_category_.json
new file mode 100644
index 00000000000..7b4415fff32
--- /dev/null
+++ b/docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/_category_.json
@@ -0,0 +1,6 @@
+{
+ "position": 2.5,
+ "label": "Machine Learning and GenAI",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/onboard/01_discover/02_use_cases/_category_.json b/docs/cloud/onboard/01_discover/02_use_cases/_category_.json
new file mode 100644
index 00000000000..c066b9c4fdc
--- /dev/null
+++ b/docs/cloud/onboard/01_discover/02_use_cases/_category_.json
@@ -0,0 +1,6 @@
+{
+ "position": 2.5,
+ "label": "Use cases",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/manage/cloud-tiers.md b/docs/cloud/onboard/01_discover/04_cloud-tiers.md
similarity index 100%
rename from docs/cloud/manage/cloud-tiers.md
rename to docs/cloud/onboard/01_discover/04_cloud-tiers.md
diff --git a/docs/integrations/migration/overview.md b/docs/cloud/onboard/02_migrate/01_migration_guides/01_overview.md
similarity index 100%
rename from docs/integrations/migration/overview.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/01_overview.md
diff --git a/docs/migrations/postgres/overview.md b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/01_overview.md
similarity index 97%
rename from docs/migrations/postgres/overview.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/01_overview.md
index ca1d195b914..b8be25dcc58 100644
--- a/docs/migrations/postgres/overview.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/01_overview.md
@@ -1,10 +1,13 @@
---
slug: /migrations/postgresql/overview
-title: 'Migrating from PostgreSQL to ClickHouse'
+title: 'Comparing PostgreSQL and ClickHouse'
description: 'A guide to migrating from PostgreSQL to ClickHouse'
keywords: ['postgres', 'postgresql', 'migrate', 'migration']
+sidebar_label: 'Overview'
---
+# Comparing ClickHouse and PostgreSQL
+
## Why use ClickHouse over Postgres? {#why-use-clickhouse-over-postgres}
TLDR: Because ClickHouse is designed for fast analytics, specifically `GROUP BY` queries, as an OLAP database whereas Postgres is an OLTP database designed for transactional workloads.
diff --git a/docs/migrations/postgres/appendix.md b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/appendix.md
similarity index 100%
rename from docs/migrations/postgres/appendix.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/appendix.md
diff --git a/docs/migrations/postgres/index.md b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/index.md
similarity index 100%
rename from docs/migrations/postgres/index.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/index.md
diff --git a/docs/migrations/postgres/dataset.md b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/01_migration_guide_part1.md
similarity index 99%
rename from docs/migrations/postgres/dataset.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/01_migration_guide_part1.md
index 2574252e1da..fc97c8a76dc 100644
--- a/docs/migrations/postgres/dataset.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/01_migration_guide_part1.md
@@ -4,6 +4,7 @@ title: 'Migrating data'
description: 'Dataset example to migrate from PostgreSQL to ClickHouse'
keywords: ['Postgres']
show_related_blogs: true
+sidebar_label: 'Part 1'
---
import postgres_stackoverflow_schema from '@site/static/images/migrations/postgres-stackoverflow-schema.png';
diff --git a/docs/migrations/postgres/rewriting-queries.md b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/02_migration_guide_part2.md
similarity index 99%
rename from docs/migrations/postgres/rewriting-queries.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/02_migration_guide_part2.md
index 451d1b37d9a..a77b38ed5e5 100644
--- a/docs/migrations/postgres/rewriting-queries.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/02_migration_guide_part2.md
@@ -3,6 +3,7 @@ slug: /migrations/postgresql/rewriting-queries
title: 'Rewriting PostgreSQL Queries'
keywords: ['postgres', 'postgresql', 'rewriting queries']
description: 'Part 2 of a guide on migrating from PostgreSQL to ClickHouse'
+sidebar_label: 'Part 2'
---
> This is **Part 2** of a guide on migrating from PostgreSQL to ClickHouse. Using a practical example, it demonstrates how to efficiently carry out the migration with a real-time replication (CDC) approach. Many of the concepts covered are also applicable to manual bulk data transfers from PostgreSQL to ClickHouse.
diff --git a/docs/migrations/postgres/data-modeling-techniques.md b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/03_migration_guide_part3.md
similarity index 99%
rename from docs/migrations/postgres/data-modeling-techniques.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/03_migration_guide_part3.md
index f864bd8fb3e..db4468289d8 100644
--- a/docs/migrations/postgres/data-modeling-techniques.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/03_migration_guide_part3.md
@@ -1,9 +1,10 @@
---
slug: /migrations/postgresql/data-modeling-techniques
title: 'Data modeling techniques'
-description: 'Data modeling for migrating from PostgreSQL to ClickHouse'
+description: 'Part 3 of a guide on migrating from PostgreSQL to ClickHouse'
keywords: ['postgres', 'postgresql']
show_related_blogs: true
+sidebar_label: 'Part 3'
---
import postgres_b_tree from '@site/static/images/migrations/postgres-b-tree.png';
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/_category_.json b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/_category_.json
new file mode 100644
index 00000000000..ad514aeb890
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/migration_guide/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Migration guide",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/migrations/bigquery/equivalent-concepts.md b/docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/01_overview.md
similarity index 98%
rename from docs/migrations/bigquery/equivalent-concepts.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/01_overview.md
index ee330a0610c..729112ee81e 100644
--- a/docs/migrations/bigquery/equivalent-concepts.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/01_overview.md
@@ -4,12 +4,13 @@ slug: /migrations/bigquery/biquery-vs-clickhouse-cloud
description: 'How BigQuery differs from ClickHouse Cloud'
keywords: ['BigQuery']
show_related_blogs: true
+sidebar_label: 'Overview'
---
import bigquery_1 from '@site/static/images/migrations/bigquery-1.png';
import Image from '@theme/IdealImage';
-# BigQuery vs ClickHouse Cloud: equivalent and different concepts
+# Comparing ClickHouse Cloud and BigQuery
## Resource organization {#resource-organization}
@@ -21,7 +22,7 @@ The way resources are organized in ClickHouse Cloud is similar to [BigQuery's re
Similar to BigQuery, organizations are the root nodes in the ClickHouse cloud resource hierarchy. The first user you set up in your ClickHouse Cloud account is automatically assigned to an organization owned by the user. The user may invite additional users to the organization.
-### BigQuery projects vs ClickHouse Cloud services {#bigquery-projects-vs-clickhouse-cloud-services}
+### BigQuery Projects vs ClickHouse Cloud Services {#bigquery-projects-vs-clickhouse-cloud-services}
Within organizations, you can create services loosely equivalent to BigQuery projects because stored data in ClickHouse Cloud is associated with a service. There are [several service types available](/cloud/manage/cloud-tiers) in ClickHouse Cloud. Each ClickHouse Cloud service is deployed in a specific region and includes:
@@ -29,15 +30,15 @@ Within organizations, you can create services loosely equivalent to BigQuery pro
2. An object storage folder where the service stores all the data.
3. An endpoint (or multiple endpoints created via ClickHouse Cloud UI console) - a service URL that you use to connect to the service (for example, `https://dv2fzne24g.us-east-1.aws.clickhouse.cloud:8443`)
-### BigQuery datasets vs ClickHouse Cloud databases {#bigquery-datasets-vs-clickhouse-cloud-databases}
+### BigQuery Datasets vs ClickHouse Cloud Databases {#bigquery-datasets-vs-clickhouse-cloud-databases}
ClickHouse logically groups tables into databases. Like BigQuery datasets, ClickHouse databases are logical containers that organize and control access to table data.
-### BigQuery folders {#bigquery-folders}
+### BigQuery Folders {#bigquery-folders}
ClickHouse Cloud currently has no concept equivalent to BigQuery folders.
-### BigQuery slot reservations and quotas {#bigquery-slot-reservations-and-quotas}
+### BigQuery Slot reservations and Quotas {#bigquery-slot-reservations-and-quotas}
Like BigQuery slot reservations, you can [configure vertical and horizontal autoscaling](/manage/scaling#configuring-vertical-auto-scaling) in ClickHouse Cloud. For vertical autoscaling, you can set the minimum and maximum size for the memory and CPU cores of the compute nodes for a service. The service will then scale as needed within those bounds. These settings are also available during the initial service creation flow. Each compute node in the service has the same size. You can change the number of compute nodes within a service with [horizontal scaling](/manage/scaling#manual-horizontal-scaling).
@@ -78,7 +79,7 @@ When presented with multiple options for ClickHouse types, consider the actual r
## Query acceleration techniques {#query-acceleration-techniques}
-### Primary and foreign keys and primary index {#primary-and-foreign-keys-and-primary-index}
+### Primary and Foreign keys and Primary index {#primary-and-foreign-keys-and-primary-index}
In BigQuery, a table can have [primary key and foreign key constraints](https://cloud.google.com/bigquery/docs/information-schema-table-constraints). Typically, primary and foreign keys are used in relational databases to ensure data integrity. A primary key value is normally unique for each row and is not `NULL`. Each foreign key value in a row must be present in the primary key column of the primary key table or be `NULL`. In BigQuery, these constraints are not enforced, but the query optimizer may use this information to optimize queries better.
diff --git a/docs/migrations/bigquery/migrating-to-clickhouse-cloud.md b/docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/02_migrating-to-clickhouse-cloud.md
similarity index 99%
rename from docs/migrations/bigquery/migrating-to-clickhouse-cloud.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/02_migrating-to-clickhouse-cloud.md
index 44f8c8c7d20..0118a912fec 100644
--- a/docs/migrations/bigquery/migrating-to-clickhouse-cloud.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/02_migrating-to-clickhouse-cloud.md
@@ -4,6 +4,7 @@ slug: /migrations/bigquery/migrating-to-clickhouse-cloud
description: 'How to migrate your data from BigQuery to ClickHouse Cloud'
keywords: ['BigQuery']
show_related_blogs: true
+sidebar_label: 'Migration guide'
---
import bigquery_2 from '@site/static/images/migrations/bigquery-2.png';
diff --git a/docs/migrations/bigquery/loading-data.md b/docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/03_loading-data.md
similarity index 96%
rename from docs/migrations/bigquery/loading-data.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/03_loading-data.md
index 8e2558fe073..0bfdff8b2eb 100644
--- a/docs/migrations/bigquery/loading-data.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/03_loading-data.md
@@ -24,7 +24,9 @@ Exporting data from BigQuery to ClickHouse is dependent on the size of your data
| [contracts](https://github.com/ClickHouse/examples/blob/main/ethereum/schemas/contracts.md) | 57,225,837 | 350 | 45.35GB | 16 sec | 1 hr 51 min | 39.4 secs |
| Total | 8.26 billion | 23,577 | 3.982TB | 8 min 3 sec | \> 6 days 5 hrs | 53 mins 45 secs |
-## 1. Export table data to GCS {#1-export-table-data-to-gcs}
+
+
+## Export table data to GCS {#1-export-table-data-to-gcs}
In this step, we utilize the [BigQuery SQL workspace](https://cloud.google.com/bigquery/docs/bigquery-web-ui) to execute our SQL commands. Below, we export a BigQuery table named `mytable` to a GCS bucket using the [`EXPORT DATA`](https://cloud.google.com/bigquery/docs/reference/standard-sql/other-statements) statement.
@@ -60,7 +62,7 @@ This approach has a number of advantages:
- Exports produce multiple files automatically, limiting each to a maximum of 1GB of table data. This is beneficial to ClickHouse since it allows imports to be parallelized.
- Parquet, as a column-oriented format, represents a better interchange format since it is inherently compressed and faster for BigQuery to export and ClickHouse to query
-## 2. Importing data into ClickHouse from GCS {#2-importing-data-into-clickhouse-from-gcs}
+## Importing data into ClickHouse from GCS {#2-importing-data-into-clickhouse-from-gcs}
Once the export is complete, we can import this data into a ClickHouse table. You can use the [ClickHouse SQL console](/integrations/sql-clients/sql-console) or [`clickhouse-client`](/interfaces/cli) to execute the commands below.
@@ -111,7 +113,7 @@ In the above query, we use the [`ifNull` function](/sql-reference/functions/func
Alternatively, you can `SET input_format_null_as_default=1` and any missing or NULL values will be replaced by default values for their respective columns, if those defaults are specified.
:::
-## 3. Testing successful data export {#3-testing-successful-data-export}
+## Testing successful data export {#3-testing-successful-data-export}
To test whether your data was properly inserted, simply run a `SELECT` query on your new table:
@@ -121,6 +123,8 @@ SELECT * FROM mytable LIMIT 10;
To export more BigQuery tables, simply redo the steps above for each additional table.
+
+
## Further reading and support {#further-reading-and-support}
In addition to this guide, we also recommend reading our blog post that shows [how to use ClickHouse to speed up BigQuery and how to handle incremental imports](https://clickhouse.com/blog/clickhouse-bigquery-migrating-data-for-realtime-queries).
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/_04_sql_translation_reference.md b/docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/_04_sql_translation_reference.md
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/docs/migrations/bigquery/index.md b/docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/index.md
similarity index 100%
rename from docs/migrations/bigquery/index.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/03_bigquery/index.md
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/01_overview.md b/docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/01_overview.md
new file mode 100644
index 00000000000..980cfed6061
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/01_overview.md
@@ -0,0 +1,184 @@
+---
+sidebar_label: 'Overview'
+slug: /migrations/snowflake-overview
+description: 'Migrating from Snowflake to ClickHouse'
+keywords: ['Snowflake']
+title: 'Migrate from Snowflake to ClickHouse'
+show_related_blogs: true
+---
+
+import snowflake_architecture from '@site/static/images/cloud/onboard/discover/use_cases/snowflake_architecture.png';
+import cloud_architecture from '@site/static/images/cloud/onboard/discover/use_cases/cloud_architecture.png';
+import Image from '@theme/IdealImage';
+
+# Snowflake to ClickHouse migration
+
+> This document provides an introduction to migrating data from Snowflake to ClickHouse.
+
+Snowflake is a cloud data warehouse primarily focused on moving legacy
+on-premises data warehousing workloads to the cloud. It is well-optimized for
+executing long-running reports at scale. As datasets migrate to the cloud, data
+owners start thinking about how else they can extract value from this data,
+including using these datasets to power real-time applications for internal and
+external use cases. When this happens, they often realize they need a database
+optimized for powering real-time analytics, like ClickHouse.
+
+## Comparison {#comparison}
+
+In this section, we'll compare the key features of ClickHouse and Snowflake.
+
+### Similarities {#similarities}
+
+Snowflake is a cloud-based data warehousing platform that provides a scalable
+and efficient solution for storing, processing, and analyzing large amounts of
+data.
+Like ClickHouse, Snowflake is not built on existing technologies but relies
+on its own SQL query engine and custom architecture.
+
+Snowflake's architecture is described as a hybrid between a shared-storage (shared-disk)
+architecture and a shared-nothing architecture. A shared-storage architecture is
+one where data is accessible from all compute nodes using object
+stores such as S3. A shared-nothing architecture is one where each compute node
+stores a portion of the entire data set locally to respond to queries. This, in
+theory, delivers the best of both models: the simplicity of a shared-disk
+architecture and the scalability of a shared-nothing architecture.
+
+This design fundamentally relies on object storage as the primary storage medium,
+which scales almost infinitely under concurrent access while providing high
+resilience and scalable throughput guarantees.
+
+The image below from [docs.snowflake.com](https://docs.snowflake.com/en/user-guide/intro-key-concepts)
+shows this architecture:
+
+
+<Image img={snowflake_architecture} size="lg" alt="Snowflake architecture"/>
+
+Conversely, as an open-source and cloud-hosted product, ClickHouse can be deployed
+in both shared-disk and shared-nothing architectures. The latter is typical for
+self-managed deployments. While allowing for CPU and memory to be easily scaled,
+shared-nothing configurations introduce classic data management challenges and
+overhead of data replication, especially during membership changes.
+
+For this reason, ClickHouse Cloud utilizes a shared-storage architecture that is
+conceptually similar to Snowflake. Data is stored once in an object store
+(single copy), such as S3 or GCS, providing virtually infinite storage with
+strong redundancy guarantees. Each node has access to this single copy of the
+data as well as its own local SSDs for cache purposes. Nodes can, in turn, be
+scaled to provide additional CPU and memory resources as required. Like Snowflake,
+S3’s scalability properties address the classic limitation of shared-disk
+architectures (disk I/O and network bottlenecks) by ensuring the I/O throughput
+available to current nodes in a cluster is not impacted as additional nodes are
+added.
+
+
+<Image img={cloud_architecture} size="lg" alt="ClickHouse Cloud architecture"/>
+
+### Differences {#differences}
+
+Aside from the underlying storage formats and query engines, these architectures
+differ in a few subtle ways:
+
+* Compute resources in Snowflake are provided through the concept of [warehouses](https://docs.snowflake.com/en/user-guide/warehouses).
+  These consist of a number of nodes, each of a set size. While Snowflake
+  doesn't publish the specific architecture of their warehouses, it is
+  [generally understood](https://select.dev/posts/snowflake-warehouse-sizing)
+  that each node consists of 8 vCPUs, 16GiB of RAM, and 200GB of local storage (for cache).
+  The number of nodes depends on the t-shirt size, e.g. an x-small has one node,
+  a small two, a medium four, a large eight, etc. These warehouses are independent
+  of the data and can be used to query any database residing on object storage.
+  When idle and not subjected to query load, warehouses are paused, resuming when
+  a query is received. While storage costs are always reflected in billing,
+  warehouses are only charged when active.
+
+* ClickHouse Cloud utilizes a similar principle of nodes with local cache
+  storage. Rather than t-shirt sizes, users deploy a service with a total
+  amount of compute and available RAM. This, in turn, transparently
+  auto-scales (within defined limits) based on the query load - either
+  vertically by increasing (or decreasing) the resources for each node or
+  horizontally by raising/lowering the total number of nodes. ClickHouse
+  Cloud nodes currently have a 1:4 CPU-to-memory ratio, unlike Snowflake's
+  1:2. While a looser coupling is possible, services are currently coupled
+  to the data, unlike Snowflake warehouses. Nodes will also pause if idle
+  and resume if subjected to queries. Users can also manually resize
+  services if needed.
+
+* ClickHouse Cloud's query cache is currently node specific, unlike
+ Snowflake's, which is delivered at a service layer independent of the
+ warehouse. Based on benchmarks, ClickHouse Cloud's node cache outperforms
+ Snowflake's.
+
+* Snowflake and ClickHouse Cloud take different approaches to scaling to
+ increase query concurrency. Snowflake addresses this through a feature
+ known as [multi-cluster warehouses](https://docs.snowflake.com/en/user-guide/warehouses-multicluster#benefits-of-multi-cluster-warehouses).
+ This feature allows users to add clusters to a warehouse. While this offers no
+ improvement to query latency, it does provide additional parallelization and
+ allows higher query concurrency. ClickHouse achieves this by adding more memory
+ and CPU to a service through vertical or horizontal scaling. We do not explore the
+  capabilities of these services to scale to higher concurrency in this guide,
+ focusing instead on latency, but acknowledge that this work should be done
+ for a complete comparison. However, we would expect ClickHouse to perform
+ well in any concurrency test, with Snowflake explicitly limiting the number
+ of concurrent queries allowed for a [warehouse to 8 by default](https://docs.snowflake.com/en/sql-reference/parameters#max-concurrency-level).
+ In comparison, ClickHouse Cloud allows up to 1000 queries to be executed per
+ node.
+
+* Snowflake's ability to switch compute size on a dataset, coupled with fast
+ resume times for warehouses, makes it an excellent experience for ad hoc
+ querying. For data warehouse and data lake use cases, this provides an
+ advantage over other systems.
+
+### Real-time analytics {#real-time-analytics}
+
+Based on public [benchmark](https://benchmark.clickhouse.com/#system=+%E2%98%81w|%EF%B8%8Fr|C%20c|nfe&type=-&machine=-ca2|gl|6ax|6ale|3al&cluster_size=-&opensource=-&tuned=+n&metric=hot&queries=-) data,
+ClickHouse outperforms Snowflake for real-time analytics applications in the following areas:
+
+* **Query latency**: Snowflake queries have a higher query latency even
+ when clustering is applied to tables to optimize performance. In our
+ testing, Snowflake requires over twice the compute to achieve equivalent
+ ClickHouse performance on queries where a filter is applied that is part
+ of the Snowflake clustering key or ClickHouse primary key. While
+ Snowflake's [persistent query cache](https://docs.snowflake.com/en/user-guide/querying-persisted-results)
+ offsets some of these latency challenges, this is ineffective in cases
+ where the filter criteria are more diverse. This query cache effectiveness
+ can be further impacted by changes to the underlying data, with cache
+  entries invalidated when the table changes. While this is not the case in
+  our benchmark, a real deployment would require new, more recent data to be
+  inserted. Note that ClickHouse's query cache is node specific and not
+  [transactionally consistent](https://clickhouse.com/blog/introduction-to-the-clickhouse-query-cache-and-design),
+  making it [better suited](https://clickhouse.com/blog/introduction-to-the-clickhouse-query-cache-and-design)
+  to real-time analytics. Users also have granular control over its use,
+  with the ability to enable it on a [per-query basis](/operations/settings/settings#use-query-cache),
+  control its [precise size](/operations/settings/settings#query-cache-max-size-in-bytes),
+  decide whether a [query is cached](/operations/settings/settings#enable-writes-to-query-cache)
+  (limits on duration or required number of executions), and whether it is
+  only [passively used](https://clickhouse.com/blog/introduction-to-the-clickhouse-query-cache-and-design#using-logs-and-settings)
+  (see the example after this list).
+
+* **Lower cost**: Snowflake warehouses can be configured to suspend after
+ a period of query inactivity. Once suspended, charges are not incurred.
+ Practically, this inactivity check can [only be lowered to 60s](https://docs.snowflake.com/en/sql-reference/sql/alter-warehouse).
+ Warehouses will automatically resume, within several seconds, once a query
+ is received. With Snowflake only charging for resources when a warehouse
+ is under use, this behavior caters to workloads that often sit idle, like
+ ad-hoc querying.
+
+ However, many real-time analytics workloads require ongoing real-time data
+ ingestion and frequent querying that doesn't benefit from idling (like
+ customer-facing dashboards). This means warehouses must often be fully
+ active and incurring charges. This negates the cost-benefit of idling as
+ well as any performance advantage that may be associated with Snowflake's
+ ability to resume a responsive state faster than alternatives. This active
+ state requirement, when combined with ClickHouse Cloud's lower per-second
+ cost for an active state, results in ClickHouse Cloud offering a
+ significantly lower total cost for these kinds of workloads.
+
+* **Predictable pricing of features:** Features such as materialized views
+ and clustering (equivalent to ClickHouse's ORDER BY) are required to reach
+ the highest levels of performance in real-time analytics use cases. These
+ features incur additional charges in Snowflake, requiring not only a
+ higher tier, which increases costs per credit by 1.5x, but also
+ unpredictable background costs. For instance, materialized views incur a
+ background maintenance cost, as does clustering, which is hard to predict
+ prior to use. In contrast, these features incur no additional cost in
+ ClickHouse Cloud, except additional CPU and memory usage at insert time,
+ typically negligible outside of high insert workload use cases. We have
+ observed in our benchmark that these differences, along with lower query
+ latencies and higher compression, result in significantly lower costs with
+ ClickHouse.
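+
+As a brief sketch of the per-query cache control described above, the settings
+below apply to a single query only; the `sales` table is hypothetical:
+
+```sql
+-- Opt this query into the query cache and cap the entry's lifetime
+SELECT count() FROM sales
+SETTINGS use_query_cache = 1, query_cache_ttl = 300;
+```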
diff --git a/docs/migrations/snowflake.md b/docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/02_migration_guide.md
similarity index 77%
rename from docs/migrations/snowflake.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/02_migration_guide.md
index 38d3b8dfac1..468a8b6193b 100644
--- a/docs/migrations/snowflake.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/02_migration_guide.md
@@ -1,23 +1,27 @@
---
-sidebar_label: 'Snowflake'
-sidebar_position: 20
+sidebar_label: 'Migration guide'
slug: /migrations/snowflake
description: 'Migrating from Snowflake to ClickHouse'
keywords: ['Snowflake']
title: 'Migrating from Snowflake to ClickHouse'
-show_related_blogs: true
+show_related_blogs: false
---
import migrate_snowflake_clickhouse from '@site/static/images/migrations/migrate_snowflake_clickhouse.png';
import Image from '@theme/IdealImage';
-# Migrating from Snowflake to ClickHouse
+# Migrate from Snowflake to ClickHouse
-This guide shows how to migrate data from Snowflake to ClickHouse.
+> This guide shows you how to migrate data from Snowflake to ClickHouse.
-Migrating data between Snowflake and ClickHouse requires the use of an object store, such as S3, as an intermediate storage for transfer. The migration process also relies on using the commands `COPY INTO` from Snowflake and `INSERT INTO SELECT` of ClickHouse.
+Migrating data between Snowflake and ClickHouse requires the use of an object store,
+such as S3, as an intermediate storage for transfer. The migration process also
+relies on using the commands `COPY INTO` from Snowflake and `INSERT INTO SELECT`
+of ClickHouse.
-## 1. Exporting data from Snowflake {#1-exporting-data-from-snowflake}
+
+
+## Export data from Snowflake {#1-exporting-data-from-snowflake}
@@ -54,7 +58,7 @@ COPY INTO @external_stage/mydataset from mydataset max_file_size=157286400 heade
For a dataset around 5TB of data with a maximum file size of 150MB, and using a 2X-Large Snowflake warehouse located in the same AWS `us-east-1` region, copying data to the S3 bucket will take around 30 minutes.
-## 2. Importing to ClickHouse {#2-importing-to-clickhouse}
+## Import to ClickHouse {#2-importing-to-clickhouse}
Once the data is staged in intermediary object storage, ClickHouse functions such as the [s3 table function](/sql-reference/table-functions/s3) can be used to insert the data into a table, as shown below.
@@ -65,10 +69,10 @@ Assuming the following table target schema:
```sql
CREATE TABLE default.mydataset
(
- `timestamp` DateTime64(6),
- `some_text` String,
- `some_file` Tuple(filename String, version String),
- `complex_data` Tuple(name String, description String),
+ `timestamp` DateTime64(6),
+ `some_text` String,
+ `some_file` Tuple(filename String, version String),
+ `complex_data` Tuple(name String, description String),
)
ENGINE = MergeTree
ORDER BY (timestamp)
@@ -79,16 +83,16 @@ We can then use the `INSERT INTO SELECT` command to insert the data from S3 into
```sql
INSERT INTO mydataset
SELECT
- timestamp,
- some_text,
- JSONExtract(
- ifNull(some_file, '{}'),
- 'Tuple(filename String, version String)'
- ) AS some_file,
- JSONExtract(
- ifNull(complex_data, '{}'),
- 'Tuple(filename String, description String)'
- ) AS complex_data,
+ timestamp,
+ some_text,
+ JSONExtract(
+ ifNull(some_file, '{}'),
+ 'Tuple(filename String, version String)'
+ ) AS some_file,
+ JSONExtract(
+ ifNull(complex_data, '{}'),
+ 'Tuple(filename String, description String)'
+ ) AS complex_data,
FROM s3('https://mybucket.s3.amazonaws.com/mydataset/mydataset*.parquet')
SETTINGS input_format_null_as_default = 1, -- Ensure columns are inserted as default if values are null
input_format_parquet_case_insensitive_column_matching = 1 -- Column matching between source data and target table should be case insensitive
@@ -100,10 +104,12 @@ The `VARIANT` and `OBJECT` columns in the original Snowflake table schema will b
Nested structures such as `some_file` are converted to JSON strings on copy by Snowflake. Importing this data requires us to transform these structures to Tuples at insert time in ClickHouse, using the [JSONExtract function](/sql-reference/functions/json-functions#jsonextract) as shown above.
:::
-## 3. Testing successful data export {#3-testing-successful-data-export}
+## Test successful data export {#3-testing-successful-data-export}
To test whether your data was properly inserted, simply run a `SELECT` query on your new table:
```sql
SELECT * FROM mydataset LIMIT 10;
```
+
+
\ No newline at end of file
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/03_sql_translation_reference.md b/docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/03_sql_translation_reference.md
new file mode 100644
index 00000000000..a5f41b52605
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/03_sql_translation_reference.md
@@ -0,0 +1,114 @@
+---
+sidebar_label: 'SQL translation reference'
+slug: /migrations/snowflake-translation-reference
+description: 'SQL translation reference'
+keywords: ['Snowflake']
+title: 'Snowflake SQL translation guide'
+show_related_blogs: true
+---
+
+# Snowflake SQL translation guide
+
+## Data types {#data-types}
+
+### Numerics {#numerics}
+
+Users moving data between ClickHouse and Snowflake will immediately notice that
+ClickHouse offers more granular precision when declaring numerics. For example,
+Snowflake offers the type `NUMBER` for numerics. This requires the user to specify a
+precision (total number of digits) and scale (digits to the right of the decimal place)
+up to a total of 38. Integer declarations are synonymous with `NUMBER`, and simply
+define a fixed precision and scale where the range is the same. This convenience
+is possible as modifying the precision (scale is 0 for integers) does not impact the
+size of data on disk in Snowflake - the minimal required bytes are used for a
+numeric range at write time at a micro-partition level. The scale does, however,
+impact storage space and is offset with compression. A `Float64` type offers a
+wider range of values with a loss of precision.
+
+Contrast this with ClickHouse, which offers multiple signed and unsigned
+integer and float types of varying precision. With these, ClickHouse users can
+be explicit about the precision required for integers to optimize storage and
+memory overhead. A `Decimal` type, equivalent to Snowflake's `NUMBER` type,
+also offers twice the precision and scale at 76 digits. In addition to a
+similar `Float64` type, ClickHouse also provides a `Float32` for when precision
+is less critical and compression paramount (see the sketch below).
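+
+As a minimal sketch, the hypothetical table below shows how this granularity
+is expressed in ClickHouse DDL:
+
+```sql
+CREATE TABLE numeric_examples
+(
+    id UInt32,              -- unsigned 32-bit integer
+    delta Int8,             -- signed 8-bit integer, one byte on disk
+    price Decimal(38, 10),  -- Decimal supports up to 76 digits of precision
+    ratio Float32,          -- when precision is less critical and compression paramount
+    reading Float64
+)
+ENGINE = MergeTree
+ORDER BY id;
+```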
+
+### Strings {#strings}
+
+ClickHouse and Snowflake take contrasting approaches to the storage of string
+data. The `VARCHAR` in Snowflake holds Unicode characters in UTF-8, allowing the
+user to specify a maximum length. This length has no impact on storage or
+performance, since the minimum number of bytes is always used to store a
+string; it merely provides a constraint useful for downstream tooling. Other types, such
+as `Text` and `NChar`, are simply aliases for this type. ClickHouse conversely
+stores all [string data as raw bytes](/sql-reference/data-types/string) with a `String`
+type (no length specification required), deferring encoding to the user, with
+[query time functions](/sql-reference/functions/string-functions#lengthutf8)
+available for different encodings. We refer the reader to ["Opaque data argument"](https://utf8everywhere.org/#cookie)
+for the motivation as to why. The ClickHouse `String` is thus more comparable
+to the Snowflake `BINARY` type in its implementation. Both [Snowflake](https://docs.snowflake.com/en/sql-reference/collation)
+and [ClickHouse](/sql-reference/statements/select/order-by#collation-support)
+support "collation", allowing users to override how strings are sorted and compared.
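+
+As an illustration of the query-time encoding functions mentioned above, the
+following compares raw byte length with UTF-8 character length:
+
+```sql
+-- 'café' is five bytes in UTF-8 but four Unicode code points
+SELECT
+    length('café')     AS bytes,      -- returns 5
+    lengthUTF8('café') AS characters  -- returns 4
+```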
+
+### Semi-structured types {#semi-structured-data}
+
+Snowflake supports the `VARIANT`, `OBJECT` and `ARRAY` types for semi-structured
+data.
+
+ClickHouse offers the equivalent [`Variant`](/sql-reference/data-types/variant),
+[`Object`](/sql-reference/data-types/object-data-type) (deprecated) and [`Array`](/sql-reference/data-types/array)
+types. Additionally, ClickHouse has the [`JSON`](/sql-reference/data-types/newjson)
+type which replaces the now deprecated `Object('json')` type and is particularly
+performant and storage efficient in [comparison to other native JSON types](https://jsonbench.com/).
+
+ClickHouse also supports named [`Tuple`s](/sql-reference/data-types/tuple) and arrays of Tuples
+via the [`Nested`](/sql-reference/data-types/nested-data-structures/nested) type,
+allowing users to explicitly map nested structures. This allows codecs and type
+optimizations to be applied throughout the hierarchy, unlike Snowflake, which
+requires the user to use the `OBJECT`, `VARIANT`, and `ARRAY` types for the outer
+object and does not allow [explicit internal typing](https://docs.snowflake.com/en/sql-reference/data-types-semistructured#characteristics-of-an-object).
+This internal typing also simplifies queries on nested numerics in ClickHouse,
+which do not need to be cast and can be used in index definitions.
+
+In ClickHouse, codecs and optimized types can also be applied to substructures.
+This provides an added benefit that compression with nested structures remains
+excellent, and comparable, to flattened data. In contrast, as a result of the
+inability to apply specific types to substructures, Snowflake recommends [flattening
+data to achieve optimal compression](https://docs.snowflake.com/en/user-guide/semistructured-considerations#storing-semi-structured-data-in-a-variant-column-vs-flattening-the-nested-structure).
+Snowflake also [imposes size restrictions](https://docs.snowflake.com/en/user-guide/semistructured-considerations#data-size-limitations)
+for these data types.
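+
+Below is a minimal sketch of an explicitly typed nested structure in
+ClickHouse, in contrast to Snowflake's `VARIANT`/`OBJECT` approach; the table
+and column names are hypothetical:
+
+```sql
+CREATE TABLE events
+(
+    id UInt64,
+    -- named Tuple with explicit inner types, queryable without casting
+    device Tuple(name String, version UInt32),
+    -- codecs and type optimizations can be applied per substructure
+    tags Array(LowCardinality(String))
+)
+ENGINE = MergeTree
+ORDER BY id;
+```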
+
+### Type reference {#type-reference}
+
+| Snowflake | ClickHouse | Note |
+|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [`NUMBER`](https://docs.snowflake.com/en/sql-reference/data-types-numeric) | [`Decimal`](/sql-reference/data-types/decimal) | ClickHouse supports twice the precision and scale of Snowflake - 76 digits vs. 38. |
+| [`FLOAT`, `FLOAT4`, `FLOAT8`](https://docs.snowflake.com/en/sql-reference/data-types-numeric#data-types-for-floating-point-numbers) | [`Float32`, `Float64`](/sql-reference/data-types/float) | All floats in Snowflake are 64 bit. |
+| [`VARCHAR`](https://docs.snowflake.com/en/sql-reference/data-types-text#varchar) | [`String`](/sql-reference/data-types/string) | |
+| [`BINARY`](https://docs.snowflake.com/en/sql-reference/data-types-text#binary) | [`String`](/sql-reference/data-types/string) | |
+| [`BOOLEAN`](https://docs.snowflake.com/en/sql-reference/data-types-logical) | [`Bool`](/sql-reference/data-types/boolean) | |
+| [`DATE`](https://docs.snowflake.com/en/sql-reference/data-types-datetime#date) | [`Date`](/sql-reference/data-types/date), [`Date32`](/sql-reference/data-types/date32) | `DATE` in Snowflake offers a wider date range than ClickHouse e.g. min for `Date32` is `1900-01-01` and `Date` `1970-01-01`. `Date` in ClickHouse provides more cost efficient (two byte) storage. |
+| [`TIME(N)`](https://docs.snowflake.com/en/sql-reference/data-types-datetime#time) | No direct equivalent but can be represented by [`DateTime`](/sql-reference/data-types/datetime) and [`DateTime64(N)`](/sql-reference/data-types/datetime64). | `DateTime64` uses the same concepts of precision. |
+| [`TIMESTAMP`](https://docs.snowflake.com/en/sql-reference/data-types-datetime#timestamp) - [`TIMESTAMP_LTZ`](https://docs.snowflake.com/en/sql-reference/data-types-datetime#timestamp-ltz-timestamp-ntz-timestamp-tz), [`TIMESTAMP_NTZ`](https://docs.snowflake.com/en/sql-reference/data-types-datetime#timestamp-ltz-timestamp-ntz-timestamp-tz), [`TIMESTAMP_TZ`](https://docs.snowflake.com/en/sql-reference/data-types-datetime#timestamp-ltz-timestamp-ntz-timestamp-tz) | [`DateTime`](/sql-reference/data-types/datetime) and [`DateTime64`](/sql-reference/data-types/datetime64) | `DateTime` and `DateTime64` can optionally have a TZ parameter defined for the column. If not present, the server's timezone is used. Additionally a `--use_client_time_zone` parameter is available for the client. |
+| [`VARIANT`](https://docs.snowflake.com/en/sql-reference/data-types-semistructured#variant) | [`JSON`, `Tuple`, `Nested`](/integrations/data-formats/json) | The `JSON` type infers column types at insert time. `Tuple`, `Nested`, and `Array` can also be used to build explicitly typed structures as an alternative. |
+| [`OBJECT`](https://docs.snowflake.com/en/sql-reference/data-types-semistructured#object) | [`Tuple`, `Map`, `JSON`](/integrations/data-formats/json) | Both `OBJECT` and `Map` are analogous to `JSON` type in ClickHouse where the keys are a `String`. ClickHouse requires the value to be consistent and strongly typed whereas Snowflake uses `VARIANT`. This means the values of different keys can be a different type. If this is required in ClickHouse, explicitly define the hierarchy using `Tuple` or rely on `JSON` type. |
+| [`ARRAY`](https://docs.snowflake.com/en/sql-reference/data-types-semistructured#array) | [`Array`](/sql-reference/data-types/array), [`Nested`](/sql-reference/data-types/nested-data-structures/nested) | `ARRAY` in Snowflake uses `VARIANT` for the elements - a super type. Conversely these are strongly typed in ClickHouse. |
+| [`GEOGRAPHY`](https://docs.snowflake.com/en/sql-reference/data-types-geospatial#geography-data-type) | [`Point`, `Ring`, `Polygon`, `MultiPolygon`](/sql-reference/data-types/geo) | Snowflake imposes a coordinate system (WGS 84) while ClickHouse applies one at query time. |
+| [`GEOMETRY`](https://docs.snowflake.com/en/sql-reference/data-types-geospatial#geometry-data-type) | [`Point`, `Ring`, `Polygon`, `MultiPolygon`](/sql-reference/data-types/geo) | | |
+
+| ClickHouse Type | Description |
+|-------------------|-----------------------------------------------------------------------------------------------------|
+| `IPv4` and `IPv6` | IP-specific types, potentially allowing more efficient storage than Snowflake. |
+| `FixedString` | Allows a fixed length of bytes to be used, which is useful for hashes. |
+| `LowCardinality` | Allows any type to be dictionary encoded. Useful for when the cardinality is expected to be < 100k. |
+| `Enum` | Allows efficient encoding of named values in either 8 or 16-bit ranges. |
+| `UUID` | For efficient storage of UUIDs. |
+| `Array(Float32)` | Vectors can be represented as an Array of Float32 with supported distance functions. |
+
+Finally, ClickHouse offers the unique ability to store the intermediate
+[state of aggregate functions](/sql-reference/data-types/aggregatefunction). This
+state is implementation-specific, but allows the result of an aggregation to be
+stored and later queried (with corresponding merge functions). Typically, this
+feature is used via a materialized view and offers the ability to improve the
+performance of specific queries with minimal storage cost by storing the
+incremental result of queries over inserted data.
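+
+A minimal sketch of this pattern, using a hypothetical `events` table:
+
+```sql
+-- Target table storing intermediate aggregation state
+CREATE TABLE daily_uniques
+(
+    day Date,
+    uniques AggregateFunction(uniq, UInt64)
+)
+ENGINE = AggregatingMergeTree
+ORDER BY day;
+
+-- Write state with the -State combinator...
+INSERT INTO daily_uniques
+SELECT toDate(event_time) AS day, uniqState(user_id)
+FROM events
+GROUP BY day;
+
+-- ...and read it back with the matching -Merge combinator
+SELECT day, uniqMerge(uniques) AS unique_users
+FROM daily_uniques
+GROUP BY day
+ORDER BY day;
+```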
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/_category_.json b/docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/_category_.json
new file mode 100644
index 00000000000..50b05cb45a0
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/04_snowflake/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Snowflake",
+ "collapsible": true,
+  "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/05_elastic/01_overview.md b/docs/cloud/onboard/02_migrate/01_migration_guides/05_elastic/01_overview.md
new file mode 100644
index 00000000000..5b6a7476b7c
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/05_elastic/01_overview.md
@@ -0,0 +1,12 @@
+---
+sidebar_label: 'Overview'
+slug: /migrations/elastic-overview
+description: 'Migrating from Elasticsearch to ClickHouse'
+keywords: ['Elasticsearch']
+title: 'Migrate from Elasticsearch to ClickHouse'
+show_related_blogs: true
+---
+
+# Elasticsearch to ClickHouse migration
+
+> This document provides an introduction to migrating data from Elasticsearch to ClickHouse.
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/05_elastic/_category_.json b/docs/cloud/onboard/02_migrate/01_migration_guides/05_elastic/_category_.json
new file mode 100644
index 00000000000..4f49621cf3d
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/05_elastic/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Elasticsearch",
+ "collapsible": true,
+ "collapsed": true
+}
\ No newline at end of file
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/01_overview.md b/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/01_overview.md
new file mode 100644
index 00000000000..785eba5d98a
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/01_overview.md
@@ -0,0 +1,57 @@
+---
+sidebar_label: 'Overview'
+slug: /migrations/redshift-overview
+description: 'Migrating from Amazon Redshift to ClickHouse'
+keywords: ['Redshift']
+title: 'Comparing ClickHouse Cloud and Amazon Redshift'
+---
+
+# Amazon Redshift to ClickHouse migration
+
+> This document provides an introduction to migrating data from Amazon
+Redshift to ClickHouse.
+
+## Introduction {#introduction}
+
+Amazon Redshift is a cloud data warehouse that provides reporting and
+analytics capabilities for structured and semi-structured data. It was
+designed to handle analytical workloads on big data sets using
+column-oriented database principles similar to ClickHouse. As part of the
+AWS offering, it is often the default solution AWS users turn to for their
+analytical data needs.
+
+While attractive to existing AWS users due to its tight integration with the
+Amazon ecosystem, Redshift users who adopt it to power real-time analytics
+applications find themselves in need of a more optimized solution for this
+purpose. As a result, they increasingly turn to ClickHouse to benefit from
+superior query performance and data compression, either as a replacement or
+as a "speed layer" deployed alongside existing Redshift workloads.
+
+## ClickHouse vs Redshift {#clickhouse-vs-redshift}
+
+For users heavily invested in the AWS ecosystem, Redshift represents a
+natural choice when faced with data warehousing needs. Redshift differs from
+ClickHouse in one important aspect: it optimizes its engine for data
+warehousing workloads requiring complex reporting and analytical queries.
+Across all deployment modes, the following two limitations make it difficult
+to use Redshift for real-time analytical workloads:
+* Redshift [compiles code for each query execution plan](https://docs.aws.amazon.com/redshift/latest/dg/c-query-performance.html),
+which adds significant overhead to first-time query execution. This overhead can
+be justified when query patterns are predictable and compiled execution plans
+can be stored in a query cache. However, this introduces challenges for interactive
+applications with variable queries. Even when Redshift is able to exploit this
+code compilation cache, ClickHouse is faster on most queries. See ["ClickBench"](https://benchmark.clickhouse.com/#system=+%E2%98%81w|%EF%B8%8Fr|C%20c|Rf&type=-&machine=-ca2|gl|6ax|6ale|3al&cluster_size=-&opensource=-&tuned=+n&metric=hot&queries=-).
+* Redshift [limits concurrency to 50 across all queues](https://docs.aws.amazon.com/redshift/latest/dg/c_workload_mngmt_classification.html),
+which (while adequate for BI) makes it inappropriate for highly concurrent
+analytical applications.
+
+Conversely, while ClickHouse can also be used for complex analytical queries,
+it is optimized for real-time analytical workloads, either powering applications
+or acting as a warehouse acceleration layer. As a result, Redshift users typically
+replace or augment Redshift with ClickHouse for the following reasons:
+
+| Advantage | Description |
+|------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **Lower query latencies** | ClickHouse achieves lower query latencies, including for varied query patterns, under high concurrency and while subjected to streaming inserts. Even when your query misses a cache, which is inevitable in interactive user-facing analytics, ClickHouse can still process it fast. |
+| **Higher concurrent query limits** | ClickHouse places much higher limits on concurrent queries, which is vital for real-time application experiences. In both self-managed ClickHouse and ClickHouse Cloud, you can scale up your compute allocation to achieve the concurrency your application needs for each service. The level of permitted query concurrency is configurable in ClickHouse, with ClickHouse Cloud defaulting to a value of 1000. |
+| **Superior data compression** | ClickHouse offers superior data compression, which allows users to reduce their total storage (and thus cost) or persist more data at the same cost and derive more real-time insights from their data. See "ClickHouse vs Redshift Storage Efficiency" below. |
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/02_migration_guide.md b/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/02_migration_guide.md
new file mode 100644
index 00000000000..506c9957e58
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/02_migration_guide.md
@@ -0,0 +1,13 @@
+---
+sidebar_label: 'Migration guide'
+slug: /migrations/redshift/migration-guide
+description: 'Migrating from Amazon Redshift to ClickHouse'
+keywords: ['Redshift']
+title: 'Amazon Redshift to ClickHouse migration guide'
+---
+
+import MigrationGuide from '@site/docs/integrations/data-ingestion/redshift/_snippets/_migration_guide.md'
+
+# Amazon Redshift to ClickHouse migration guide
+
+<MigrationGuide />
\ No newline at end of file
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/03_sql_translation_reference.md b/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/03_sql_translation_reference.md
new file mode 100644
index 00000000000..67585e4ea72
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/03_sql_translation_reference.md
@@ -0,0 +1,95 @@
+---
+sidebar_label: 'SQL translation reference'
+slug: /migrations/redshift/sql-translation-reference
+description: 'SQL translation reference for Amazon Redshift to ClickHouse'
+keywords: ['Redshift']
+title: 'Amazon Redshift SQL translation guide'
+---
+
+# Amazon Redshift SQL translation guide
+
+## Data types {#data-types}
+
+Users moving data between ClickHouse and Redshift will immediately notice
+that ClickHouse offers a more extensive range of types, which are also less
+restrictive. While Redshift requires users to specify possible string
+lengths, even if variable, ClickHouse removes this restriction and burden
+from the user by storing strings as uninterpreted bytes. The ClickHouse
+String type thus has no limits or length specification requirements.
+
+Furthermore, users can exploit Arrays, Tuples, and Enums, which are absent
+from Redshift as first-class citizens (although Arrays/Structs can be
+imitated with `SUPER`), a common frustration for its users. ClickHouse
+additionally allows aggregation states to be persisted, either at query
+time or even in a table. This enables data to be pre-aggregated, typically
+using a materialized view, and can dramatically improve query performance
+for common queries.
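+
+For illustration, one way to persist aggregation states is an
+`AggregatingMergeTree` table fed by a materialized view. This is a minimal
+sketch; the `events` table and its `timestamp` and `user_id` columns are
+hypothetical:
+
+```sql
+-- Target table storing partial aggregation states per day.
+CREATE TABLE daily_uniques
+(
+    day Date,
+    uniques AggregateFunction(uniq, UInt64)
+)
+ENGINE = AggregatingMergeTree
+ORDER BY day;
+
+-- Materialized view that folds new rows into aggregation states on insert.
+CREATE MATERIALIZED VIEW daily_uniques_mv TO daily_uniques AS
+SELECT
+    toDate(timestamp) AS day,
+    uniqState(user_id) AS uniques
+FROM events
+GROUP BY day;
+
+-- At query time, merge the stored states to get the final result.
+SELECT day, uniqMerge(uniques) AS unique_users
+FROM daily_uniques
+GROUP BY day
+ORDER BY day;
+```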
+
+Below we map the equivalent ClickHouse type for each Redshift type:
+
+| Redshift | ClickHouse |
+|------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [`SMALLINT`](https://docs.aws.amazon.com/redshift/latest/dg/r_Numeric_types201.html#r_Numeric_types201-integer-types)                | [`Int16`](/sql-reference/data-types/int-uint) *                                                                                                                                                                                                                                                                                                                                                                                                     |
+| [`INTEGER`](https://docs.aws.amazon.com/redshift/latest/dg/r_Numeric_types201.html#r_Numeric_types201-integer-types) | [`Int32`](/sql-reference/data-types/int-uint) * |
+| [`BIGINT`](https://docs.aws.amazon.com/redshift/latest/dg/r_Numeric_types201.html#r_Numeric_types201-integer-types) | [`Int64`](/sql-reference/data-types/int-uint) * |
+| [`DECIMAL`](https://docs.aws.amazon.com/redshift/latest/dg/r_Numeric_types201.html#r_Numeric_types201-decimal-or-numeric-type) | [`UInt128`, `UInt256`, `Int128`, `Int256`](/sql-reference/data-types/int-uint), [`Decimal(P, S)`, `Decimal32(S)`, `Decimal64(S)`, `Decimal128(S)`, `Decimal256(S)`](/sql-reference/data-types/decimal) - (high precision and ranges possible) |
+| [`REAL`](https://docs.aws.amazon.com/redshift/latest/dg/r_Numeric_types201.html#r_Numeric_types201-floating-point-types) | [`Float32`](/sql-reference/data-types/float) |
+| [`DOUBLE PRECISION`](https://docs.aws.amazon.com/redshift/latest/dg/r_Numeric_types201.html#r_Numeric_types201-floating-point-types) | [`Float64`](/sql-reference/data-types/float) |
+| [`BOOLEAN`](https://docs.aws.amazon.com/redshift/latest/dg/r_Boolean_type.html) | [`Bool`](/sql-reference/data-types/boolean) |
+| [`CHAR`](https://docs.aws.amazon.com/redshift/latest/dg/r_Character_types.html#r_Character_types-char-or-character) | [`String`](/sql-reference/data-types/string), [`FixedString`](/sql-reference/data-types/fixedstring) |
+| [`VARCHAR`](https://docs.aws.amazon.com/redshift/latest/dg/r_Character_types.html#r_Character_types-varchar-or-character-varying) ** | [`String`](/sql-reference/data-types/string) |
+| [`DATE`](https://docs.aws.amazon.com/redshift/latest/dg/r_Datetime_types.html#r_Datetime_types-date) | [`Date32`](/sql-reference/data-types/date32) |
+| [`TIMESTAMP`](https://docs.aws.amazon.com/redshift/latest/dg/r_Datetime_types.html#r_Datetime_types-timestamp) | [`DateTime`](/sql-reference/data-types/datetime), [`DateTime64`](/sql-reference/data-types/datetime64) |
+| [`TIMESTAMPTZ`](https://docs.aws.amazon.com/redshift/latest/dg/r_Datetime_types.html#r_Datetime_types-timestamptz) | [`DateTime`](/sql-reference/data-types/datetime), [`DateTime64`](/sql-reference/data-types/datetime64) |
+| [`GEOMETRY`](https://docs.aws.amazon.com/redshift/latest/dg/geospatial-overview.html) | [Geo Data Types](/sql-reference/data-types/geo) |
+| [`GEOGRAPHY`](https://docs.aws.amazon.com/redshift/latest/dg/geospatial-overview.html) | [Geo Data Types](/sql-reference/data-types/geo) (less developed e.g. no coordinate systems - can be emulated [with functions](/sql-reference/functions/geo/)) |
+| [`HLLSKETCH`](https://docs.aws.amazon.com/redshift/latest/dg/r_HLLSKTECH_type.html) | [`AggregateFunction(uniqHLL12, X)`](/sql-reference/data-types/aggregatefunction) |
+| [`SUPER`](https://docs.aws.amazon.com/redshift/latest/dg/r_SUPER_type.html) | [`Tuple`](/sql-reference/data-types/tuple), [`Nested`](/sql-reference/data-types/nested-data-structures/nested), [`Array`](/sql-reference/data-types/array), [`JSON`](/sql-reference/data-types/newjson), [`Map`](/sql-reference/data-types/map) |
+| [`TIME`](https://docs.aws.amazon.com/redshift/latest/dg/r_Datetime_types.html#r_Datetime_types-time) | [`DateTime`](/sql-reference/data-types/datetime), [`DateTime64`](/sql-reference/data-types/datetime64) |
+| [`TIMETZ`](https://docs.aws.amazon.com/redshift/latest/dg/r_Datetime_types.html#r_Datetime_types-timetz) | [`DateTime`](/sql-reference/data-types/datetime), [`DateTime64`](/sql-reference/data-types/datetime64) |
+| [`VARBYTE`](https://docs.aws.amazon.com/redshift/latest/dg/r_VARBYTE_type.html) ** | [`String`](/sql-reference/data-types/string) combined with [`Bit`](/sql-reference/functions/bit-functions) and [Encoding](/sql-reference/functions/encoding-functions/#hex) functions |
+
+\* ClickHouse additionally supports unsigned integers with extended ranges, i.e. `UInt8`, `UInt16`, `UInt32` and `UInt64`.
+
+\*\* ClickHouse's String type is unlimited by default but can be constrained to specific lengths using constraints.
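+
+If you need to emulate a `VARCHAR(N)` limit, a length check can be added
+as a table constraint, which is validated on insert. A minimal sketch,
+using an illustrative table:
+
+```sql
+CREATE TABLE users
+(
+    id UInt64,
+    name String,
+    -- Rejects inserts where name exceeds 255 bytes
+    -- (use lengthUTF8 for a character-based limit).
+    CONSTRAINT name_len CHECK length(name) <= 255
+)
+ENGINE = MergeTree
+ORDER BY id;
+```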
+
+## DDL syntax {#ddl-syntax}
+
+### Sorting keys {#sorting-keys}
+
+Both ClickHouse and Redshift have the concept of a “sorting key”, which
+defines how data is sorted as it is stored. Redshift defines the sorting
+key using the `SORTKEY` clause:
+
+```sql
+CREATE TABLE some_table(...) SORTKEY (column1, column2)
+```
+
+Comparatively, ClickHouse uses an `ORDER BY` clause to specify the sort order:
+
+```sql
+CREATE TABLE some_table(...) ENGINE = MergeTree ORDER BY (column1, column2)
+```
+
+In most cases, you can use the same sorting key columns and order in
+ClickHouse as in Redshift, assuming you are using the default `COMPOUND`
+type. When data is added to Redshift, you should run the `VACUUM` and
+`ANALYZE` commands to re-sort newly added data and update the statistics
+for the query planner; otherwise, the unsorted space grows. No such
+process is required for ClickHouse.
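+
+As a concrete sketch (the table and columns are illustrative), a Redshift
+compound sort key translates directly to a ClickHouse `ORDER BY`:
+
+```sql
+-- Redshift
+CREATE TABLE events (
+    device_id INTEGER,
+    ts TIMESTAMP,
+    payload VARCHAR(256)
+)
+COMPOUND SORTKEY (device_id, ts);
+
+-- ClickHouse equivalent
+CREATE TABLE events (
+    device_id Int32,
+    ts DateTime,
+    payload String
+)
+ENGINE = MergeTree
+ORDER BY (device_id, ts);
+```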
+
+Redshift supports a couple of convenience features for sorting keys. The first is
+automatic sorting keys (using `SORTKEY AUTO`). While this may be appropriate for
+getting started, explicit sorting keys ensure the best performance and storage
+efficiency when the sorting key is optimal. The second is the `INTERLEAVED` sort key,
+which gives equal weight to a subset of columns in the sort key to improve
+performance when a query uses one or more secondary sort columns. ClickHouse
+supports explicit [projections](/data-modeling/projections), which achieve the
+same end result with a slightly different setup, as sketched below.
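+
+As a sketch of the projection approach (continuing the illustrative
+`events` table), a projection stores the same rows under an alternative
+sort order, and ClickHouse automatically reads from whichever order best
+serves a query:
+
+```sql
+-- Add a projection sorted by (ts, device_id) for time-first queries.
+ALTER TABLE events
+    ADD PROJECTION by_ts
+    (
+        SELECT * ORDER BY (ts, device_id)
+    );
+
+-- Build the projection for data parts that already exist.
+ALTER TABLE events MATERIALIZE PROJECTION by_ts;
+```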
+
+Users should be aware that the “primary key” concept represents different
+things in ClickHouse and Redshift. In Redshift, the primary key resembles
+the traditional RDBMS concept, intended to enforce constraints. However,
+it is not strictly enforced in Redshift and instead acts as a hint for the
+query planner and for data distribution among nodes. In ClickHouse, the
+primary key denotes the columns used to construct the sparse primary
+index; it ensures the data is ordered on disk, maximizing compression,
+while staying small enough to avoid wasting memory.
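+
+In practice, when a `PRIMARY KEY` clause is given in ClickHouse it must be
+a prefix of the sorting key, and it only controls which columns enter the
+sparse index; a minimal sketch:
+
+```sql
+CREATE TABLE events (
+    device_id Int32,
+    ts DateTime,
+    payload String
+)
+ENGINE = MergeTree
+ORDER BY (device_id, ts)
+-- Index only device_id; data remains sorted by (device_id, ts).
+PRIMARY KEY (device_id);
+```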
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/_category_.json b/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/_category_.json
new file mode 100644
index 00000000000..95419dcb41c
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/06_redshift/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Redshift",
+ "collapsible": true,
+ "collapsed": true,
+}
\ No newline at end of file
diff --git a/docs/integrations/migration/clickhouse-to-cloud.md b/docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/01_clickhouse-to-cloud.md
similarity index 99%
rename from docs/integrations/migration/clickhouse-to-cloud.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/01_clickhouse-to-cloud.md
index 551314651e2..08ffe526dd7 100644
--- a/docs/integrations/migration/clickhouse-to-cloud.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/01_clickhouse-to-cloud.md
@@ -1,6 +1,5 @@
---
-sidebar_position: 10
-sidebar_label: 'ClickHouse to ClickHouse Cloud'
+sidebar_label: 'ClickHouse OSS'
slug: /cloud/migration/clickhouse-to-cloud
title: 'Migrating between self-managed ClickHouse and ClickHouse Cloud'
description: 'Page describing how to migrate between self-managed ClickHouse and ClickHouse Cloud'
diff --git a/docs/integrations/migration/clickhouse-local-etl.md b/docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/02_clickhouse-local-etl.md
similarity index 99%
rename from docs/integrations/migration/clickhouse-local-etl.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/02_clickhouse-local-etl.md
index 2faf0a935d7..5e3eabc70c9 100644
--- a/docs/integrations/migration/clickhouse-local-etl.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/02_clickhouse-local-etl.md
@@ -1,6 +1,5 @@
---
sidebar_label: 'Using clickhouse-local'
-sidebar_position: 20
keywords: ['clickhouse', 'migrate', 'migration', 'migrating', 'data', 'etl', 'elt', 'clickhouse-local', 'clickhouse-client']
slug: /cloud/migration/clickhouse-local
title: 'Migrating to ClickHouse using clickhouse-local'
diff --git a/docs/integrations/migration/etl-tool-to-clickhouse.md b/docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/03_etl-tool-to-clickhouse.md
similarity index 98%
rename from docs/integrations/migration/etl-tool-to-clickhouse.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/03_etl-tool-to-clickhouse.md
index f66e6ff2c47..32a0c168c5a 100644
--- a/docs/integrations/migration/etl-tool-to-clickhouse.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/03_etl-tool-to-clickhouse.md
@@ -1,6 +1,5 @@
---
sidebar_label: 'Using a 3rd-party ETL Tool'
-sidebar_position: 20
keywords: ['clickhouse', 'migrate', 'migration', 'migrating', 'data', 'etl', 'elt', 'clickhouse-local', 'clickhouse-client']
slug: /cloud/migration/etl-tool-to-clickhouse
title: 'Using a 3rd-party ETL Tool'
diff --git a/docs/integrations/migration/object-storage-to-clickhouse.md b/docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/04_object-storage-to-clickhouse.md
similarity index 97%
rename from docs/integrations/migration/object-storage-to-clickhouse.md
rename to docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/04_object-storage-to-clickhouse.md
index 2f323db04ef..5638fb48571 100644
--- a/docs/integrations/migration/object-storage-to-clickhouse.md
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/04_object-storage-to-clickhouse.md
@@ -1,5 +1,5 @@
---
-title: 'Object Storage to ClickHouse Cloud'
+title: 'Using object storage'
description: 'Moving data from object storage to ClickHouse Cloud'
keywords: ['object storage', 's3', 'azure blob', 'gcs', 'migration']
slug: /integrations/migration/object-storage-to-clickhouse
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/_category_.json b/docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/_category_.json
new file mode 100644
index 00000000000..61c592ce8a0
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/07_other_methods/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Other...",
+ "collapsible": true,
+ "collapsed": true,
+}
\ No newline at end of file
diff --git a/docs/cloud/onboard/02_migrate/01_migration_guides/_category_.json b/docs/cloud/onboard/02_migrate/01_migration_guides/_category_.json
new file mode 100644
index 00000000000..aca0c529bce
--- /dev/null
+++ b/docs/cloud/onboard/02_migrate/01_migration_guides/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Migration guides",
+ "collapsible": true,
+ "collapsed": true,
+}
\ No newline at end of file
diff --git a/docs/cloud/onboard/03_tune/_snippets/_monitoring_table_of_contents.md b/docs/cloud/onboard/03_tune/_snippets/_monitoring_table_of_contents.md
new file mode 100644
index 00000000000..e5d813d8226
--- /dev/null
+++ b/docs/cloud/onboard/03_tune/_snippets/_monitoring_table_of_contents.md
@@ -0,0 +1,3 @@
+| Page | Description |
+|------|-------------|
+| | |
diff --git a/docs/cloud/onboard/03_tune/resource_tour.md b/docs/cloud/onboard/03_tune/resource_tour.md
new file mode 100644
index 00000000000..2a4a23c3e64
--- /dev/null
+++ b/docs/cloud/onboard/03_tune/resource_tour.md
@@ -0,0 +1,54 @@
+---
+slug: /cloud/get-started/cloud/resource-tour
+title: 'Resource tour'
+keywords: ['clickhouse cloud']
+hide_title: true
+---
+
+import TableOfContentsBestPractices from '@site/docs/best-practices/_snippets/_table_of_contents.md';
+import TableOfContentsOptimizationAndPerformance from '@site/docs/guides/best-practices/_snippets/_performance_optimizations_table_of_contents.md';
+import TableOfContentsSecurity from '@site/docs/cloud/_snippets/_security_table_of_contents.md';
+
+# Resource tour
+
+This article provides an overview of the resources available in the docs to
+help you get the most out of your ClickHouse Cloud deployment. Explore
+resources organized by the following topics:
+
+- [Query optimization techniques and performance tuning](#query-optimization)
+- [Scaling strategies and resource management](#scaling)
+- [Monitoring](#monitoring)
+- [Security best practices and compliance features](#security)
+- [Cost optimization and billing](#cost-optimization)
+- Troubleshooting common issues (coming soon)
+- Production readiness checklist (coming soon)
+
+Before diving into more specific topics, we recommend starting with our
+general best practice guides, which cover the main best practices to follow
+when using ClickHouse:
+
+<TableOfContentsBestPractices />
+
+## Query optimization techniques and performance tuning {#query-optimization}
+
+<TableOfContentsOptimizationAndPerformance />
+
+## Scaling strategies and resource management {#scaling}
+
+## Monitoring {#monitoring}
+
+| Page | Description |
+|-----------------------------------------------------------------|-------------------------------------------------------------------------------|
+| [Advanced dashboard](/cloud/manage/monitor/advanced-dashboard)  | Use the built-in advanced dashboard to monitor service health and performance |
+| [Prometheus integration](/integrations/prometheus) | Use Prometheus to monitor Cloud services |
+
+## Security {#security}
+
+<TableOfContentsSecurity />
+
+## Cost optimization and billing {#cost-optimization}
+
+| Page | Description |
+|-----------------------------------------------------|-----------------------------------------------------------------------------------------------------------|
+| [Data transfer](/cloud/manage/network-data-transfer)| Understand how ClickHouse Cloud meters data transferred ingress and egress |
+| [Notifications](/cloud/notifications) | Set up notifications for your ClickHouse Cloud service. For example, when credit usage passes a threshold |
diff --git a/docs/cloud/onboard/index.md b/docs/cloud/onboard/index.md
new file mode 100644
index 00000000000..403ef1a094c
--- /dev/null
+++ b/docs/cloud/onboard/index.md
@@ -0,0 +1,46 @@
+---
+slug: /cloud/get-started
+title: 'Get started with ClickHouse Cloud'
+hide_title: true
+---
+
+# Get started with ClickHouse Cloud
+
+New to ClickHouse Cloud and not sure where to begin? In this section of the docs,
+we'll walk you through everything you need to get up and running quickly. We've
+arranged this getting started section into three subsections to help guide
+you through each step of the process as you explore ClickHouse Cloud.
+
+
+
+## Discover ClickHouse Cloud {#discover-clickhouse-cloud}
+
+- Learn about what ClickHouse Cloud is, and how it differs from the open-source version
+- Discover the main use-cases of ClickHouse Cloud
+- Learn about ClickHouse Cloud pricing
+
+## Get set up with ClickHouse Cloud {#get-set-up-with-clickhouse-cloud}
+
+Now that you know what ClickHouse Cloud is, we'll walk you through the process
+of getting your data into ClickHouse Cloud, show you the main features available
+and point you towards some general best practices you should know.
+
+Topics include:
+
+- Migration guides from various platforms
+- Cloud architecture
+
+## Tune your ClickHouse Cloud deployment {#tune-your-clickhouse-cloud-deployment}
+
+Now that your data is in ClickHouse Cloud, we'll walk you through some more advanced
+topics to help you get the most out of your ClickHouse Cloud experience and explore
+what the platform has to offer.
+
+Topics include:
+
+- Query performance
+- Monitoring
+- Security considerations
+- Troubleshooting tips
+
+
\ No newline at end of file
diff --git a/docs/cloud/reference/changelog.md b/docs/cloud/reference/01_changelog/01_changelog.md
similarity index 99%
rename from docs/cloud/reference/changelog.md
rename to docs/cloud/reference/01_changelog/01_changelog.md
index 8f67d0076de..c792b3f4f93 100644
--- a/docs/cloud/reference/changelog.md
+++ b/docs/cloud/reference/01_changelog/01_changelog.md
@@ -63,10 +63,10 @@ to get up and running.
- New services now store database and table metadata in a central **SharedCatalog**,
a new model for coordination and object lifecycles which enables:
- - **Cloud-scale DDL**, even under high concurrency
- - **Resilient deletion and new DDL operations**
- - **Fast spin-up and wake-ups** as stateless nodes now launch with no disk dependencies
- - **Stateless compute across both native and open formats**, including Iceberg and Delta Lake
+ - **Cloud-scale DDL**, even under high concurrency
+ - **Resilient deletion and new DDL operations**
+ - **Fast spin-up and wake-ups** as stateless nodes now launch with no disk dependencies
+ - **Stateless compute across both native and open formats**, including Iceberg and Delta Lake
Read more about SharedCatalog in our [blog](https://clickhouse.com/blog/clickhouse-cloud-stateless-compute)
@@ -767,12 +767,12 @@ This release upgrades the core database version, adds ability to set up private
### Integrations changes {#integrations-changes-4}
* Kafka Connect
- * Support async_insert for exactly once (disabled by default)
+ * Support async_insert for exactly once (disabled by default)
* Golang client
- * Fixed DateTime binding
- * Improved batch insert performance
+ * Fixed DateTime binding
+ * Improved batch insert performance
* Java client
- * Fixed request compression problem
+ * Fixed request compression problem
### Settings changes {#settings-changes}
* `use_mysql_types_in_show_columns` is no longer required. It will be automatically enabled when you connect through the MySQL interface.
diff --git a/docs/cloud/changelogs/24_02.md b/docs/cloud/reference/01_changelog/02_release_notes/24_02.md
similarity index 100%
rename from docs/cloud/changelogs/24_02.md
rename to docs/cloud/reference/01_changelog/02_release_notes/24_02.md
diff --git a/docs/cloud/changelogs/24_05.md b/docs/cloud/reference/01_changelog/02_release_notes/24_05.md
similarity index 100%
rename from docs/cloud/changelogs/24_05.md
rename to docs/cloud/reference/01_changelog/02_release_notes/24_05.md
diff --git a/docs/cloud/changelogs/24_06.md b/docs/cloud/reference/01_changelog/02_release_notes/24_06.md
similarity index 100%
rename from docs/cloud/changelogs/24_06.md
rename to docs/cloud/reference/01_changelog/02_release_notes/24_06.md
diff --git a/docs/cloud/changelogs/24_08.md b/docs/cloud/reference/01_changelog/02_release_notes/24_08.md
similarity index 100%
rename from docs/cloud/changelogs/24_08.md
rename to docs/cloud/reference/01_changelog/02_release_notes/24_08.md
diff --git a/docs/cloud/changelogs/24_10.md b/docs/cloud/reference/01_changelog/02_release_notes/24_10.md
similarity index 100%
rename from docs/cloud/changelogs/24_10.md
rename to docs/cloud/reference/01_changelog/02_release_notes/24_10.md
diff --git a/docs/cloud/changelogs/24_12.md b/docs/cloud/reference/01_changelog/02_release_notes/24_12.md
similarity index 100%
rename from docs/cloud/changelogs/24_12.md
rename to docs/cloud/reference/01_changelog/02_release_notes/24_12.md
diff --git a/docs/cloud/changelogs/25_04.md b/docs/cloud/reference/01_changelog/02_release_notes/25_04.md
similarity index 100%
rename from docs/cloud/changelogs/25_04.md
rename to docs/cloud/reference/01_changelog/02_release_notes/25_04.md
diff --git a/docs/cloud/changelogs/25_06.md b/docs/cloud/reference/01_changelog/02_release_notes/25_06.md
similarity index 100%
rename from docs/cloud/changelogs/25_06.md
rename to docs/cloud/reference/01_changelog/02_release_notes/25_06.md
diff --git a/docs/cloud/reference/01_changelog/02_release_notes/_category_.json b/docs/cloud/reference/01_changelog/02_release_notes/_category_.json
new file mode 100644
index 00000000000..4eeae460788
--- /dev/null
+++ b/docs/cloud/reference/01_changelog/02_release_notes/_category_.json
@@ -0,0 +1,6 @@
+{
+ "label": "Release notes",
+ "collapsible": true,
+ "collapsed": true,
+ "link": { "type": "doc", "id": "cloud/reference/changelog/release_notes/index" }
+}
\ No newline at end of file
diff --git a/docs/cloud/reference/release-notes-index.md b/docs/cloud/reference/01_changelog/02_release_notes/index.md
similarity index 100%
rename from docs/cloud/reference/release-notes-index.md
rename to docs/cloud/reference/01_changelog/02_release_notes/index.md
diff --git a/docs/cloud/reference/01_changelog/_category_.json b/docs/cloud/reference/01_changelog/_category_.json
new file mode 100644
index 00000000000..60a9e95ee7e
--- /dev/null
+++ b/docs/cloud/reference/01_changelog/_category_.json
@@ -0,0 +1,6 @@
+{
+ "label": "Change logs",
+ "collapsible": true,
+ "collapsed": true,
+ "link": { "type": "doc", "id": "cloud/reference/changelog/index" }
+}
\ No newline at end of file
diff --git a/docs/cloud/reference/changelogs-index.md b/docs/cloud/reference/01_changelog/index.md
similarity index 91%
rename from docs/cloud/reference/changelogs-index.md
rename to docs/cloud/reference/01_changelog/index.md
index c23e70f4ea2..cfdb11087f8 100644
--- a/docs/cloud/reference/changelogs-index.md
+++ b/docs/cloud/reference/01_changelog/index.md
@@ -7,4 +7,4 @@ description: 'Landing page for Cloud changelogs'
| Page | Description |
|---------------------------------------------------------------|-------------------------------------------------|
| [Cloud Changelog](/whats-new/cloud) | Changelog for ClickHouse Cloud |
-| [Release Notes](/cloud/reference/changelogs/release-notes) | Release notes for all ClickHouse Cloud releases |
+| [Release Notes](/cloud/reference/changelogs/release-notes) | Release notes for all ClickHouse Cloud releases |
\ No newline at end of file
diff --git a/docs/cloud/reference/architecture.md b/docs/cloud/reference/02_architecture.md
similarity index 98%
rename from docs/cloud/reference/architecture.md
rename to docs/cloud/reference/02_architecture.md
index 9c3d7cf5f56..6e3294d3a97 100644
--- a/docs/cloud/reference/architecture.md
+++ b/docs/cloud/reference/02_architecture.md
@@ -1,7 +1,7 @@
---
sidebar_label: 'Architecture'
slug: /cloud/reference/architecture
-title: 'ClickHouse Cloud Architecture'
+title: 'ClickHouse Cloud architecture'
description: 'This page describes the architecture of ClickHouse Cloud'
---
diff --git a/docs/cloud/manage/billing.md b/docs/cloud/reference/03_billing/01_billing_overview.md
similarity index 99%
rename from docs/cloud/manage/billing.md
rename to docs/cloud/reference/03_billing/01_billing_overview.md
index 3745df1d2aa..0d6993702d7 100644
--- a/docs/cloud/manage/billing.md
+++ b/docs/cloud/reference/03_billing/01_billing_overview.md
@@ -5,7 +5,7 @@ title: 'Pricing'
description: 'Overview page for ClickHouse Cloud pricing'
---
-import ClickPipesFAQ from './jan2025_faq/_snippets/_clickpipes_faq.md'
+import ClickPipesFAQ from '../09_jan2025_faq/_snippets/_clickpipes_faq.md'
For pricing information, see the [ClickHouse Cloud Pricing](https://clickhouse.com/pricing#pricing-calculator) page.
ClickHouse Cloud bills based on the usage of compute, storage, [data transfer](/cloud/manage/network-data-transfer) (egress over the internet and cross-region), and [ClickPipes](/integrations/clickpipes).
diff --git a/docs/cloud/manage/billing/marketplace/aws-marketplace-committed.md b/docs/cloud/reference/03_billing/02_marketplace/aws-marketplace-committed.md
similarity index 100%
rename from docs/cloud/manage/billing/marketplace/aws-marketplace-committed.md
rename to docs/cloud/reference/03_billing/02_marketplace/aws-marketplace-committed.md
diff --git a/docs/cloud/manage/billing/marketplace/aws-marketplace-payg.md b/docs/cloud/reference/03_billing/02_marketplace/aws-marketplace-payg.md
similarity index 100%
rename from docs/cloud/manage/billing/marketplace/aws-marketplace-payg.md
rename to docs/cloud/reference/03_billing/02_marketplace/aws-marketplace-payg.md
diff --git a/docs/cloud/manage/billing/marketplace/azure-marketplace-committed.md b/docs/cloud/reference/03_billing/02_marketplace/azure-marketplace-committed.md
similarity index 100%
rename from docs/cloud/manage/billing/marketplace/azure-marketplace-committed.md
rename to docs/cloud/reference/03_billing/02_marketplace/azure-marketplace-committed.md
diff --git a/docs/cloud/manage/billing/marketplace/azure-marketplace-payg.md b/docs/cloud/reference/03_billing/02_marketplace/azure-marketplace-payg.md
similarity index 100%
rename from docs/cloud/manage/billing/marketplace/azure-marketplace-payg.md
rename to docs/cloud/reference/03_billing/02_marketplace/azure-marketplace-payg.md
diff --git a/docs/cloud/manage/billing/marketplace/gcp-marketplace-committed.md b/docs/cloud/reference/03_billing/02_marketplace/gcp-marketplace-committed.md
similarity index 100%
rename from docs/cloud/manage/billing/marketplace/gcp-marketplace-committed.md
rename to docs/cloud/reference/03_billing/02_marketplace/gcp-marketplace-committed.md
diff --git a/docs/cloud/manage/billing/marketplace/gcp-marketplace-payg.md b/docs/cloud/reference/03_billing/02_marketplace/gcp-marketplace-payg.md
similarity index 100%
rename from docs/cloud/manage/billing/marketplace/gcp-marketplace-payg.md
rename to docs/cloud/reference/03_billing/02_marketplace/gcp-marketplace-payg.md
diff --git a/docs/cloud/manage/billing/marketplace/index.md b/docs/cloud/reference/03_billing/02_marketplace/index.md
similarity index 100%
rename from docs/cloud/manage/billing/marketplace/index.md
rename to docs/cloud/reference/03_billing/02_marketplace/index.md
diff --git a/docs/cloud/manage/billing/marketplace/overview.md b/docs/cloud/reference/03_billing/02_marketplace/overview.md
similarity index 100%
rename from docs/cloud/manage/billing/marketplace/overview.md
rename to docs/cloud/reference/03_billing/02_marketplace/overview.md
diff --git a/docs/cloud/manage/billing/payment-thresholds.md b/docs/cloud/reference/03_billing/03_payment-thresholds.md
similarity index 97%
rename from docs/cloud/manage/billing/payment-thresholds.md
rename to docs/cloud/reference/03_billing/03_payment-thresholds.md
index 0c2b6948d0e..2d9ce5f188a 100644
--- a/docs/cloud/manage/billing/payment-thresholds.md
+++ b/docs/cloud/reference/03_billing/03_payment-thresholds.md
@@ -1,7 +1,7 @@
---
sidebar_label: 'Payment Thresholds'
slug: /cloud/billing/payment-thresholds
-title: 'Payment Thresholds'
+title: 'Payment thresholds'
description: 'Payment thresholds and automatic invoicing for ClickHouse Cloud.'
keywords: ['billing', 'payment thresholds', 'automatic invoicing', 'invoice']
---
diff --git a/docs/cloud/reference/03_billing/04_network-data-transfer.mdx b/docs/cloud/reference/03_billing/04_network-data-transfer.mdx
new file mode 100644
index 00000000000..4013e1477b7
--- /dev/null
+++ b/docs/cloud/reference/03_billing/04_network-data-transfer.mdx
@@ -0,0 +1,56 @@
+---
+sidebar_label: 'Data Transfer'
+slug: /cloud/manage/network-data-transfer
+title: 'Data Transfer'
+description: 'Understand how ClickHouse Cloud meters data transferred ingress and egress'
+---
+
+import NetworkPricing from '@site/docs/cloud/reference/_snippets/_network_transfer_rates.md';
+
+ClickHouse Cloud meters data transferred ingress and egress.
+This includes any data in and out of ClickHouse Cloud as well as any intra-region
+and cross-region data transfer. This usage is tracked at the service level. Based
+on this usage, customers incur data transfer charges that are then added to their
+monthly bill.
+
+ClickHouse Cloud charges for:
+- Data egress from ClickHouse Cloud to the public Internet, including to other
+regions of other cloud providers.
+- Data egress to another region in the same cloud provider.
+
+There are no charges for intra-region data transfer or for Private Link/Private
+Service Connect use and data transfer. However, we reserve the right to implement
+additional data transfer pricing dimensions if we see usage patterns that impact
+our ability to charge users appropriately.
+
+Data transfer charges vary by Cloud Service Provider (CSP) and region.
+Public internet egress pricing is based only on the origin region.
+Inter-region (or cross-region) pricing depends on both the origin and destination
+regions.
+
+**Best practices to minimize data transfer costs**
+
+There are some patterns to keep in mind when ingressing and egressing data to
+minimize data transfer costs.
+
+1. When moving data into or out of ClickHouse Cloud, use compression where
+possible to minimize the amount of data transferred and the associated cost.
+
+2. Be aware that when doing an INSERT over the native protocol with non-inlined
+values (e.g. `INSERT INTO [TABLE] FROM INFILE [FILE] FORMAT NATIVE`), ClickHouse
+clients pull metadata from servers to pack the data. If the metadata is larger
+than the `INSERT` payload, you might counterintuitively see more egress than
+there is ingress from the server perspective. If this is unacceptable, consider
+inlining data with `VALUES` syntax or using the HTTP protocol.
+
+The tables below show how data transfer charges for egress vary across public
+internet or cross-region by cloud provider and region.
+
+:::note
+ClickHouse Cloud meters inter-region usage in terms of tiers, Tier 1 through
+Tier 4, depending on the origin and destination regions. The table below shows
+the tier for each combination of inter-region data transfer. In the Billing usage
+screen on ClickHouse Cloud you will see data transfer usage broken out by tiers.
+:::
+
+
diff --git a/docs/cloud/manage/troubleshooting-billing-issues.md b/docs/cloud/reference/03_billing/05_billing_compliance.md
similarity index 100%
rename from docs/cloud/manage/troubleshooting-billing-issues.md
rename to docs/cloud/reference/03_billing/05_billing_compliance.md
diff --git a/docs/cloud/manage/billing/index.md b/docs/cloud/reference/03_billing/index.md
similarity index 100%
rename from docs/cloud/manage/billing/index.md
rename to docs/cloud/reference/03_billing/index.md
diff --git a/docs/cloud/reference/supported-regions.md b/docs/cloud/reference/05_supported-regions.md
similarity index 98%
rename from docs/cloud/reference/supported-regions.md
rename to docs/cloud/reference/05_supported-regions.md
index f434b8786e1..4086227f4ab 100644
--- a/docs/cloud/reference/supported-regions.md
+++ b/docs/cloud/reference/05_supported-regions.md
@@ -1,6 +1,6 @@
---
title: 'Supported Cloud Regions'
-sidebar_label: 'Supported Cloud Regions'
+sidebar_label: 'Supported Cloud regions'
keywords: ['aws', 'gcp', 'google cloud', 'azure', 'cloud', 'regions']
description: 'Supported regions for ClickHouse Cloud'
slug: /cloud/reference/supported-regions
diff --git a/docs/cloud/manage/service-uptime.md b/docs/cloud/reference/06_service-uptime.md
similarity index 95%
rename from docs/cloud/manage/service-uptime.md
rename to docs/cloud/reference/06_service-uptime.md
index 3a31e459eaf..33397a626be 100644
--- a/docs/cloud/manage/service-uptime.md
+++ b/docs/cloud/reference/06_service-uptime.md
@@ -1,7 +1,7 @@
---
sidebar_label: 'Service Uptime and SLA'
slug: /cloud/manage/service-uptime
-title: 'Service Uptime'
+title: 'Service uptime'
description: 'Users can now see regional uptimes on the status page and subscribe to alerts on service disruptions.'
---
diff --git a/docs/cloud/manage/settings.md b/docs/cloud/reference/08_settings.md
similarity index 94%
rename from docs/cloud/manage/settings.md
rename to docs/cloud/reference/08_settings.md
index a766ef59c13..9926c5833cb 100644
--- a/docs/cloud/manage/settings.md
+++ b/docs/cloud/reference/08_settings.md
@@ -1,7 +1,7 @@
---
-sidebar_label: 'Configuring Settings'
+sidebar_label: 'Configuring settings'
slug: /manage/settings
-title: 'Configuring Settings'
+title: 'Configuring settings'
description: 'How to configure settings for your ClickHouse Cloud service for a specific user or role'
---
diff --git a/docs/cloud/manage/jan2025_faq/_snippets/_clickpipes_faq.md b/docs/cloud/reference/09_jan2025_faq/_snippets/_clickpipes_faq.md
similarity index 100%
rename from docs/cloud/manage/jan2025_faq/_snippets/_clickpipes_faq.md
rename to docs/cloud/reference/09_jan2025_faq/_snippets/_clickpipes_faq.md
diff --git a/docs/cloud/manage/jan2025_faq/backup.md b/docs/cloud/reference/09_jan2025_faq/backup.md
similarity index 100%
rename from docs/cloud/manage/jan2025_faq/backup.md
rename to docs/cloud/reference/09_jan2025_faq/backup.md
diff --git a/docs/cloud/manage/jan2025_faq/billing.md b/docs/cloud/reference/09_jan2025_faq/billing.md
similarity index 100%
rename from docs/cloud/manage/jan2025_faq/billing.md
rename to docs/cloud/reference/09_jan2025_faq/billing.md
diff --git a/docs/cloud/manage/jan2025_faq/dimensions.md b/docs/cloud/reference/09_jan2025_faq/dimensions.md
similarity index 94%
rename from docs/cloud/manage/jan2025_faq/dimensions.md
rename to docs/cloud/reference/09_jan2025_faq/dimensions.md
index c4dd9268593..01e4937000f 100644
--- a/docs/cloud/manage/jan2025_faq/dimensions.md
+++ b/docs/cloud/reference/09_jan2025_faq/dimensions.md
@@ -1,5 +1,5 @@
---
-title: 'New Pricing Dimensions'
+title: 'New pricing dimensions'
slug: /cloud/manage/jan-2025-faq/pricing-dimensions
keywords: ['new pricing', 'dimensions']
description: 'Pricing dimensions for data transfer and ClickPipes'
@@ -9,7 +9,7 @@ import Image from '@theme/IdealImage';
import clickpipesPricingFaq1 from '@site/static/images/cloud/manage/jan2025_faq/external_clickpipes_pricing_faq_1.png';
import clickpipesPricingFaq2 from '@site/static/images/cloud/manage/jan2025_faq/external_clickpipes_pricing_faq_2.png';
import clickpipesPricingFaq3 from '@site/static/images/cloud/manage/jan2025_faq/external_clickpipes_pricing_faq_3.png';
-import NetworkPricing from '@site/docs/cloud/manage/_snippets/_network_transfer_rates.md';
+import NetworkPricing from '@site/docs/cloud/reference/_snippets/_network_transfer_rates.md';
import ClickPipesFAQ from './_snippets/_clickpipes_faq.md'
The following dimensions have been added to the new ClickHouse Cloud pricing.
diff --git a/docs/cloud/manage/jan2025_faq/index.md b/docs/cloud/reference/09_jan2025_faq/index.md
similarity index 100%
rename from docs/cloud/manage/jan2025_faq/index.md
rename to docs/cloud/reference/09_jan2025_faq/index.md
diff --git a/docs/cloud/manage/jan2025_faq/new_tiers.md b/docs/cloud/reference/09_jan2025_faq/new_tiers.md
similarity index 99%
rename from docs/cloud/manage/jan2025_faq/new_tiers.md
rename to docs/cloud/reference/09_jan2025_faq/new_tiers.md
index b90874aedb8..6aa943808d2 100644
--- a/docs/cloud/manage/jan2025_faq/new_tiers.md
+++ b/docs/cloud/reference/09_jan2025_faq/new_tiers.md
@@ -1,5 +1,5 @@
---
-title: 'Description of New Tiers'
+title: 'Description of new tiers'
slug: /cloud/manage/jan-2025-faq/new-tiers
keywords: ['new tiers', 'features', 'pricing', 'description']
description: 'Description of new tiers and features'
diff --git a/docs/cloud/manage/jan2025_faq/plan_migrations.md b/docs/cloud/reference/09_jan2025_faq/plan_migrations.md
similarity index 99%
rename from docs/cloud/manage/jan2025_faq/plan_migrations.md
rename to docs/cloud/reference/09_jan2025_faq/plan_migrations.md
index fffdebfe45b..fc5cfc56233 100644
--- a/docs/cloud/manage/jan2025_faq/plan_migrations.md
+++ b/docs/cloud/reference/09_jan2025_faq/plan_migrations.md
@@ -1,5 +1,5 @@
---
-title: 'Migrating to New Plans'
+title: 'Migrating to new plans'
slug: /cloud/manage/jan-2025-faq/plan-migrations
keywords: ['migration', 'new tiers', 'pricing', 'cost', 'estimation']
description: 'Migrating to new plans, tiers, pricing, how to decide and estimate costs'
diff --git a/docs/cloud/manage/jan2025_faq/scaling.md b/docs/cloud/reference/09_jan2025_faq/scaling.md
similarity index 100%
rename from docs/cloud/manage/jan2025_faq/scaling.md
rename to docs/cloud/reference/09_jan2025_faq/scaling.md
diff --git a/docs/cloud/manage/jan2025_faq/summary.md b/docs/cloud/reference/09_jan2025_faq/summary.md
similarity index 100%
rename from docs/cloud/manage/jan2025_faq/summary.md
rename to docs/cloud/reference/09_jan2025_faq/summary.md
diff --git a/docs/cloud/reference/09_security/_category_.json b/docs/cloud/reference/09_security/_category_.json
new file mode 100644
index 00000000000..aed26fa7f7a
--- /dev/null
+++ b/docs/cloud/reference/09_security/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Security",
+ "collapsible": true,
+ "collapsed": true,
+}
\ No newline at end of file
diff --git a/docs/cloud/security/audit-logging.md b/docs/cloud/reference/09_security/audit-logging.md
similarity index 100%
rename from docs/cloud/security/audit-logging.md
rename to docs/cloud/reference/09_security/audit-logging.md
diff --git a/docs/cloud/reference/09_security/privacy_and_compliance/_category_.json b/docs/cloud/reference/09_security/privacy_and_compliance/_category_.json
new file mode 100644
index 00000000000..99beeb3e924
--- /dev/null
+++ b/docs/cloud/reference/09_security/privacy_and_compliance/_category_.json
@@ -0,0 +1,6 @@
+{
+ "label": "Privacy and compliance",
+ "collapsible": true,
+ "collapsed": true,
+ "link": { "type": "doc", "id": "cloud/reference/security/privacy_and_compliance/index" }
+}
\ No newline at end of file
diff --git a/docs/cloud/security/compliance-overview.md b/docs/cloud/reference/09_security/privacy_and_compliance/compliance-overview.md
similarity index 95%
rename from docs/cloud/security/compliance-overview.md
rename to docs/cloud/reference/09_security/privacy_and_compliance/compliance-overview.md
index 4653c0f09c1..8b5be8b5766 100644
--- a/docs/cloud/security/compliance-overview.md
+++ b/docs/cloud/reference/09_security/privacy_and_compliance/compliance-overview.md
@@ -1,9 +1,3 @@
----
-sidebar_label: 'Security and Compliance'
-slug: /cloud/security/security-and-compliance
-title: 'Security and Compliance'
-description: 'This page describes the security and compliance measures implemented by ClickHouse Cloud to protect customer data.'
----
import BetaBadge from '@theme/badges/BetaBadge';
import EnterprisePlanFeatureBadge from '@theme/badges/EnterprisePlanFeatureBadge';
diff --git a/docs/cloud/security/privacy-compliance-overview.md b/docs/cloud/reference/09_security/privacy_and_compliance/index.md
similarity index 100%
rename from docs/cloud/security/privacy-compliance-overview.md
rename to docs/cloud/reference/09_security/privacy_and_compliance/index.md
diff --git a/docs/cloud/security/personal-data-access.md b/docs/cloud/reference/09_security/privacy_and_compliance/personal-data-access.md
similarity index 98%
rename from docs/cloud/security/personal-data-access.md
rename to docs/cloud/reference/09_security/privacy_and_compliance/personal-data-access.md
index bcf4514b301..3bdc8ca3302 100644
--- a/docs/cloud/security/personal-data-access.md
+++ b/docs/cloud/reference/09_security/privacy_and_compliance/personal-data-access.md
@@ -1,7 +1,7 @@
---
-sidebar_label: 'Personal Data Access'
+sidebar_label: 'Personal data access'
slug: /cloud/security/personal-data-access
-title: 'Personal Data Access'
+title: 'Personal data access'
description: 'As a registered user, ClickHouse allows you to view and manage your personal account data, including contact information.'
---
diff --git a/docs/cloud/manage/account-close.md b/docs/cloud/reference/10_account-close.md
similarity index 98%
rename from docs/cloud/manage/account-close.md
rename to docs/cloud/reference/10_account-close.md
index ac9a79eeeea..021345d4a94 100644
--- a/docs/cloud/manage/account-close.md
+++ b/docs/cloud/reference/10_account-close.md
@@ -1,11 +1,12 @@
---
-sidebar_label: 'Delete Account'
+sidebar_label: 'Account closure'
slug: /cloud/manage/close_account
-title: 'Account Close & Deletion'
+title: 'Account closure and deletion'
description: 'We know there are circumstances that sometimes necessitate account closure. This guide will help you through the process.'
---
## Account closure and deletion {#account-close--deletion}
+
Our goal is to help you be successful in your project. If you have questions that are not answered on this site or need help evaluating a
unique use case, please contact us at [support@clickhouse.com](mailto:support@clickhouse.com).
diff --git a/docs/cloud/manage/_snippets/_network_transfer_rates.md b/docs/cloud/reference/_snippets/_network_transfer_rates.md
similarity index 100%
rename from docs/cloud/manage/_snippets/_network_transfer_rates.md
rename to docs/cloud/reference/_snippets/_network_transfer_rates.md
diff --git a/docs/cloud/security/_category_.yml b/docs/cloud/security/_category_.yml
deleted file mode 100644
index b7253753fd5..00000000000
--- a/docs/cloud/security/_category_.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-label: 'Cloud Security'
-collapsible: true
-collapsed: true
-link:
- type: generated-index
- title: Cloud Security
diff --git a/docs/cloud/security/index.md b/docs/cloud/security/index.md
deleted file mode 100644
index b6a2d56ab1b..00000000000
--- a/docs/cloud/security/index.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-slug: /cloud/security
-keywords: ['Cloud', 'Security']
-title: 'Overview'
-hide_title: true
-description: 'Landing page for ClickHouse Cloud Security'
----
-
-# ClickHouse Cloud security
-
-This section delves into security in ClickHouse Cloud and contains the following pages:
-
-| Page | Description |
-|---------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [Shared Responsibility Model](shared-responsibility-model.md) | Information on the security features offered for each service type. |
-| [Cloud Access Management](cloud-access-management/index.md) | Information on access control, authentication, SSO setup, common access management queries and how to invite new users. |
-| [Connectivity](connectivity-overview.md) | Information on setting IP filters, private networking, secure access of S3 data and Cloud IP addresses. |
-| [Enhanced Encryption](cmek.md) | Data at rest is encrypted by default using cloud provider-managed AES 256 keys. Customers may enable Transparent Data Encryption (TDE) to provide an additional layer of protection for service data. |
-| [Audit Logging](audit-logging.md) | A guide to audit logging in ClickHouse Cloud. |
-| [Privacy and Compliance](privacy-compliance-overview.md) | Information on security and compliance of ClickHouse Cloud, a guide on how to view and correct your personal information. |
diff --git a/docs/guides/best-practices/_snippets/_performance_optimizations_table_of_contents.md b/docs/guides/best-practices/_snippets/_performance_optimizations_table_of_contents.md
new file mode 100644
index 00000000000..3cbd396bb55
--- /dev/null
+++ b/docs/guides/best-practices/_snippets/_performance_optimizations_table_of_contents.md
@@ -0,0 +1,17 @@
+| Topic | Description |
+|---------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [Query optimization guide](/optimize/query-optimization) | Start here for query optimization fundamentals, covering common scenarios and performance techniques to improve query execution speed. |
+| [Primary indexes advanced guide](/guides/best-practices/sparse-primary-indexes) | Deep dive into ClickHouse's unique sparse primary indexing system, how it differs from traditional databases, and best practices for optimal indexing strategies. |
+| [Query parallelism](/optimize/query-parallelism) | Learn how ClickHouse parallelizes query execution using processing lanes and `max_threads` settings, including how to inspect and optimize parallel execution. |
+| [Partitioning key](/optimize/partitioning-key) | Master partition key selection to dramatically improve query performance by enabling efficient data segment pruning and avoiding common partitioning pitfalls. |
+| [Data skipping indexes](/optimize/skipping-indexes) | Apply secondary indexes strategically to skip irrelevant data blocks and accelerate filtered queries on non-primary key columns. |
+| [`PREWHERE` optimization](/optimize/prewhere) | Understand how `PREWHERE` automatically reduces I/O by filtering data before reading unnecessary columns, plus how to monitor its effectiveness. |
+| [Bulk inserts](/optimize/bulk-inserts) | Maximize ingestion throughput and reduce resource overhead by batching data insertions effectively. |
+| [Asynchronous inserts](/optimize/asynchronous-inserts) | Improve insert performance by leveraging server-side batching to reduce client-side complexity and increase throughput for high-frequency insertions. |
+| [Avoid mutations](/optimize/avoid-mutations) | Design append-only workflows that eliminate costly `UPDATE` and `DELETE` operations while maintaining data accuracy and performance. |
+| [Avoid nullable columns](/optimize/avoid-nullable-columns) | Reduce storage overhead and improve query performance by using default values instead of nullable columns where possible. |
+| [Avoid `OPTIMIZE FINAL`](/optimize/avoidoptimizefinal) | Understand when you should and should not use `OPTIMIZE TABLE FINAL` |
+| [Analyzer](/operations/analyzer) | Leverage ClickHouse's new query analyzer to identify performance bottlenecks and optimize query execution plans for better efficiency. |
+| [Query profiling](/operations/optimizing-performance/sampling-query-profiler) | Use the sampling query profiler to analyze query execution patterns, identify performance hot spots, and optimize resource usage. |
+| [Query cache](/operations/query-cache) | Accelerate frequently executed `SELECT` queries by enabling and configuring ClickHouse's built-in query result caching. |
+| [Testing hardware](/operations/performance-test) | Run ClickHouse performance benchmarks on any server without installation to evaluate hardware capabilities. (Not applicable to ClickHouse Cloud) |
\ No newline at end of file
diff --git a/docs/guides/best-practices/index.md b/docs/guides/best-practices/index.md
index 0c52281492f..ef320eaf03c 100644
--- a/docs/guides/best-practices/index.md
+++ b/docs/guides/best-practices/index.md
@@ -5,26 +5,12 @@ description: 'Overview page of Performance and Optimizations'
title: 'Performance and Optimizations'
---
-# Performance and optimizations
+import TableOfContents from '@site/docs/guides/best-practices/_snippets/_performance_optimizations_table_of_contents.md';
+
+# Performance and optimizations
This section contains tips and best practices for improving performance with ClickHouse.
We recommend users read [Core Concepts](/parts) as a precursor to this section,
which covers the main concepts required to improve performance.
-| Topic | Description |
-|---------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [Query Optimization Guide](/optimize/query-optimization) | A good place to start for query optimization, this simple guide describes common scenarios of how to use different performance and optimization techniques to improve query performance. |
-| [Primary Indexes Advanced Guide](/guides/best-practices/sparse-primary-indexes) | A deep dive into ClickHouse indexing including how it differs from other DB systems, how ClickHouse builds and uses a table's spare primary index and what some of the best practices are for indexing in ClickHouse. |
-| [Query Parallelism](/optimize/query-parallelism) | Explains how ClickHouse parallelizes query execution using processing lanes and the max_threads setting. Covers how data is distributed across lanes, how max_threads is applied, when it isn't fully used, and how to inspect execution with tools like EXPLAIN and trace logs. |
-| [Partitioning Key](/optimize/partitioning-key) | Delves into ClickHouse partition key optimization. Explains how choosing the right partition key can significantly improve query performance by allowing ClickHouse to quickly locate relevant data segments. Covers best practices for selecting efficient partition keys and potential pitfalls to avoid. |
-| [Data Skipping Indexes](/optimize/skipping-indexes) | Explains data skipping indexes as a way to optimize performance. |
-| [PREWHERE Optimization](/optimize/prewhere) | Explains how PREWHERE reduces I/O by avoiding reading unnecessary column data. Shows how it's applied automatically, how the filtering order is chosen, and how to monitor it using EXPLAIN and logs. |
-| [Bulk Inserts](/optimize/bulk-inserts) | Explains the benefits of using bulk inserts in ClickHouse. |
-| [Asynchronous Inserts](/optimize/asynchronous-inserts) | Focuses on ClickHouse's asynchronous inserts feature. It likely explains how asynchronous inserts work (batching data on the server for efficient insertion) and their benefits (improved performance by offloading insert processing). It might also cover enabling asynchronous inserts and considerations for using them effectively in your ClickHouse environment. |
-| [Avoid Mutations](/optimize/avoid-mutations) | Discusses the importance of avoiding mutations (updates and deletes) in ClickHouse. It recommends using append-only inserts for optimal performance and suggests alternative approaches for handling data changes. |
-| [Avoid nullable columns](/optimize/avoid-nullable-columns) | Discusses why you may want to avoid nullable columns to save space and increase performance. Demonstrates how to set a default value for a column. |
-| [Avoid `OPTIMIZE FINAL`](/optimize/avoidoptimizefinal) | Explains how the `OPTIMIZE TABLE ... FINAL` query is resource-intensive and suggests alternative approaches to optimize ClickHouse performance. |
-| [Analyzer](/operations/analyzer) | Looks at the ClickHouse Analyzer, a tool for analyzing and optimizing queries. Discusses how the Analyzer works, its benefits (e.g., identifying performance bottlenecks), and how to use it to improve your ClickHouse queries' efficiency. |
-| [Query Profiling](/operations/optimizing-performance/sampling-query-profiler) | Explains ClickHouse's Sampling Query Profiler, a tool that helps analyze query execution. |
-| [Query Cache](/operations/query-cache) | Details ClickHouse's Query Cache, a feature that aims to improve performance by caching the results of frequently executed `SELECT` queries. |
-| [Testing Hardware](/operations/performance-test) | How to run a basic ClickHouse performance test on any server without installation of ClickHouse packages. (Not applicable to ClickHouse Cloud) |
+<TableOfContents />
\ No newline at end of file
diff --git a/docs/integrations/data-ingestion/redshift/_snippets/_migration_guide.md b/docs/integrations/data-ingestion/redshift/_snippets/_migration_guide.md
new file mode 100644
index 00000000000..960120aa751
--- /dev/null
+++ b/docs/integrations/data-ingestion/redshift/_snippets/_migration_guide.md
@@ -0,0 +1,254 @@
+import redshiftToClickhouse from '@site/static/images/integrations/data-ingestion/redshift/redshift-to-clickhouse.png';
+import push from '@site/static/images/integrations/data-ingestion/redshift/push.png';
+import pull from '@site/static/images/integrations/data-ingestion/redshift/pull.png';
+import pivot from '@site/static/images/integrations/data-ingestion/redshift/pivot.png';
+import s3_1 from '@site/static/images/integrations/data-ingestion/redshift/s3-1.png';
+import s3_2 from '@site/static/images/integrations/data-ingestion/redshift/s3-2.png';
+import Image from '@theme/IdealImage';
+
+## Introduction {#introduction}
+
+[Amazon Redshift](https://aws.amazon.com/redshift/) is a popular cloud data warehousing solution that is part of the Amazon Web Services offerings. This guide presents different approaches to migrating data from a Redshift instance to ClickHouse. We will cover three options:
+
+
+
+From the ClickHouse instance standpoint, you can do one of the following:
+
+1. **[PUSH](#push-data-from-redshift-to-clickhouse)** data to ClickHouse using a third-party ETL/ELT tool or service
+
+2. **[PULL](#pull-data-from-redshift-to-clickhouse)** data from Redshift leveraging the ClickHouse JDBC Bridge
+
+3. **[PIVOT](#pivot-data-from-redshift-to-clickhouse-using-s3)** data via S3 object storage using an "Unload then load" logic
+
+:::note
+We used Redshift as a data source in this tutorial. However, the migration approaches presented here are not exclusive to Redshift, and similar steps can be derived for any compatible data source.
+:::
+
+## Push Data from Redshift to ClickHouse {#push-data-from-redshift-to-clickhouse}
+
+In the push scenario, the idea is to leverage a third-party tool or service (either custom code or an [ETL/ELT](https://en.wikipedia.org/wiki/Extract,_transform,_load#ETL_vs._ELT)) to send your data to your ClickHouse instance. For example, you can use software like [Airbyte](https://www.airbyte.com/) to move data between your Redshift instance (as a source) and ClickHouse as a destination ([see our integration guide for Airbyte](/integrations/data-ingestion/etl-tools/airbyte-and-clickhouse.md)).
+
+
+
+### Pros {#pros}
+
+* Leverages the existing catalog of connectors from the ETL/ELT software.
+* Built-in capabilities to keep data in sync (append/overwrite/increment logic).
+* Enables data transformation scenarios (for example, see our [integration guide for dbt](/integrations/data-ingestion/etl-tools/dbt/index.md)).
+
+### Cons {#cons}
+
+* Users need to set up and maintain an ETL/ELT infrastructure.
+* Introduces a third-party element in the architecture which can turn into a potential scalability bottleneck.
+
+## Pull Data from Redshift to ClickHouse {#pull-data-from-redshift-to-clickhouse}
+
+In the pull scenario, the idea is to leverage the ClickHouse JDBC Bridge to connect to a Redshift cluster directly from a ClickHouse instance and perform `INSERT INTO ... SELECT` queries:
+
+
+
+### Pros {#pros-1}
+
+* Generic to all JDBC-compatible tools.
+* An elegant solution that allows querying multiple external data sources from within ClickHouse.
+
+### Cons {#cons-1}
+
+* Requires a ClickHouse JDBC Bridge instance, which can turn into a potential scalability bottleneck.
+
+:::note
+Even though Redshift is based on PostgreSQL, using the ClickHouse PostgreSQL table function or table engine is not possible since ClickHouse requires PostgreSQL version 9 or above and the Redshift API is based on an earlier version (8.x).
+:::
+
+### Tutorial {#tutorial}
+
+To use this option, you need to set up a ClickHouse JDBC Bridge, a standalone Java application that handles JDBC connectivity and acts as a proxy between the ClickHouse instance and the data sources. For this tutorial, we used a Redshift instance pre-populated with a [sample database](https://docs.aws.amazon.com/redshift/latest/dg/c_sampledb.html).
+
+
+
+#### Deploy ClickHouse JDBC Bridge {#deploy-clickhouse-jdbc-bridge}
+
+Deploy the ClickHouse JDBC Bridge. For more details, see our user guide on [JDBC for External Data sources](/integrations/data-ingestion/dbms/jdbc-with-clickhouse.md).
+
+:::note
+If you are using ClickHouse Cloud, you will need to run your ClickHouse JDBC Bridge in a separate environment and connect to ClickHouse Cloud using the [remoteSecure](/sql-reference/table-functions/remote/) function.
+:::
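+
+As a minimal sketch of that setup (the Cloud hostname, user, and password below are placeholders, and the `redshift` datasource and `users_imported` table come from the steps that follow), you could run the following on the self-managed instance that can reach the JDBC Bridge to push rows into ClickHouse Cloud:
+
+```sql
+-- Runs on the self-managed instance next to the JDBC Bridge;
+-- the hostname and credentials are placeholders for your Cloud service.
+INSERT INTO FUNCTION remoteSecure('your-service.clickhouse.cloud:9440', 'default.users_imported', 'default', '<password>')
+SELECT * FROM jdbc('redshift', 'select username, firstname, lastname from users')
+```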
+
+#### Configure your Redshift datasource {#configure-your-redshift-datasource}
+
+Configure your Redshift datasource for ClickHouse JDBC Bridge, for example, in `/etc/clickhouse-jdbc-bridge/config/datasources/redshift.json`:
+
+```json
+{
+ "redshift-server": {
+ "aliases": [
+ "redshift"
+ ],
+ "driverUrls": [
+ "https://s3.amazonaws.com/redshift-downloads/drivers/jdbc/2.1.0.4/redshift-jdbc42-2.1.0.4.jar"
+ ],
+ "driverClassName": "com.amazon.redshift.jdbc.Driver",
+ "jdbcUrl": "jdbc:redshift://redshift-cluster-1.ckubnplpz1uv.us-east-1.redshift.amazonaws.com:5439/dev",
+ "username": "awsuser",
+ "password": "",
+ "maximumPoolSize": 5
+ }
+}
+```
+
+#### Query your Redshift instance from ClickHouse {#query-your-redshift-instance-from-clickhouse}
+
+Once the ClickHouse JDBC Bridge is deployed and running, you can start querying your Redshift instance from ClickHouse:
+
+```sql
+SELECT *
+FROM jdbc('redshift', 'select username, firstname, lastname from users limit 5')
+```
+
+```response
+Query id: 1b7de211-c0f6-4117-86a2-276484f9f4c0
+
+┌─username─┬─firstname─┬─lastname─┐
+│ PGL08LJI │ Vladimir │ Humphrey │
+│ XDZ38RDD │ Barry │ Roy │
+│ AEB55QTM │ Reagan │ Hodge │
+│ OWY35QYB │ Tamekah │ Juarez │
+│ MSD36KVR │ Mufutau │ Watkins │
+└──────────┴───────────┴──────────┘
+
+5 rows in set. Elapsed: 0.438 sec.
+```
+
+```sql
+SELECT *
+FROM jdbc('redshift', 'select count(*) from sales')
+```
+
+```response
+Query id: 2d0f957c-8f4e-43b2-a66a-cc48cc96237b
+
+┌──count─┐
+│ 172456 │
+└────────┘
+
+1 rows in set. Elapsed: 0.304 sec.
+```
+
+#### Import Data from Redshift to ClickHouse {#import-data-from-redshift-to-clickhouse}
+
+The following imports data using an `INSERT INTO ... SELECT` statement. First, create the target table:
+
+```sql
+-- Create the target table with three columns
+CREATE TABLE users_imported
+(
+ `username` String,
+ `firstname` String,
+ `lastname` String
+)
+ENGINE = MergeTree
+ORDER BY firstname
+```
+
+```response
+Query id: c7c4c44b-cdb2-49cf-b319-4e569976ab05
+
+Ok.
+
+0 rows in set. Elapsed: 0.233 sec.
+```
+
+```sql
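+-- Import the data from Redshift through the JDBC Bridge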
+INSERT INTO users_imported (*) SELECT *
+FROM jdbc('redshift', 'select username, firstname, lastname from users')
+```
+
+```response
+Query id: 9d3a688d-b45a-40f4-a7c7-97d93d7149f1
+
+Ok.
+
+0 rows in set. Elapsed: 4.498 sec. Processed 49.99 thousand rows, 2.49 MB (11.11 thousand rows/s., 554.27 KB/s.)
+```
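+
+As an optional sanity check (a suggested step, not part of the original tutorial), you can compare the imported row count against the source:
+
+```sql
+SELECT count(*) FROM users_imported
+```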
+
+
+
+## Pivot Data from Redshift to ClickHouse using S3 {#pivot-data-from-redshift-to-clickhouse-using-s3}
+
+In this scenario, we export data to S3 in an intermediary pivot format and, in a second step, load the data from S3 into ClickHouse.
+
+
+
+### Pros {#pros-2}
+
+* Both Redshift and ClickHouse have powerful S3 integration features.
+* Leverages existing features such as the Redshift `UNLOAD` command and the ClickHouse S3 table function/table engine.
+* Scales seamlessly thanks to parallel reads and high-throughput capabilities from/to S3 in ClickHouse.
+* Can leverage sophisticated and compressed formats like Apache Parquet.
+
+### Cons {#cons-2}
+
+* Two steps in the process (unload from Redshift then load into ClickHouse).
+
+### Tutorial {#tutorial-1}
+
+
+
+#### Export data into an S3 bucket using UNLOAD {#export-data-into-an-s3-bucket-using-unload}
+
+Using Redshift's [UNLOAD](https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html) feature, export the data into an existing private S3 bucket:
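+
+A sketch of such an `UNLOAD` command (the bucket path and IAM role are placeholders for your own resources):
+
+```sql
+UNLOAD ('SELECT username, firstname, lastname FROM users')
+TO 's3://your-bucket/unload/users/'
+IAM_ROLE 'arn:aws:iam::0123456789:role/RedshiftS3Access'
+CSV
+```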
+
+
+
+This generates part files containing the raw data in S3:
+
+
+
+#### Create the table in ClickHouse {#create-the-table-in-clickhouse}
+
+Create the table in ClickHouse:
+
+```sql
+CREATE TABLE users
+(
+ username String,
+ firstname String,
+ lastname String
+)
+ENGINE = MergeTree
+ORDER BY username
+```
+
+Alternatively, ClickHouse can try to infer the table structure using `CREATE TABLE ... EMPTY AS SELECT`:
+
+```sql
+CREATE TABLE users
+ENGINE = MergeTree ORDER BY username
+EMPTY AS
+SELECT * FROM s3('https://your-bucket.s3.amazonaws.com/unload/users/*', '', '', 'CSV')
+```
+
+This works especially well when the data is in a format that contains information about data types, like Parquet.
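+
+To preview what ClickHouse would infer before creating the table (a suggested check, reusing the placeholder bucket URL from above), you can describe the S3 source directly:
+
+```sql
+-- Shows the column names and types inferred from the S3 files
+DESCRIBE s3('https://your-bucket.s3.amazonaws.com/unload/users/*', '', '', 'CSV')
+```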
+
+#### Load S3 files into ClickHouse {#load-s3-files-into-clickhouse}
+
+Load the S3 files into ClickHouse using an `INSERT INTO ... SELECT` statement:
+
+```sql
+INSERT INTO users SELECT *
+FROM s3('https://your-bucket.s3.amazonaws.com/unload/users/*', '', '', 'CSV')
+```
+
+```response
+Query id: 2e7e219a-6124-461c-8d75-e4f5002c8557
+
+Ok.
+
+0 rows in set. Elapsed: 0.545 sec. Processed 49.99 thousand rows, 2.34 MB (91.72 thousand rows/s., 4.30 MB/s.)
+```
+
+:::note
+This example used CSV as the pivot format. For production workloads, however, we recommend Apache Parquet for large migrations: it comes with compression (by default, each row group is compressed using SNAPPY), which saves storage costs and reduces transfer times, and ClickHouse leverages Parquet's column orientation to speed up data ingestion.
+:::
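+
+As a sketch of that recommendation (the bucket path and IAM role are again placeholders), the same pivot with Parquet could look like:
+
+```sql
+-- In Redshift: unload to Parquet instead of CSV
+UNLOAD ('SELECT username, firstname, lastname FROM users')
+TO 's3://your-bucket/unload/users_parquet/'
+IAM_ROLE 'arn:aws:iam::0123456789:role/RedshiftS3Access'
+FORMAT AS PARQUET
+```
+
+```sql
+-- In ClickHouse: load the Parquet part files
+INSERT INTO users SELECT *
+FROM s3('https://your-bucket.s3.amazonaws.com/unload/users_parquet/*', '', '', 'Parquet')
+```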
+
+
\ No newline at end of file
diff --git a/docs/integrations/data-ingestion/redshift/index.md b/docs/integrations/data-ingestion/redshift/index.md
index 3e936cec37b..217609acecd 100644
--- a/docs/integrations/data-ingestion/redshift/index.md
+++ b/docs/integrations/data-ingestion/redshift/index.md
@@ -7,17 +7,11 @@ keywords: ['Redshift']
show_related_blogs: true
---
-import redshiftToClickhouse from '@site/static/images/integrations/data-ingestion/redshift/redshift-to-clickhouse.png';
-import push from '@site/static/images/integrations/data-ingestion/redshift/push.png';
-import pull from '@site/static/images/integrations/data-ingestion/redshift/pull.png';
-import pivot from '@site/static/images/integrations/data-ingestion/redshift/pivot.png';
-import s3_1 from '@site/static/images/integrations/data-ingestion/redshift/s3-1.png';
-import s3_2 from '@site/static/images/integrations/data-ingestion/redshift/s3-2.png';
-import Image from '@theme/IdealImage';
+import MigrationGuide from '@site/docs/integrations/data-ingestion/redshift/_snippets/_migration_guide.md';
-# Migrating data from Redshift to ClickHouse
+# Migrating Data from Redshift to ClickHouse
-## Related content {#related-content}
+## Related Content {#related-content}
-## Introduction {#introduction}
-
-[Amazon Redshift](https://aws.amazon.com/redshift/) is a popular cloud data warehousing solution that is part of the Amazon Web Services offerings. This guide presents different approaches to migrating data from a Redshift instance to ClickHouse. We will cover three options:
-
-
-
-From the ClickHouse instance standpoint, you can either:
-
-1. **[PUSH](#push-data-from-redshift-to-clickhouse)** data to ClickHouse using a third party ETL/ELT tool or service
-
-2. **[PULL](#pull-data-from-redshift-to-clickhouse)** data from Redshift leveraging the ClickHouse JDBC Bridge
-
-3. **[PIVOT](#pivot-data-from-redshift-to-clickhouse-using-s3)** using S3 object storage using an "Unload then load" logic
-
-:::note
-We used Redshift as a data source in this tutorial. However, the migration approaches presented here are not exclusive to Redshift, and similar steps can be derived for any compatible data source.
-:::
-
-## Push data from Redshift to ClickHouse {#push-data-from-redshift-to-clickhouse}
-
-In the push scenario, the idea is to leverage a third-party tool or service (either custom code or an [ETL/ELT](https://en.wikipedia.org/wiki/Extract,_transform,_load#ETL_vs._ELT)) to send your data to your ClickHouse instance. For example, you can use a software like [Airbyte](https://www.airbyte.com/) to move data between your Redshift instance (as a source) and ClickHouse as a destination ([see our integration guide for Airbyte](/integrations/data-ingestion/etl-tools/airbyte-and-clickhouse.md))
-
-
-
-### Pros {#pros}
-
-* It can leverage the existing catalog of connectors from the ETL/ELT software.
-* Built-in capabilities to keep data in sync (append/overwrite/increment logic).
-* Enable data transformation scenarios (for example, see our [integration guide for dbt](/integrations/data-ingestion/etl-tools/dbt/index.md)).
-
-### Cons {#cons}
-
-* Users need to set up and maintain an ETL/ELT infrastructure.
-* Introduces a third-party element in the architecture which can turn into a potential scalability bottleneck.
-
-## Pull data from Redshift to ClickHouse {#pull-data-from-redshift-to-clickhouse}
-
-In the pull scenario, the idea is to leverage the ClickHouse JDBC Bridge to connect to a Redshift cluster directly from a ClickHouse instance and perform `INSERT INTO ... SELECT` queries:
-
-
-
-### Pros {#pros-1}
-
-* Generic to all JDBC compatible tools
-* Elegant solution to allow querying multiple external data sources from within ClickHouse
-
-### Cons {#cons-1}
-
-* Requires a ClickHouse JDBC Bridge instance which can turn into a potential scalability bottleneck
-
-:::note
-Even though Redshift is based on PostgreSQL, using the ClickHouse PostgreSQL table function or table engine is not possible since ClickHouse requires PostgreSQL version 9 or above and the Redshift API is based on an earlier version (8.x).
-:::
-
-### Tutorial {#tutorial}
-
-To use this option, you need to set up a ClickHouse JDBC Bridge. ClickHouse JDBC Bridge is a standalone Java application that handles JDBC connectivity and acts as a proxy between the ClickHouse instance and the data sources. For this tutorial, we used a pre-populated Redshift instance with a [sample database](https://docs.aws.amazon.com/redshift/latest/dg/c_sampledb.html).
-
-1. Deploy the ClickHouse JDBC Bridge. For more details, see our user guide on [JDBC for External Data sources](/integrations/data-ingestion/dbms/jdbc-with-clickhouse.md)
-
-:::note
-If you are using ClickHouse Cloud, you will need to run your ClickHouse JDBC Bridge on a separate environment and connect to ClickHouse Cloud using the [remoteSecure](/sql-reference/table-functions/remote/) function
-:::
-
-2. Configure your Redshift datasource for ClickHouse JDBC Bridge. For example, `/etc/clickhouse-jdbc-bridge/config/datasources/redshift.json `
-
- ```json
- {
- "redshift-server": {
- "aliases": [
- "redshift"
- ],
- "driverUrls": [
- "https://s3.amazonaws.com/redshift-downloads/drivers/jdbc/2.1.0.4/redshift-jdbc42-2.1.0.4.jar"
- ],
- "driverClassName": "com.amazon.redshift.jdbc.Driver",
- "jdbcUrl": "jdbc:redshift://redshift-cluster-1.ckubnplpz1uv.us-east-1.redshift.amazonaws.com:5439/dev",
- "username": "awsuser",
- "password": "",
- "maximumPoolSize": 5
- }
- }
- ```
-
-3. Once ClickHouse JDBC Bridge deployed and running, you can start querying your Redshift instance from ClickHouse
-
- ```sql
- SELECT *
- FROM jdbc('redshift', 'select username, firstname, lastname from users limit 5')
- ```
-
- ```response
- Query id: 1b7de211-c0f6-4117-86a2-276484f9f4c0
-
- ┌─username─┬─firstname─┬─lastname─┐
- │ PGL08LJI │ Vladimir │ Humphrey │
- │ XDZ38RDD │ Barry │ Roy │
- │ AEB55QTM │ Reagan │ Hodge │
- │ OWY35QYB │ Tamekah │ Juarez │
- │ MSD36KVR │ Mufutau │ Watkins │
- └──────────┴───────────┴──────────┘
-
- 5 rows in set. Elapsed: 0.438 sec.
- ```
-
- ```sql
- SELECT *
- FROM jdbc('redshift', 'select count(*) from sales')
- ```
-
- ```response
- Query id: 2d0f957c-8f4e-43b2-a66a-cc48cc96237b
-
- ┌──count─┐
- │ 172456 │
- └────────┘
-
- 1 rows in set. Elapsed: 0.304 sec.
- ```
-
-4. In the following, we display importing data using an `INSERT INTO ... SELECT` statement
-
- ```sql
- # TABLE CREATION with 3 columns
- CREATE TABLE users_imported
- (
- `username` String,
- `firstname` String,
- `lastname` String
- )
- ENGINE = MergeTree
- ORDER BY firstname
- ```
-
- ```response
- Query id: c7c4c44b-cdb2-49cf-b319-4e569976ab05
-
- Ok.
-
- 0 rows in set. Elapsed: 0.233 sec.
- ```
-
- ```sql
- # IMPORTING DATA
- INSERT INTO users_imported (*) SELECT *
- FROM jdbc('redshift', 'select username, firstname, lastname from users')
- ```
-
- ```response
- Query id: 9d3a688d-b45a-40f4-a7c7-97d93d7149f1
-
- Ok.
-
- 0 rows in set. Elapsed: 4.498 sec. Processed 49.99 thousand rows, 2.49 MB (11.11 thousand rows/s., 554.27 KB/s.)
- ```
-
-## Pivot data from Redshift to ClickHouse using S3 {#pivot-data-from-redshift-to-clickhouse-using-s3}
-
-In this scenario, we export data to S3 in an intermediary pivot format and, in a second step, load the data from S3 into ClickHouse.
-
-
-
-### Pros {#pros-2}
-
-* Both Redshift and ClickHouse have powerful S3 integration features.
-* Leverages the existing features such as the Redshift `UNLOAD` command and ClickHouse S3 table function / table engine.
-* Scales seamlessly thanks to parallel reads and high throughput capabilities from/to S3 in ClickHouse.
-* Can leverage sophisticated and compressed formats like Apache Parquet.
-
-### Cons {#cons-2}
-
-* Two steps in the process (unload from Redshift then load into ClickHouse).
-
-### Tutorial {#tutorial-1}
-
-1. Using Redshift's [UNLOAD](https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html) feature, export the data into a an existing private S3 bucket:
-
-
-
- It will generate part files containing the raw data in S3
-
-
-
-2. Create the table in ClickHouse:
-
- ```sql
- CREATE TABLE users
- (
- username String,
- firstname String,
- lastname String
- )
- ENGINE = MergeTree
- ORDER BY username
- ```
-
- Alternatively, ClickHouse can try to infer the table structure using `CREATE TABLE ... EMPTY AS SELECT`:
-
- ```sql
- CREATE TABLE users
- ENGINE = MergeTree ORDER BY username
- EMPTY AS
- SELECT * FROM s3('https://your-bucket.s3.amazonaws.com/unload/users/*', '', '', 'CSV')
- ```
-
- This works especially well when the data is in a format that contains information about data types, like Parquet.
-
-3. Load the S3 files into ClickHouse using an `INSERT INTO ... SELECT` statement:
- ```sql
- INSERT INTO users SELECT *
- FROM s3('https://your-bucket.s3.amazonaws.com/unload/users/*', '', '', 'CSV')
- ```
-
- ```response
- Query id: 2e7e219a-6124-461c-8d75-e4f5002c8557
-
- Ok.
-
- 0 rows in set. Elapsed: 0.545 sec. Processed 49.99 thousand rows, 2.34 MB (91.72 thousand rows/s., 4.30 MB/s.)
- ```
-
-:::note
-This example used CSV as the pivot format. However, for production workloads we recommend Apache Parquet as the best option for large migrations since it comes with compression and can save some storage costs while reducing transfer times. (By default, each row group is compressed using SNAPPY). ClickHouse also leverages Parquet's column orientation to speed up data ingestion.
-:::
+
diff --git a/docs/migrations/index.md b/docs/migrations/index.md
deleted file mode 100644
index 3404415cac1..00000000000
--- a/docs/migrations/index.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-slug: migrations
-title: 'Migrations'
-pagination_prev: null
-pagination_next: null
-description: 'Landing page for the migrations section'
----
-
-| Page | Description |
-|------------------------------------------------------------------------|-------------------------------------------------|
-| [BigQuery](bigquery/index.md) | Migration guide for BigQuery |
-| [Snowflake](./snowflake.md) | Migration guide for Snowflake |
-| [PostgreSQL](postgres/index.md) | Migration guide for PostgreSQL |
-| [MySQL](../integrations/data-ingestion/dbms/mysql/index.md) | Migration guide for MySQL |
-| [Redshift](../integrations/data-ingestion/redshift/index.md) | Migration guide for Redshift |
-| [DynamoDB](../integrations/data-ingestion/dbms/dynamodb/index.md) | Migration guide for DynamoDB |
-| [Elasticsearch](/use-cases/observability/clickstack/migration/elastic) | Migration guide for Elasticsearch to ClickStack |
diff --git a/docs/whats-new/changelog/cloud.md b/docs/whats-new/changelog/cloud.md
index cbc3f51a2f8..dd3faf69c73 100644
--- a/docs/whats-new/changelog/cloud.md
+++ b/docs/whats-new/changelog/cloud.md
@@ -8,6 +8,6 @@ description: 'Learn about Cloud Changelog'
# Cloud Changelog
-import CloudChangelog from '@site/docs/cloud/reference/changelog.md';
+import CloudChangelog from '@site/docs/cloud/reference/01_changelog/01_changelog.md';
diff --git a/docs/whats-new/changelog/index.md b/docs/whats-new/changelog/index.md
index 5e70c9ecc81..4a14fcb7dbf 100644
--- a/docs/whats-new/changelog/index.md
+++ b/docs/whats-new/changelog/index.md
@@ -7,1386 +7,3 @@ sidebar_label: '2025'
title: '2025 Changelog'
---
-### Table of Contents
-**[ClickHouse release v25.7, 2025-07-24](#256)**
-**[ClickHouse release v25.6, 2025-06-26](#256)**
-**[ClickHouse release v25.5, 2025-05-22](#255)**
-**[ClickHouse release v25.4, 2025-04-22](#254)**
-**[ClickHouse release v25.3 LTS, 2025-03-20](#253)**
-**[ClickHouse release v25.2, 2025-02-27](#252)**
-**[ClickHouse release v25.1, 2025-01-28](#251)**
-**[Changelog for 2024](https://clickhouse.com/docs/whats-new/changelog/2024/)**
-**[Changelog for 2023](https://clickhouse.com/docs/whats-new/changelog/2023/)**
-**[Changelog for 2022](https://clickhouse.com/docs/whats-new/changelog/2022/)**
-**[Changelog for 2021](https://clickhouse.com/docs/whats-new/changelog/2021/)**
-**[Changelog for 2020](https://clickhouse.com/docs/whats-new/changelog/2020/)**
-**[Changelog for 2019](https://clickhouse.com/docs/whats-new/changelog/2019/)**
-**[Changelog for 2018](https://clickhouse.com/docs/whats-new/changelog/2018/)**
-**[Changelog for 2017](https://clickhouse.com/docs/whats-new/changelog/2017/)**
-
-
-### ClickHouse release 25.7, 2025-07-24 {#257}
-
-#### Backward Incompatible Change
-* Changes to `extractKeyValuePairs` function: Introduce a new argument `unexpected_quoting_character_strategy` that controls what happens when a `quoting_character` is unexpectedly found when reading a non quoted key or value. The value can be one of: `invalid`, `accept` or `promote`. Invalid will discard the key and go back to waiting key state. Accept will treat it as part of the key. Promote will discard previous character and start parsing as a quoted key. In addition, after parsing a quoted value, only parse the next key if a pair delimiter is found. [#80657](https://github.com/ClickHouse/ClickHouse/pull/80657) ([Arthur Passos](https://github.com/arthurpassos)).
-* Support zero-byte match in `countMatches` function. Users who like to retain the old behavior can enable setting `count_matches_stop_at_empty_match`. [#81676](https://github.com/ClickHouse/ClickHouse/pull/81676) ([Elmi Ahmadov](https://github.com/ahmadov)).
-* Use server-wide throttlers for local (`max_local_read_bandwidth_for_server` and `max_local_write_bandwidth_for_server`) and remote (`max_remote_read_network_bandwidth_for_server` and `max_remote_write_network_bandwidth_for_server`) when generating BACKUPs in addition to their dedicated server settings (`max_backup_bandwidth_for_server`, `max_mutations_bandwidth_for_server` and `max_merges_bandwidth_for_server`). [#81753](https://github.com/ClickHouse/ClickHouse/pull/81753) ([Sergei Trifonov](https://github.com/serxa)).
-* Forbid the creation of a table without insertable columns. [#81835](https://github.com/ClickHouse/ClickHouse/pull/81835) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Cluster functions with archives used to send over the whole archives to replicas, making reading within archives not-parallelizable with clusters (e.g. with a single archive we would just send it to one of the replicas as a whole to process and all other replicas will just be idle, which is inefficient). Added a new setting `cluster_function_process_archive_on_multiple_nodes`, by default equal to `true`. If set to `true`, increases performance of processing archives in cluster functions. Should be set to `false` for compatibility and to avoid errors during upgrade to 25.7+ if using cluster functions with archives on earlier versions. [#82355](https://github.com/ClickHouse/ClickHouse/pull/82355) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* `SYSTEM RESTART REPLICAS` query led to the wakeup of tables in the Lazy database, even without access to that database, and it happened while these tables were being concurrently dropped. Note: Now `SYSTEM RESTART REPLICAS` will only restart replicas in the databases where you have permission to `SHOW TABLES`, which is natural. [#83321](https://github.com/ClickHouse/ClickHouse/pull/83321) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-
-#### New Feature
-* Added support for lightweight updates for `MergeTree`-family tables. Lightweight updates can be used by a new syntax: `UPDATE <table> SET col1 = val1, col2 = val2, ... WHERE <condition>`. Added implementation of lightweight deletes via lightweight updates. It can be enabled by setting `lightweight_delete_mode = 'lightweight_update'`. [#82004](https://github.com/ClickHouse/ClickHouse/pull/82004) ([Anton Popov](https://github.com/CurtizJ)).
-* Support complex types in iceberg schema evolution. [#73714](https://github.com/ClickHouse/ClickHouse/pull/73714) ([Konstantin Vedernikov](https://github.com/scanhex12)).
-* NumericIndexedVector: new vector data-structure backed by bit-sliced, Roaring-bitmap compression, together with more than 20 functions for building, analysing and point-wise arithmetic. Can cut storage and speed up joins, filters and aggregations on sparse data. Implements [#70582](https://github.com/ClickHouse/ClickHouse/issues/70582) and [“Large-Scale Metric Computation in Online Controlled Experiment Platform” paper](https://arxiv.org/abs/2405.08411) by T. Xiong and Y. Wang from VLDB 2024. [#74193](https://github.com/ClickHouse/ClickHouse/pull/74193) ([FriendLey](https://github.com/FriendLey)).
-* The workload setting `max_waiting_queries` is now supported. It can be used to limit the size of the query queue. If the limit is reached, all subsequent queries will be terminated with the `SERVER_OVERLOADED` error. [#81250](https://github.com/ClickHouse/ClickHouse/pull/81250) ([Oleg Doronin](https://github.com/dorooleg)).
-* Add financial functions: `financialInternalRateOfReturnExtended` (`XIRR`), `financialInternalRateOfReturn` (`IRR`), `financialNetPresentValueExtended` (`XNPV`), `financialNetPresentValue` (`NPV`). [#81599](https://github.com/ClickHouse/ClickHouse/pull/81599) ([Joanna Hulboj](https://github.com/jh0x)).
-* Add the geospatial functions `polygonIntersectsCartesian` and `polygonIntersectsSpherical` to check if two polygons intersect. [#81882](https://github.com/ClickHouse/ClickHouse/pull/81882) ([Paul Lamb](https://github.com/plamb)).
-* Support `_part_granule_offset` virtual column in MergeTree-family tables. This column indicates the zero-based index of the granule/mark each row belongs to within its data part. This addresses [#79572](https://github.com/ClickHouse/ClickHouse/issues/79572). [#82341](https://github.com/ClickHouse/ClickHouse/pull/82341) ([Amos Bird](https://github.com/amosbird)).
-* Introduce Iceberg writes for `insert` queries. [#82692](https://github.com/ClickHouse/ClickHouse/pull/82692) ([Konstantin Vedernikov](https://github.com/scanhex12)).
-* Add SZ3 as a lossy yet error-bounded compression codec for columns of type `Float32` and `Float64`. [#83088](https://github.com/ClickHouse/ClickHouse/pull/83088) ([Konstantin Vedernikov](https://github.com/scanhex12)).
-* Add AI-powered SQL generation to ClickHouse client. Users can now generate SQL queries from natural language descriptions by prefixing their query with "??". Supports OpenAI and Anthropic providers with automatic schema discovery. [#83314](https://github.com/ClickHouse/ClickHouse/pull/83314) ([Kaushik Iska](https://github.com/iskakaushik)).
-* Read iceberg data files by field ids. This closes [#83065](https://github.com/ClickHouse/ClickHouse/issues/83065). [#83653](https://github.com/ClickHouse/ClickHouse/pull/83653) ([Konstantin Vedernikov](https://github.com/scanhex12)).
-* Added SQL functions `colorSRGBToOkLCH` and `colorOkLCHToSRGB` for converting colours between the sRGB and OkLCH colour spaces. [#83679](https://github.com/ClickHouse/ClickHouse/pull/83679) ([Fgrtue](https://github.com/Fgrtue)).
-* Add _part_granule_offset column". [#82341](https://github.com/ClickHouse/ClickHouse/pull/82341) ([Amos Bird](https://github.com/amosbird)).
-
-#### Experimental Feature
-* Added functions `searchAny` and `searchAll` which are general purpose tools to search text indexes. [#80641](https://github.com/ClickHouse/ClickHouse/pull/80641) ([Elmi Ahmadov](https://github.com/ahmadov)).
-* The text index now supports `string` tokenizer. [#81752](https://github.com/ClickHouse/ClickHouse/pull/81752) ([Elmi Ahmadov](https://github.com/ahmadov)).
-* Changed the default index granularity value for `text` indexes to 64. This improves the expected performance for the average test query in internal benchmarks. [#82162](https://github.com/ClickHouse/ClickHouse/pull/82162) ([Jimmy Aguilar Mena](https://github.com/Ergus)).
-* The 256-bit bitmap stores the outgoing labels of a state ordered, but outgoing states are saved into disk in the order they appear in the hash table. Therefore, a label would point to a wrong next state while reading from disk. [#82783](https://github.com/ClickHouse/ClickHouse/pull/82783) ([Elmi Ahmadov](https://github.com/ahmadov)).
-* Enable zstd compression for FST tree blob. [#83093](https://github.com/ClickHouse/ClickHouse/pull/83093) ([Elmi Ahmadov](https://github.com/ahmadov)).
-* Promote vector similarity index to BETA. Introduced alias setting `enable_vector_similarity_index` which must be enabled to use the vector similarity index. [#83459](https://github.com/ClickHouse/ClickHouse/pull/83459) ([Robert Schulze](https://github.com/rschu1ze)).
-
-#### Performance Improvement
-* Trivial optimization for -If combinator. [#78454](https://github.com/ClickHouse/ClickHouse/pull/78454) ([李扬](https://github.com/taiyang-li)).
-* Vector search queries using a vector similarity index complete with lower latency due to reduced storage reads and reduced CPU usage. [#79103](https://github.com/ClickHouse/ClickHouse/pull/79103) ([Shankar Iyer](https://github.com/shankar-iyer)).
-* Respect `merge_tree_min_{rows,bytes}_for_seek` in `filterPartsByQueryConditionCache` to align it with other methods filtering by indexes. [#80312](https://github.com/ClickHouse/ClickHouse/pull/80312) ([李扬](https://github.com/taiyang-li)).
-* Make the pipeline after the TOTALS step multithreaded. [#80331](https://github.com/ClickHouse/ClickHouse/pull/80331) ([UnamedRus](https://github.com/UnamedRus)).
-* Fix filter by key for Redis and KeeperMap storages. [#81833](https://github.com/ClickHouse/ClickHouse/pull/81833) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Add new setting `min_joined_block_size_rows` (analogous to `min_joined_block_size_bytes`; defaults to 65409) to control the minimum block size (in rows) for JOIN input and output blocks (if the join algorithm supports it). Small blocks will be squashed. [#81886](https://github.com/ClickHouse/ClickHouse/pull/81886) ([Nikita Taranov](https://github.com/nickitat)).
-* When the aggregation query contains only a single `COUNT()` function on a NOT NULL column, the aggregation logic is fully inlined during hash table probing. This avoids allocating and maintaining any aggregation state, significantly reducing memory usage and CPU overhead. This partially addresses [#81982](https://github.com/ClickHouse/ClickHouse/issues/81982). [#82104](https://github.com/ClickHouse/ClickHouse/pull/82104) ([Amos Bird](https://github.com/amosbird)).
-* Performance of `HashJoin` optimised by removing the additional loop over hash maps in the typical case of only one key column, also `null_map` and `join_mask` checks are eliminated when they're always `true`/`false`. [#82308](https://github.com/ClickHouse/ClickHouse/pull/82308) ([Nikita Taranov](https://github.com/nickitat)).
-* `ATTACH PARTITION` no longer leads to the dropping of all caches. [#82377](https://github.com/ClickHouse/ClickHouse/pull/82377) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Optimize the generated plan for correlated subqueries by removing redundant JOIN operations using equivalence classes. If there are equivalent expressions for all correlated columns, `CROSS JOIN` is not produced if `query_plan_correlated_subqueries_use_substitution` setting is enabled. [#82435](https://github.com/ClickHouse/ClickHouse/pull/82435) ([Dmitry Novik](https://github.com/novikd)).
-* Read only required columns in correlated subquery when it appears to be an argument of function `EXISTS`. [#82443](https://github.com/ClickHouse/ClickHouse/pull/82443) ([Dmitry Novik](https://github.com/novikd)).
-* Introduce async logging. [#82516](https://github.com/ClickHouse/ClickHouse/pull/82516) ([Raúl Marín](https://github.com/Algunenano)).
-* Speedup QueryTreeHash a bit. [#82617](https://github.com/ClickHouse/ClickHouse/pull/82617) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Add alignment in the Counter of ProfileEvents to reduce false sharing. [#82697](https://github.com/ClickHouse/ClickHouse/pull/82697) ([Jiebin Sun](https://github.com/jiebinn)).
-* Parallel distributed INSERT SELECT is enabled by default in mode where INSERT SELECT executed on each shard independently, see `parallel_distributed_insert_select` setting. [#83040](https://github.com/ClickHouse/ClickHouse/pull/83040) ([Igor Nikonov](https://github.com/devcrafter)).
-* The optimizations for `null_map` and `JoinMask` from [#82308](https://github.com/ClickHouse/ClickHouse/issues/82308) were applied to the case of JOIN with multiple disjuncts. Also, the `KnownRowsHolder` data structure was optimized. [#83041](https://github.com/ClickHouse/ClickHouse/pull/83041) ([Nikita Taranov](https://github.com/nickitat)).
-* Plain `std::vector` is used for join flags to avoid calculating a hash on each access to flags. [#83043](https://github.com/ClickHouse/ClickHouse/pull/83043) ([Nikita Taranov](https://github.com/nickitat)).
-* Don't pre-allocate memory for result columns beforehand when `HashJoin` uses `lazy` output mode. It is suboptimal, especially when the number of matches is low. Moreover, we know the exact amount of matches after joining is done, so we can preallocate more precisely. [#83304](https://github.com/ClickHouse/ClickHouse/pull/83304) ([Nikita Taranov](https://github.com/nickitat)).
-* Minimize memory copy in port headers during pipeline construction. Original [PR](https://github.com/ClickHouse/ClickHouse/pull/70105) by [heymind](https://github.com/heymind). [#83381](https://github.com/ClickHouse/ClickHouse/pull/83381) ([Raúl Marín](https://github.com/Algunenano)).
-* Improve Keeper with rocksdb initial loading. [#83390](https://github.com/ClickHouse/ClickHouse/pull/83390) ([Antonio Andelic](https://github.com/antonio2368)).
-* Avoid holding the lock while creating storage snapshot data to reduce lock contention with high concurrent load. [#83510](https://github.com/ClickHouse/ClickHouse/pull/83510) ([Duc Canh Le](https://github.com/canhld94)).
-* Improved performance of the ProtobufSingle input format by reusing the serializer when no parsing errors occur. [#83613](https://github.com/ClickHouse/ClickHouse/pull/83613) ([Eduard Karacharov](https://github.com/korowa)).
-* Improve the performance of pipeline building. [#83631](https://github.com/ClickHouse/ClickHouse/pull/83631) ([Raúl Marín](https://github.com/Algunenano)).
-* Optimize MergeTreeReadersChain::getSampleBlock. [#83875](https://github.com/ClickHouse/ClickHouse/pull/83875) ([Raúl Marín](https://github.com/Algunenano)).
-
-#### Improvement
-* Introduced two new access types: `READ` and `WRITE` for sources and deprecates all previous access types related to sources. Before `GRANT S3 ON *.* TO user`, now: `GRANT READ, WRITE ON S3 TO user`. This also allows to separate `READ` and `WRITE` permissions for sources, e.g.: `GRANT READ ON * TO user`, `GRANT WRITE ON S3 TO user`. The feature is controlled by a setting `access_control_improvements.enable_read_write_grants` and disabled by default. [#73659](https://github.com/ClickHouse/ClickHouse/pull/73659) ([pufit](https://github.com/pufit)).
-* Verify the part has consistent checksum.txt file right before committing it. [#76625](https://github.com/ClickHouse/ClickHouse/pull/76625) ([Sema Checherinda](https://github.com/CheSema)).
-* Implement methods `moveFile` and `replaceFile` in s3_plain_rewritable to support it as a database disk. [#79424](https://github.com/ClickHouse/ClickHouse/pull/79424) ([Tuan Pham Anh](https://github.com/tuanpach)).
-* Allow backups for PostgreSQL, MySQL & DataLake databases. A backup of such a database would only save the definition and not the data inside of it. [#79982](https://github.com/ClickHouse/ClickHouse/pull/79982) ([Nikolay Degterinsky](https://github.com/evillique)).
-* Support position deletes for Iceberg TableEngine. [#80237](https://github.com/ClickHouse/ClickHouse/pull/80237) ([YanghongZhong](https://github.com/yahoNanJing)).
-* Setting `allow_experimental_join_condition` marked as obsolete. [#80566](https://github.com/ClickHouse/ClickHouse/pull/80566) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Add pressure metrics to ClickHouse async metrics. [#80779](https://github.com/ClickHouse/ClickHouse/pull/80779) ([Xander Garbett](https://github.com/Garbett1)).
-* Added metrics `MarkCacheEvictedBytes`, `MarkCacheEvictedMarks`, `MarkCacheEvictedFiles` for tracking evictions from the mark cache. (issue [#60989](https://github.com/ClickHouse/ClickHouse/issues/60989)). [#80799](https://github.com/ClickHouse/ClickHouse/pull/80799) ([Shivji Kumar Jha](https://github.com/shiv4289)).
-* Speedup tables listing in data catalogs by asynchronous requests. [#81084](https://github.com/ClickHouse/ClickHouse/pull/81084) ([alesapin](https://github.com/alesapin)).
-* Support writing parquet enum as byte array as the [spec](https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#enum) dictates. I'll write more info later. [#81090](https://github.com/ClickHouse/ClickHouse/pull/81090) ([Arthur Passos](https://github.com/arthurpassos)).
-* An improvement for `DeltaLake` table engine: delta-kernel-rs has `ExpressionVisitor` API which is implemented in this PR and is applied to partition column expressions transform (it will replace an old deprecated within the delta-kernel-rs way, which was used before in our code). In the future this `ExpressionVisitor` will also allow to implement statistics based pruning and some delta-lake proprietary features. Additionally the purpose of this change is to support partition pruning in `DeltaLakeCluster` table engine (the result of a parsed expression - ActionsDAG - will be serialized and sent from the initiator along with the data path, because this kind of information, which is needed for pruning, is only available as meta information on data files listing, which is done by initiator only, but it has to be applied to data on each reading server). [#81136](https://github.com/ClickHouse/ClickHouse/pull/81136) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Try to preserve element names when deriving supertypes for named tuples. [#81345](https://github.com/ClickHouse/ClickHouse/pull/81345) ([lgbo](https://github.com/lgbo-ustc)).
-* Allow parameters in `CREATE USER` queries for usernames. [#81387](https://github.com/ClickHouse/ClickHouse/pull/81387) ([Diskein](https://github.com/Diskein)).
-* Now clickhouse supports compressed `metadata.json` files for Iceberg. Fixes [#70874](https://github.com/ClickHouse/ClickHouse/issues/70874). [#81451](https://github.com/ClickHouse/ClickHouse/pull/81451) ([alesapin](https://github.com/alesapin)).
-* The `system.formats` table now contains extended information about formats, such as HTTP content type, the capabilities of schema inference, etc. [#81505](https://github.com/ClickHouse/ClickHouse/pull/81505) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Count consumed messages manually to avoid depending on previous committed offset in StorageKafka2. [#81662](https://github.com/ClickHouse/ClickHouse/pull/81662) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* Added `clickhouse-keeper-utils`, a new command-line tool for managing and analyzing ClickHouse Keeper data. The tool supports dumping state from snapshots and changelogs, analyzing changelog files, and extracting specific log ranges. [#81677](https://github.com/ClickHouse/ClickHouse/pull/81677) ([Antonio Andelic](https://github.com/antonio2368)).
-* The total and per-user network throttlers are never reset, which ensures that `max_network_bandwidth_for_all_users` and `max_network_bandwidth_for_all_users` limits are never exceeded. [#81729](https://github.com/ClickHouse/ClickHouse/pull/81729) ([Sergei Trifonov](https://github.com/serxa)).
-* Support writing geoparquets as output format. [#81784](https://github.com/ClickHouse/ClickHouse/pull/81784) ([Konstantin Vedernikov](https://github.com/scanhex12)).
-* Forbid to start `RENAME COLUMN` alter mutation if it will rename some column that right now affected by incomplete data mutation. [#81823](https://github.com/ClickHouse/ClickHouse/pull/81823) ([Mikhail Artemenko](https://github.com/Michicosun)).
-* Introduce jitter to the S3 retry mechanism when the `s3_slow_all_threads_after_network_error` configuration is enabled. [#81849](https://github.com/ClickHouse/ClickHouse/pull/81849) ([zoomxi](https://github.com/zoomxi)).
-* Header Connection is send at the end of headers. When we know is the connection should be preserved. [#81951](https://github.com/ClickHouse/ClickHouse/pull/81951) ([Sema Checherinda](https://github.com/CheSema)).
-* Tune TCP servers queue (64 by default) based on listen_backlog (4096 by default). [#82045](https://github.com/ClickHouse/ClickHouse/pull/82045) ([Azat Khuzhin](https://github.com/azat)).
-* Add ability to reload `max_local_read_bandwidth_for_server` and `max_local_write_bandwidth_for_server` on fly without restart server. [#82083](https://github.com/ClickHouse/ClickHouse/pull/82083) ([Kai Zhu](https://github.com/nauu)).
-* Add support for clearing all warnings from the `system.warnings` table using `TRUNCATE TABLE system.warnings`. [#82087](https://github.com/ClickHouse/ClickHouse/pull/82087) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Fix partition pruning with data lake cluster functions. [#82131](https://github.com/ClickHouse/ClickHouse/pull/82131) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix reading partitioned data in DeltaLakeCluster table function. In this PR cluster functions protocol version is increased, allowing to send extra info from initiator to replicas. This extra info contains delta-kernel transform expression, which is needed to parse partition columns (and some other staff in the future, like generated columns, etc). [#82132](https://github.com/ClickHouse/ClickHouse/pull/82132) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix a list of problems that can occur when trying to run integration tests on a local host. [#82135](https://github.com/ClickHouse/ClickHouse/pull/82135) ([Oleg Doronin](https://github.com/dorooleg)).
-* Now database Datalake throw more convenient exception. Fixes [#81211](https://github.com/ClickHouse/ClickHouse/issues/81211). [#82304](https://github.com/ClickHouse/ClickHouse/pull/82304) ([alesapin](https://github.com/alesapin)).
-* Improve HashJoin::needUsedFlagsForPerRightTableRow, returns false for cross join. [#82379](https://github.com/ClickHouse/ClickHouse/pull/82379) ([lgbo](https://github.com/lgbo-ustc)).
-* Allow write/read map columns as array of tuples. [#82408](https://github.com/ClickHouse/ClickHouse/pull/82408) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
-* List the licenses of rust crates in system.licenses. [#82440](https://github.com/ClickHouse/ClickHouse/pull/82440) ([Raúl Marín](https://github.com/Algunenano)).
-* Macros like `{uuid}` can now be used in the `keeper_path` setting of the S3Queue table engine. [#82463](https://github.com/ClickHouse/ClickHouse/pull/82463) ([Nikolay Degterinsky](https://github.com/evillique)).
-* Keeper improvement: move changelog files between disk in a background thread. Previously, moving changelog to a different disk would block Keeper globally until the move is finished. This lead to performance degradation if moving is a long operation (e.g. to S3 disk). [#82485](https://github.com/ClickHouse/ClickHouse/pull/82485) ([Antonio Andelic](https://github.com/antonio2368)).
-* Keeper improvement: add new config `keeper_server.cleanup_old_and_ignore_new_acl`. If enabled, all nodes will have their ACLs cleared while ACL for new requests will be ignored. If the goal is to completely remove ACL from nodes, it's important to leave the config enabled until a new snapshot is created. [#82496](https://github.com/ClickHouse/ClickHouse/pull/82496) ([Antonio Andelic](https://github.com/antonio2368)).
-* Removed experimental `send_metadata` logic related to experimental zero-copy replication. It wasn't ever used and nobody supports this code. Since there were even no tests related to it, there is a high chance that it's broken long time ago. [#82508](https://github.com/ClickHouse/ClickHouse/pull/82508) ([alesapin](https://github.com/alesapin)).
-* Added a new server setting `s3queue_disable_streaming` which disables streaming in tables with S3Queue table engine. This setting is changeable without server restart. [#82515](https://github.com/ClickHouse/ClickHouse/pull/82515) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Color parenthesis in multiple colors for better readability. [#82538](https://github.com/ClickHouse/ClickHouse/pull/82538) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Refactor dynamic resize feature of filesystem cache. Added more logs for introspection. [#82556](https://github.com/ClickHouse/ClickHouse/pull/82556) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* `clickhouse-server` without a configuration file will also listen to the PostgreSQL port 9005, like with the default config. [#82633](https://github.com/ClickHouse/ClickHouse/pull/82633) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Integrate `StorageKafka2` to `system.kafka_consumers`. [#82652](https://github.com/ClickHouse/ClickHouse/pull/82652) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* Estimate complex cnf/dnf, for example, `(a < 1 and a > 0) or b = 3`, by statistics. [#82663](https://github.com/ClickHouse/ClickHouse/pull/82663) ([Han Fei](https://github.com/hanfei1991)).
-* In `ReplicatedMergeTree::executeMetadataAlter`, we get the StorageID, and without taking DDLGuard, we try to call `IDatabase::alterTable`. In between this time we could have technically exchanged the table in question with another table, so when we get the definiton we would get the wrong one. To avoid this we add a separate check for UUIDs to match when we try to call `IDatabase::alterTable`. [#82666](https://github.com/ClickHouse/ClickHouse/pull/82666) ([Nikolay Degterinsky](https://github.com/evillique)).
-* When attaching a database with a read-only remote disk, manually add table UUIDs into DatabaseCatalog. [#82670](https://github.com/ClickHouse/ClickHouse/pull/82670) ([Tuan Pham Anh](https://github.com/tuanpach)).
-* Prevent user from using `nan` and `inf` with `NumericIndexedVector`. Fixes [#82239](https://github.com/ClickHouse/ClickHouse/issues/82239) and a little more. [#82681](https://github.com/ClickHouse/ClickHouse/pull/82681) ([Raufs Dunamalijevs](https://github.com/rienath)).
-* Do not omit zero values in the `X-ClickHouse-Progress` and `X-ClickHouse-Summary` header formats. [#82727](https://github.com/ClickHouse/ClickHouse/pull/82727) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Keeper improvement: support specific permissions for world:anyone ACL. [#82755](https://github.com/ClickHouse/ClickHouse/pull/82755) ([Antonio Andelic](https://github.com/antonio2368)).
-* Do not allow RENAME COLUMN or DROP COLUMN involving explicitly listed columns to sum in SummingMergeTree. Closes [#81836](https://github.com/ClickHouse/ClickHouse/issues/81836). [#82821](https://github.com/ClickHouse/ClickHouse/pull/82821) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Improve the precision of conversion from `Decimal` to `Float32`. Implement conversion from `Decimal` to `BFloat16`. Closes [#82660](https://github.com/ClickHouse/ClickHouse/issues/82660). [#82823](https://github.com/ClickHouse/ClickHouse/pull/82823) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Scrollbars in the Web UI will look slightly better. [#82869](https://github.com/ClickHouse/ClickHouse/pull/82869) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* `clickhouse-server` with embedded configuration will allow using the Web UI by providing an HTTP OPTIONS response. [#82870](https://github.com/ClickHouse/ClickHouse/pull/82870) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Highlight metacharacters in LIKE/REGEXP patterns as you type. We already have it in `clickhouse-format` and in the echo in `clickhouse-client`, but now it is done in the command prompt as well. [#82871](https://github.com/ClickHouse/ClickHouse/pull/82871) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Highlighting in `clickhouse-format` and in the client's echo will work in the same way as the highlighting in the command line prompt. [#82874](https://github.com/ClickHouse/ClickHouse/pull/82874) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add support for specifying extra Keeper ACL for paths in config. If you want to add extra ACL for a specific path you define it in the config under `zookeeper.path_acls`. [#82898](https://github.com/ClickHouse/ClickHouse/pull/82898) ([Antonio Andelic](https://github.com/antonio2368)).
-* Add function to write types into wkb format. [#82935](https://github.com/ClickHouse/ClickHouse/pull/82935) ([Konstantin Vedernikov](https://github.com/scanhex12)).
-* Now mutations snapshot will be built from the visible parts snapshot. Also mutation counters used in snapshot will be recalculated from the included mutations. [#82945](https://github.com/ClickHouse/ClickHouse/pull/82945) ([Mikhail Artemenko](https://github.com/Michicosun)).
-* Adds ProfileEvent when Keeper rejects a write due to soft memory limit. [#82963](https://github.com/ClickHouse/ClickHouse/pull/82963) ([Xander Garbett](https://github.com/Garbett1)).
-* Add columns `commit_time`, `commit_id` to `system.s3queue_log`. [#83016](https://github.com/ClickHouse/ClickHouse/pull/83016) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* In some cases, we need to have multiple dimensions to our metrics. For example, counting failed merges or mutations by error codes rather than having a single counter. [#83030](https://github.com/ClickHouse/ClickHouse/pull/83030) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* Consolidate unknown settings warnings in clickhouse client and log them as a summary. [#83042](https://github.com/ClickHouse/ClickHouse/pull/83042) ([Bharat Nallan](https://github.com/bharatnc)).
-* Clickhouse client now reports the local port when connection error happens. [#83050](https://github.com/ClickHouse/ClickHouse/pull/83050) ([Jianfei Hu](https://github.com/incfly)).
-* Slightly better error handling in `AsynchronousMetrics`. If the `/sys/block` directory exists but is not accessible, the server will start without monitoring the block devices. Closes [#79229](https://github.com/ClickHouse/ClickHouse/issues/79229). [#83115](https://github.com/ClickHouse/ClickHouse/pull/83115) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Support `TimestampTZ` in Glue catalog. This closes [#81654](https://github.com/ClickHouse/ClickHouse/issues/81654). [#83132](https://github.com/ClickHouse/ClickHouse/pull/83132) ([Konstantin Vedernikov](https://github.com/scanhex12)).
-* Shutdown SystemLogs after ordinary tables (and before system tables, instead of before ordinary). [#83134](https://github.com/ClickHouse/ClickHouse/pull/83134) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Add logs for s3queue shutdown process. [#83163](https://github.com/ClickHouse/ClickHouse/pull/83163) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Async logs: Limit the max number of entries that are hold in the queue. [#83214](https://github.com/ClickHouse/ClickHouse/pull/83214) ([Raúl Marín](https://github.com/Algunenano)).
-* Possibility to parse Time and Time64 as MM:SS, M:SS, SS, or S. [#83299](https://github.com/ClickHouse/ClickHouse/pull/83299) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* When `distributed_ddl_output_mode='*_only_active'`, don't wait for new or recovered replicas that have replication lag bigger than `max_replication_lag_to_enqueue`. This should help to avoid `DDL task is not finished on some hosts` when a new replica becomes active after finishing initialization or recovery, but it accumulated huge replication log while initializing. Also, implement `SYSTEM SYNC DATABASE REPLICA STRICT` query that waits for replication log to become below `max_replication_lag_to_enqueue`. [#83302](https://github.com/ClickHouse/ClickHouse/pull/83302) ([Alexander Tokmakov](https://github.com/tavplubix)).
-* Do not output too long descriptions of expression actions in exception messages. Closes [#83164](https://github.com/ClickHouse/ClickHouse/issues/83164). [#83350](https://github.com/ClickHouse/ClickHouse/pull/83350) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add ability to parse part's prefix and suffix and also check coverage for non constant columns. [#83377](https://github.com/ClickHouse/ClickHouse/pull/83377) ([Mikhail Artemenko](https://github.com/Michicosun)).
-* Function `reinterpret()` function now supports conversion to `Array(T)` where `T` is a fixed-size data type (issue [#82621](https://github.com/ClickHouse/ClickHouse/issues/82621)). [#83399](https://github.com/ClickHouse/ClickHouse/pull/83399) ([Shankar Iyer](https://github.com/shankar-iyer)).
-* Unify parameter names in ODBC and JDBC when using named collections. [#83410](https://github.com/ClickHouse/ClickHouse/pull/83410) ([Andrey Zvonov](https://github.com/zvonand)).
-* When the storage is shutting down, `getStatus` throws an `ErrorCodes::ABORTED` exception. Previously, this would fail the select query. Now we catch the `ErrorCodes::ABORTED` exceptions and intentionally ignore them instead. [#83435](https://github.com/ClickHouse/ClickHouse/pull/83435) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* Add process resource metrics (such as `UserTimeMicroseconds`, `SystemTimeMicroseconds`, `RealTimeMicroseconds`) to part_log profile events for `MergeParts` entries. [#83460](https://github.com/ClickHouse/ClickHouse/pull/83460) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Enable `create_if_not_exists`, `check_not_exists`, `remove_recursive` feature flags in Keeper by default which enable new types of requests. [#83488](https://github.com/ClickHouse/ClickHouse/pull/83488) ([Antonio Andelic](https://github.com/antonio2368)).
-* Shutdown S3(Azure/etc)Queue streaming before shutting down any tables on server shutdown. [#83530](https://github.com/ClickHouse/ClickHouse/pull/83530) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Enable Date/Date32 as integers in JSON input formats. [#83597](https://github.com/ClickHouse/ClickHouse/pull/83597) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
-* Made exception messages for certain situations for loading and adding projections easier to read. [#83728](https://github.com/ClickHouse/ClickHouse/pull/83728) ([Robert Schulze](https://github.com/rschu1ze)).
-* Introduce a configuration option to skip binary checksum integrity checks for `clickhouse-server`. Resolves [#83637](https://github.com/ClickHouse/ClickHouse/issues/83637). [#83749](https://github.com/ClickHouse/ClickHouse/pull/83749) ([Rafael Roquetto](https://github.com/rafaelroquetto)).
-
-#### Bug Fix (user-visible misbehavior in an official stable release)
-* Fix the wrong default value for the `--reconnect` option in `clickhouse-benchmark`. It was changed by mistake in [#79465](https://github.com/ClickHouse/ClickHouse/issues/79465). [#82677](https://github.com/ClickHouse/ClickHouse/pull/82677) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix inconsistent formatting of `CREATE DICTIONARY`. Closes [#82105](https://github.com/ClickHouse/ClickHouse/issues/82105). [#82829](https://github.com/ClickHouse/ClickHouse/pull/82829) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix inconsistent formatting of TTL when it contains a `materialize` function. Closes [#82828](https://github.com/ClickHouse/ClickHouse/issues/82828). [#82831](https://github.com/ClickHouse/ClickHouse/pull/82831) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix inconsistent formatting of EXPLAIN AST in a subquery when it contains output options such as INTO OUTFILE. Closes [#82826](https://github.com/ClickHouse/ClickHouse/issues/82826). [#82840](https://github.com/ClickHouse/ClickHouse/pull/82840) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix inconsistent formatting of parenthesized expressions with aliases in the context when no aliases are allowed. Closes [#82836](https://github.com/ClickHouse/ClickHouse/issues/82836). Closes [#82837](https://github.com/ClickHouse/ClickHouse/issues/82837). [#82867](https://github.com/ClickHouse/ClickHouse/pull/82867) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Use the proper error code when multiplying an aggregate function state with IPv4. Closes [#82817](https://github.com/ClickHouse/ClickHouse/issues/82817). [#82818](https://github.com/ClickHouse/ClickHouse/pull/82818) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Recalculate the min-max index when TTL reduces rows to ensure the correctness of algorithms relying on it, such as `minmax_count_projection`. This resolves [#77091](https://github.com/ClickHouse/ClickHouse/issues/77091). [#77166](https://github.com/ClickHouse/ClickHouse/pull/77166) ([Amos Bird](https://github.com/amosbird)).
-* For queries with a combination of `ORDER BY ... LIMIT BY ... LIMIT N`, when ORDER BY is executed as a PartialSorting, the counter `rows_before_limit_at_least` now reflects the number of rows consumed by the LIMIT clause instead of the number of rows consumed by the sorting transform. [#78999](https://github.com/ClickHouse/ClickHouse/pull/78999) ([Eduard Karacharov](https://github.com/korowa)).
-* Fix excessive granule skipping when filtering over token/ngram indexes with a regexp that contains an alternation and a non-literal first alternative. [#79373](https://github.com/ClickHouse/ClickHouse/pull/79373) ([Eduard Karacharov](https://github.com/korowa)).
-* Fix a logical error with the `<=>` operator and Join storage; the query now returns a proper error code. [#80165](https://github.com/ClickHouse/ClickHouse/pull/80165) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Fix a crash in the `loop` function when used with the `remote` function family. Ensure the LIMIT clause is respected in `loop(remote(...))`. [#80299](https://github.com/ClickHouse/ClickHouse/pull/80299) ([Julia Kartseva](https://github.com/jkartseva)).
-* Fix incorrect behavior of `to_utc_timestamp` and `from_utc_timestamp` functions when handling dates before Unix epoch (1970-01-01) and after maximum date (2106-02-07 06:28:15). Now these functions properly clamp values to epoch start and maximum date respectively. [#80498](https://github.com/ClickHouse/ClickHouse/pull/80498) ([Surya Kant Ranjan](https://github.com/iit2009046)).
-* For some queries executed with parallel replicas, reading-in-order optimizations could be applied on the initiator while they couldn't be applied on remote nodes. This led to different reading modes being used by the parallel replicas coordinator (on the initiator) and on remote nodes, which is a logical error. [#80652](https://github.com/ClickHouse/ClickHouse/pull/80652) ([Igor Nikonov](https://github.com/devcrafter)).
-* Fix a logical error during `MATERIALIZE PROJECTION` when a column type was changed to Nullable. [#80741](https://github.com/ClickHouse/ClickHouse/pull/80741) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix incorrect TTL recalculation in TTL GROUP BY when updating TTL. [#81222](https://github.com/ClickHouse/ClickHouse/pull/81222) ([Evgeniy Ulasik](https://github.com/H0uston)).
-* Fixed Parquet bloom filter incorrectly applying condition like `WHERE function(key) IN (...)` as if it were `WHERE key IN (...)`. [#81255](https://github.com/ClickHouse/ClickHouse/pull/81255) ([Michael Kolupaev](https://github.com/al13n321)).
-* Fixed possible crash in `Aggregator` in case of exception during merge. [#81450](https://github.com/ClickHouse/ClickHouse/pull/81450) ([Nikita Taranov](https://github.com/nickitat)).
-* Fixed `InterpreterInsertQuery::extendQueryLogElemImpl` to add backquotes to database and table names when needed (e.g., when names contain special characters like `-`). [#81528](https://github.com/ClickHouse/ClickHouse/pull/81528) ([Ilia Shvyrialkin](https://github.com/Harzu)).
-* Fix `IN` execution with `transform_null_in=1` with null in the left argument and non-nullable subquery result. [#81584](https://github.com/ClickHouse/ClickHouse/pull/81584) ([Pavel Kruglov](https://github.com/Avogar)).
-* Don't validate experimental/suspicious types in default/materialized expression execution when reading from an existing table. [#81618](https://github.com/ClickHouse/ClickHouse/pull/81618) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix "Context has expired" during merges when dict used in TTL expression. [#81690](https://github.com/ClickHouse/ClickHouse/pull/81690) ([Azat Khuzhin](https://github.com/azat)).
-* Fix monotonicity of the cast function. [#81722](https://github.com/ClickHouse/ClickHouse/pull/81722) ([zoomxi](https://github.com/zoomxi)).
-* Fix the issue where required columns are not read during scalar correlated subquery processing. Fixes [#81716](https://github.com/ClickHouse/ClickHouse/issues/81716). [#81805](https://github.com/ClickHouse/ClickHouse/pull/81805) ([Dmitry Novik](https://github.com/novikd)).
-* In previous versions, the server returned excessive content for requests to `/js`. This closes [#61890](https://github.com/ClickHouse/ClickHouse/issues/61890). [#81895](https://github.com/ClickHouse/ClickHouse/pull/81895) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Previously, `MongoDB` table engine definitions could include a path component in the `host:port` argument, which was silently ignored, and the MongoDB integration refused to load such tables. With this fix *we allow loading such tables and ignore the path component* if the `MongoDB` engine has five arguments, using the database name from the arguments. *Note:* the fix is not applied to newly created tables or to queries with the `mongo` table function, nor to dictionary sources and named collections. [#81942](https://github.com/ClickHouse/ClickHouse/pull/81942) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Fixed possible crash in `Aggregator` in case of exception during merge. [#82022](https://github.com/ClickHouse/ClickHouse/pull/82022) ([Nikita Taranov](https://github.com/nickitat)).
-* Fix filter analysis when only a constant alias column is used in the query. Fixes [#79448](https://github.com/ClickHouse/ClickHouse/issues/79448). [#82037](https://github.com/ClickHouse/ClickHouse/pull/82037) ([Dmitry Novik](https://github.com/novikd)).
-* Fix LOGICAL_ERROR and following crash when using the same column in the TTL for GROUP BY and SET. [#82054](https://github.com/ClickHouse/ClickHouse/pull/82054) ([Pablo Marcos](https://github.com/pamarcos)).
-* Fix S3 table function argument validation in secret masking, preventing possible `LOGICAL_ERROR`, close [#80620](https://github.com/ClickHouse/ClickHouse/issues/80620). [#82056](https://github.com/ClickHouse/ClickHouse/pull/82056) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Fix data races in Iceberg. [#82088](https://github.com/ClickHouse/ClickHouse/pull/82088) ([Azat Khuzhin](https://github.com/azat)).
-* Fix `DatabaseReplicated::getClusterImpl`. If the first element (or elements) of `hosts` has `id == DROPPED_MARK` and there are no other elements for the same shard, the first element of `shards` will be an empty vector, leading to `std::out_of_range`. [#82093](https://github.com/ClickHouse/ClickHouse/pull/82093) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* Fix a copy-paste error in `arraySimilarity` by disallowing the use of `UInt32` and `Int32` weights. Update tests and docs. [#82103](https://github.com/ClickHouse/ClickHouse/pull/82103) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
-* Fix the `Not found column` error for queries with `arrayJoin` under `WHERE` condition and `IndexSet`. [#82113](https://github.com/ClickHouse/ClickHouse/pull/82113) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix a bug in the Glue catalog integration. Now ClickHouse can read tables with nested data types where some subcolumns contain decimals, for example: `map`. Fixes [#81301](https://github.com/ClickHouse/ClickHouse/issues/81301). [#82114](https://github.com/ClickHouse/ClickHouse/pull/82114) ([alesapin](https://github.com/alesapin)).
-* Fix a performance degradation in SummingMergeTree that was introduced in 25.5 in https://github.com/ClickHouse/ClickHouse/pull/79051. [#82130](https://github.com/ClickHouse/ClickHouse/pull/82130) ([Pavel Kruglov](https://github.com/Avogar)).
-* When passing settings over the URI, the last value is considered. [#82137](https://github.com/ClickHouse/ClickHouse/pull/82137) ([Sema Checherinda](https://github.com/CheSema)).
-* Fix "Context has expired" for Iceberg. [#82146](https://github.com/ClickHouse/ClickHouse/pull/82146) ([Azat Khuzhin](https://github.com/azat)).
-* Fix possible deadlock for remote queries when server is under memory pressure. [#82160](https://github.com/ClickHouse/ClickHouse/pull/82160) ([Kirill](https://github.com/kirillgarbar)).
-* Fixes overflow in the `numericIndexedVectorPointwiseAdd`, `numericIndexedVectorPointwiseSubtract`, `numericIndexedVectorPointwiseMultiply`, and `numericIndexedVectorPointwiseDivide` functions that occurred when they were applied to large numbers. [#82165](https://github.com/ClickHouse/ClickHouse/pull/82165) ([Raufs Dunamalijevs](https://github.com/rienath)).
-* Fix a bug in table dependencies causing Materialized Views to miss INSERT queries. [#82222](https://github.com/ClickHouse/ClickHouse/pull/82222) ([Nikolay Degterinsky](https://github.com/evillique)).
-* Fix possible data-race between suggestion thread and main client thread. [#82233](https://github.com/ClickHouse/ClickHouse/pull/82233) ([Azat Khuzhin](https://github.com/azat)).
-* Now ClickHouse can read Iceberg tables from the Glue catalog after schema evolution. Fixes [#81272](https://github.com/ClickHouse/ClickHouse/issues/81272). [#82301](https://github.com/ClickHouse/ClickHouse/pull/82301) ([alesapin](https://github.com/alesapin)).
-* Fix the validation of async metrics settings `asynchronous_metrics_update_period_s` and `asynchronous_heavy_metrics_update_period_s`. [#82310](https://github.com/ClickHouse/ClickHouse/pull/82310) ([Bharat Nallan](https://github.com/bharatnc)).
-* Fix logical error when resolving matcher in query with multiple JOINs, close [#81969](https://github.com/ClickHouse/ClickHouse/issues/81969). [#82421](https://github.com/ClickHouse/ClickHouse/pull/82421) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Add expiration to AWS ECS token so it can be reloaded. [#82422](https://github.com/ClickHouse/ClickHouse/pull/82422) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Fixes a bug for `NULL` arguments in `CASE` function. [#82436](https://github.com/ClickHouse/ClickHouse/pull/82436) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Fix data races in the client (by not using the global context) and in `session_timezone` overrides (previously, if `session_timezone` was set to a non-empty value in e.g. `users.xml`/client options and to an empty value in the query context, the value from `users.xml` was used, which is wrong; now the query context always has priority over the global context). [#82444](https://github.com/ClickHouse/ClickHouse/pull/82444) ([Azat Khuzhin](https://github.com/azat)).
-* Fix disabling boundary alignment for cached buffer in external table engines. It was broken in https://github.com/ClickHouse/ClickHouse/pull/81868. [#82493](https://github.com/ClickHouse/ClickHouse/pull/82493) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix the crash if key-value storage is joined with a type-casted key. [#82497](https://github.com/ClickHouse/ClickHouse/pull/82497) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Fix hiding named collection values in logs/query_log. Closes [#82405](https://github.com/ClickHouse/ClickHouse/issues/82405). [#82510](https://github.com/ClickHouse/ClickHouse/pull/82510) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix a possible crash in logging while terminating a session as the user_id might sometimes be empty. [#82513](https://github.com/ClickHouse/ClickHouse/pull/82513) ([Bharat Nallan](https://github.com/bharatnc)).
-* Fixes cases where parsing of Time could cause msan issues. This fixes: [#82477](https://github.com/ClickHouse/ClickHouse/issues/82477). [#82514](https://github.com/ClickHouse/ClickHouse/pull/82514) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Disallow setting `threadpool_writer_pool_size` to zero to ensure that server operations don't get stuck. [#82532](https://github.com/ClickHouse/ClickHouse/pull/82532) ([Bharat Nallan](https://github.com/bharatnc)).
-* Fix `LOGICAL_ERROR` during row policy expression analysis for correlated columns. [#82618](https://github.com/ClickHouse/ClickHouse/pull/82618) ([Dmitry Novik](https://github.com/novikd)).
-* Fix incorrect usage of parent metadata in `mergeTreeProjection` table function when `enable_shared_storage_snapshot_in_query = 1`. This is for [#82634](https://github.com/ClickHouse/ClickHouse/issues/82634). [#82638](https://github.com/ClickHouse/ClickHouse/pull/82638) ([Amos Bird](https://github.com/amosbird)).
-* Functions `trim{Left,Right,Both}` now support input strings of type "FixedString(N)". For example, `SELECT trimBoth(toFixedString('abc', 3), 'ac')` now works. [#82691](https://github.com/ClickHouse/ClickHouse/pull/82691) ([Robert Schulze](https://github.com/rschu1ze)).
-* In AzureBlobStorage native copy, we compare authentication methods; if an exception occurs during the comparison, the code now falls back to read-and-copy (i.e. non-native copy). [#82693](https://github.com/ClickHouse/ClickHouse/pull/82693) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)).
-* Fix deserialization of `groupArraySample`/`groupArrayLast` in the case of empty elements (deserialization could skip part of the binary data if the input was empty, which could lead to corruption during data reads and `UNKNOWN_PACKET_FROM_SERVER` in the TCP protocol). This does not affect numbers and date-time types. [#82763](https://github.com/ClickHouse/ClickHouse/pull/82763) ([Pedro Ferreira](https://github.com/PedroTadim)).
-* Fix backup of an empty `Memory` table, which caused the backup restore to fail with a `BACKUP_ENTRY_NOT_FOUND` error. [#82791](https://github.com/ClickHouse/ClickHouse/pull/82791) ([Julia Kartseva](https://github.com/jkartseva)).
-* Fix exception safety in union/intersect/except_default_mode rewrite. Closes [#82664](https://github.com/ClickHouse/ClickHouse/issues/82664). [#82820](https://github.com/ClickHouse/ClickHouse/pull/82820) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Keep track of the number of async tables loading jobs. If there are some running jobs, do not update `tail_ptr` in `TransactionLog::removeOldEntries`. [#82824](https://github.com/ClickHouse/ClickHouse/pull/82824) ([Tuan Pham Anh](https://github.com/tuanpach)).
-* Fix data races in Iceberg. [#82841](https://github.com/ClickHouse/ClickHouse/pull/82841) ([Azat Khuzhin](https://github.com/azat)).
-* The `use_skip_indexes_if_final_exact_mode` optimization (introduced in 25.6) could fail to select a relevant candidate range depending upon `MergeTree` engine settings / data distribution. That has been resolved now. [#82879](https://github.com/ClickHouse/ClickHouse/pull/82879) ([Shankar Iyer](https://github.com/shankar-iyer)).
-* Set salt for auth data when parsing from AST with type SCRAM_SHA256_PASSWORD. [#82888](https://github.com/ClickHouse/ClickHouse/pull/82888) ([Tuan Pham Anh](https://github.com/tuanpach)).
-* When using a non-caching Database implementation, the metadata of the corresponding table is deleted after the columns are returned and the reference is invalidated. [#82939](https://github.com/ClickHouse/ClickHouse/pull/82939) ([buyval01](https://github.com/buyval01)).
-* Fix filter modification for queries with a JOIN expression with a table with storage `Merge`. Fixes [#82092](https://github.com/ClickHouse/ClickHouse/issues/82092). [#82950](https://github.com/ClickHouse/ClickHouse/pull/82950) ([Dmitry Novik](https://github.com/novikd)).
-* Fix LOGICAL_ERROR in QueryMetricLog: Mutex cannot be NULL. [#82979](https://github.com/ClickHouse/ClickHouse/pull/82979) ([Pablo Marcos](https://github.com/pamarcos)).
-* Fixed incorrect output of function `formatDateTime` when formatter `%f` is used together with variable-size formatters (e.g. `%M`). [#83020](https://github.com/ClickHouse/ClickHouse/pull/83020) ([Robert Schulze](https://github.com/rschu1ze)).
-* Fix performance degradation with the enabled analyzer when secondary queries always read all columns from the VIEWs. Fixes [#81718](https://github.com/ClickHouse/ClickHouse/issues/81718). [#83036](https://github.com/ClickHouse/ClickHouse/pull/83036) ([Dmitry Novik](https://github.com/novikd)).
-* Fix misleading error message when restoring a backup on a read-only disk. [#83051](https://github.com/ClickHouse/ClickHouse/pull/83051) ([Julia Kartseva](https://github.com/jkartseva)).
-* Do not check for cyclic dependencies on create table with no dependencies. It fixes performance degradation of the use cases with creation of thousands of tables that was introduced in https://github.com/ClickHouse/ClickHouse/pull/65405. [#83077](https://github.com/ClickHouse/ClickHouse/pull/83077) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fixes an issue with implicit reading of negative Time values into the table and clarifies the docs. [#83091](https://github.com/ClickHouse/ClickHouse/pull/83091) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Do not use unrelated parts of a shared dictionary in the `lowCardinalityKeys` function. [#83118](https://github.com/ClickHouse/ClickHouse/pull/83118) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix the regression in use of subcolumns with Materialized Views. This fixes: [#82784](https://github.com/ClickHouse/ClickHouse/issues/82784). [#83221](https://github.com/ClickHouse/ClickHouse/pull/83221) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Fix crash in client due to connection left in disconnected state after bad INSERT. [#83253](https://github.com/ClickHouse/ClickHouse/pull/83253) ([Azat Khuzhin](https://github.com/azat)).
-* Fix crash when calculating the size of a block with empty columns. [#83271](https://github.com/ClickHouse/ClickHouse/pull/83271) ([Raúl Marín](https://github.com/Algunenano)).
-* Fix possible crash in Variant type in UNION. [#83295](https://github.com/ClickHouse/ClickHouse/pull/83295) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix LOGICAL_ERROR in clickhouse-local for unsupported SYSTEM queries. [#83333](https://github.com/ClickHouse/ClickHouse/pull/83333) ([Surya Kant Ranjan](https://github.com/iit2009046)).
-* Fix `no_sign_request` for S3 client. It can be used to explicitly avoid signing S3 requests. It can also be defined for specific endpoints using endpoint-based settings. [#83379](https://github.com/ClickHouse/ClickHouse/pull/83379) ([Antonio Andelic](https://github.com/antonio2368)).
-* Fixes a crash that may happen for a query with a setting 'max_threads=1' when executed under load with CPU scheduling enabled. [#83387](https://github.com/ClickHouse/ClickHouse/pull/83387) ([Fan Ziqi](https://github.com/f2quantum)).
-* Fix `TOO_DEEP_SUBQUERIES` exception when CTE definition references another table expression with the same name. [#83413](https://github.com/ClickHouse/ClickHouse/pull/83413) ([Dmitry Novik](https://github.com/novikd)).
-* Fix incorrect behavior when executing `REVOKE S3 ON system.*` revokes S3 permissions for `*.*`. This fixes [#83417](https://github.com/ClickHouse/ClickHouse/issues/83417). [#83420](https://github.com/ClickHouse/ClickHouse/pull/83420) ([pufit](https://github.com/pufit)).
-* Do not share async_read_counters between queries. [#83423](https://github.com/ClickHouse/ClickHouse/pull/83423) ([Azat Khuzhin](https://github.com/azat)).
-* Disable parallel replicas when a subquery contains FINAL. [#83455](https://github.com/ClickHouse/ClickHouse/pull/83455) ([zoomxi](https://github.com/zoomxi)).
-* Resolve minor integer overflow in configuration of setting `role_cache_expiration_time_seconds` (issue [#83374](https://github.com/ClickHouse/ClickHouse/issues/83374)). [#83461](https://github.com/ClickHouse/ClickHouse/pull/83461) ([wushap](https://github.com/wushap)).
-* Fix a bug introduced in https://github.com/ClickHouse/ClickHouse/pull/79963. When inserting into an MV with a definer, the permission check should use the definer's grants. This fixes [#79951](https://github.com/ClickHouse/ClickHouse/issues/79951). [#83502](https://github.com/ClickHouse/ClickHouse/pull/83502) ([pufit](https://github.com/pufit)).
-* Disable bounds-based file pruning for iceberg array element and iceberg map values, including all their nested subfields. [#83520](https://github.com/ClickHouse/ClickHouse/pull/83520) ([Daniil Ivanik](https://github.com/divanik)).
-* Fix possible file cache not initialized errors when it's used as a temporary data storage. [#83539](https://github.com/ClickHouse/ClickHouse/pull/83539) ([Bharat Nallan](https://github.com/bharatnc)).
-* Keeper fix: update total watch count correctly when ephemeral nodes are deleted on session close. [#83583](https://github.com/ClickHouse/ClickHouse/pull/83583) ([Antonio Andelic](https://github.com/antonio2368)).
-* Fix incorrect memory tracking around `max_untracked_memory`. [#83607](https://github.com/ClickHouse/ClickHouse/pull/83607) ([Azat Khuzhin](https://github.com/azat)).
-* INSERT SELECT with UNION ALL could lead to a null pointer dereference in a corner case. This closes [#83618](https://github.com/ClickHouse/ClickHouse/issues/83618). [#83643](https://github.com/ClickHouse/ClickHouse/pull/83643) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Disallow a zero value for `max_insert_block_size`, as it could cause a logical error. [#83688](https://github.com/ClickHouse/ClickHouse/pull/83688) ([Bharat Nallan](https://github.com/bharatnc)).
-* Fix an endless loop in `estimateCompressionRatio()` with `block_size_bytes = 0`. [#83704](https://github.com/ClickHouse/ClickHouse/pull/83704) ([Azat Khuzhin](https://github.com/azat)).
-* Fix `IndexUncompressedCacheBytes`/`IndexUncompressedCacheCells`/`IndexMarkCacheBytes`/`IndexMarkCacheFiles` metrics (previously they were included into metric w/o `Cache` prefix). [#83730](https://github.com/ClickHouse/ClickHouse/pull/83730) ([Azat Khuzhin](https://github.com/azat)).
-* Fix a possible abort (due to joining threads from the task) and, hopefully, hangs (in unit tests) during `BackgroundSchedulePool` shutdown. [#83769](https://github.com/ClickHouse/ClickHouse/pull/83769) ([Azat Khuzhin](https://github.com/azat)).
-* Introduce backward compatibility setting to allow new analyzer to reference outer alias in WITH clause in the case of name clashes. Fixes [#82700](https://github.com/ClickHouse/ClickHouse/issues/82700). [#83797](https://github.com/ClickHouse/ClickHouse/pull/83797) ([Dmitry Novik](https://github.com/novikd)).
-* Fix deadlock on shutdown due to recursive context locking during library bridge cleanup. [#83824](https://github.com/ClickHouse/ClickHouse/pull/83824) ([Azat Khuzhin](https://github.com/azat)).
-
-#### Build/Testing/Packaging Improvement
-* Build a minimal C library (10 KB) for the ClickHouse lexer. This is needed for [#80977](https://github.com/ClickHouse/ClickHouse/issues/80977). [#81347](https://github.com/ClickHouse/ClickHouse/pull/81347) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add a check for Nix submodule inputs. [#81691](https://github.com/ClickHouse/ClickHouse/pull/81691) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Compile SymbolIndex on Mac and FreeBSD. (But it will work only on ELF systems, Linux and FreeBSD). [#82347](https://github.com/ClickHouse/ClickHouse/pull/82347) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add test for standalone lexer, add test tag `fasttest-only`. [#82472](https://github.com/ClickHouse/ClickHouse/pull/82472) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Bumped Azure SDK to v1.15.0. [#82747](https://github.com/ClickHouse/ClickHouse/pull/82747) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)).
-* Add storage module from google-cloud-cpp to build system. [#82881](https://github.com/ClickHouse/ClickHouse/pull/82881) ([Pablo Marcos](https://github.com/pamarcos)).
-* Change `Dockerfile.ubuntu` for clickhouse-server to fit requirements in Docker Official Library. [#83039](https://github.com/ClickHouse/ClickHouse/pull/83039) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
-* A follow-up for [#83158](https://github.com/ClickHouse/ClickHouse/issues/83158) to fix uploading builds to `curl clickhouse.com`. [#83463](https://github.com/ClickHouse/ClickHouse/pull/83463) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
-* Add the `busybox` binary and install tools in the `clickhouse/clickhouse-server` and official `clickhouse` images. [#83735](https://github.com/ClickHouse/ClickHouse/pull/83735) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
-* Added support for the **`CLICKHOUSE_HOST` environment variable** to specify the ClickHouse server host, aligning with existing `CLICKHOUSE_USER` and `CLICKHOUSE_PASSWORD` environment variables. This allows for easier configuration without modifying client or configuration files directly. [#83659](https://github.com/ClickHouse/ClickHouse/pull/83659) ([Doron David](https://github.com/dorki)).
-
-
-### ClickHouse release 25.6, 2025-06-26 {#256}
-
-#### Backward Incompatible Change
-* Previously, the function `countMatches` would stop counting at the first empty match, even if the pattern accepts it. To overcome this issue, `countMatches` now continues execution by advancing by a single character when an empty match occurs. Users who would like to retain the old behavior can enable the setting `count_matches_stop_at_empty_match`. [#81676](https://github.com/ClickHouse/ClickHouse/pull/81676) ([Elmi Ahmadov](https://github.com/ahmadov)).
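-
-  For example (the pattern is illustrative; `a*` can match an empty string):
-
-  ```sql
-  -- New behavior: counting advances one character past an empty match.
-  SELECT countMatches('foo bar', 'a*');
-  -- Restore the old behavior:
-  SELECT countMatches('foo bar', 'a*') SETTINGS count_matches_stop_at_empty_match = 1;
-  ```
-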
-* Minor: Force `backup_threads` and `restore_threads` server settings to be non zero. [#80224](https://github.com/ClickHouse/ClickHouse/pull/80224) ([Raúl Marín](https://github.com/Algunenano)).
-* Minor: `bitNot` for `String` now returns a zero-terminated string in the internal memory representation. This should not affect any user-visible behavior, but the author wanted to highlight the change. [#80791](https://github.com/ClickHouse/ClickHouse/pull/80791) ([Azat Khuzhin](https://github.com/azat)).
-
-#### New Feature
-* New data types: `Time` (\[H\]HH:MM:SS) and `Time64` (\[H\]HH:MM:SS\[.fractional\]), and some basic cast functions and functions to interact with other data types. Added settings for compatibility with the existing function `toTime`. The setting `use_legacy_to_time` is set to keep the old behavior for now. [#81217](https://github.com/ClickHouse/ClickHouse/pull/81217) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). Support comparison between Time/Time64. [#80327](https://github.com/ClickHouse/ClickHouse/pull/80327) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
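-
-  A minimal sketch of the new types, assuming `Time64` takes a fractional-second precision parameter like `DateTime64`:
-
-  ```sql
-  CREATE TABLE shifts (start_t Time, exact_t Time64(3)) ENGINE = Memory;
-  INSERT INTO shifts VALUES ('09:30:00', '09:30:00.125');
-  SELECT start_t, exact_t FROM shifts;
-  ```
-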
-* A new CLI tool, [`chdig`](https://github.com/azat/chdig/) - TUI interface for ClickHouse (top like) as part of ClickHouse. [#79666](https://github.com/ClickHouse/ClickHouse/pull/79666) ([Azat Khuzhin](https://github.com/azat)).
-* Support `disk` setting for `Atomic` and `Ordinary` database engines, specifying the disk to store table metadata files. [#80546](https://github.com/ClickHouse/ClickHouse/pull/80546) ([Tuan Pham Anh](https://github.com/tuanpach)). This allows attaching databases from external sources.
-* A new type of MergeTree, `CoalescingMergeTree` - the engine takes the first non-Null value during background merges. This closes [#78869](https://github.com/ClickHouse/ClickHouse/issues/78869). [#79344](https://github.com/ClickHouse/ClickHouse/pull/79344) ([scanhex12](https://github.com/scanhex12)).
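-
-  A sketch of the new engine (table and column names are illustrative):
-
-  ```sql
-  -- During background merges, rows with the same sorting key are collapsed,
-  -- keeping the first non-NULL value observed for each column.
-  CREATE TABLE device_state
-  (
-      device_id UInt64,
-      temperature Nullable(Float64),
-      firmware Nullable(String)
-  )
-  ENGINE = CoalescingMergeTree
-  ORDER BY device_id;
-  ```
-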
-* Support functions to read WKB ("Well-Known Binary" is a format for binary encoding of various geometry types, used in GIS applications). See [#43941](https://github.com/ClickHouse/ClickHouse/issues/43941). [#80139](https://github.com/ClickHouse/ClickHouse/pull/80139) ([scanhex12](https://github.com/scanhex12)).
-* Added query slot scheduling for workloads, see [workload scheduling](https://clickhouse.com/docs/operations/workload-scheduling#query_scheduling) for details. [#78415](https://github.com/ClickHouse/ClickHouse/pull/78415) ([Sergei Trifonov](https://github.com/serxa)).
-* `timeSeries*` helper functions to speed up some scenarios when working with time-series data: re-sample the data to a time grid with a specified start timestamp, end timestamp, and step; calculate PromQL-like `delta`, `rate`, `idelta`, and `irate`. [#80590](https://github.com/ClickHouse/ClickHouse/pull/80590) ([Alexander Gololobov](https://github.com/davenger)).
-* Add `mapContainsValuesLike`/`mapContainsValues`/`mapExtractValuesLike` functions to filter on map values and their support in bloom filter based indexes. [#78171](https://github.com/ClickHouse/ClickHouse/pull/78171) ([UnamedRus](https://github.com/UnamedRus)).
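-
-  Illustrative usage, assuming semantics analogous to the existing key-based counterparts:
-
-  ```sql
-  -- Does the map contain a value starting with 'warn'?
-  SELECT mapContainsValuesLike(map('k1', 'warning', 'k2', 'ok'), 'warn%');
-  ```
-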
-* Now settings constraints can specify a set of disallowed values. [#78499](https://github.com/ClickHouse/ClickHouse/pull/78499) ([Bharat Nallan](https://github.com/bharatnc)).
-* Added a setting `enable_shared_storage_snapshot_in_query` to enable sharing the same storage snapshot across all subqueries in a single query. This ensures consistent reads from the same table, even when the table is referenced multiple times within a query. [#79471](https://github.com/ClickHouse/ClickHouse/pull/79471) ([Amos Bird](https://github.com/amosbird)).
-* Support writing `JSON` columns to `Parquet` and reading `JSON` columns from `Parquet` directly. [#79649](https://github.com/ClickHouse/ClickHouse/pull/79649) ([Nihal Z. Miaji](https://github.com/nihalzp)).
-* Add `MultiPolygon` support for `pointInPolygon`. [#79773](https://github.com/ClickHouse/ClickHouse/pull/79773) ([Nihal Z. Miaji](https://github.com/nihalzp)).
-* Add support for querying local filesystem-mounted Delta tables via `deltaLakeLocal` table function. [#79781](https://github.com/ClickHouse/ClickHouse/pull/79781) ([roykim98](https://github.com/roykim98)).
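-
-  A sketch, assuming the function takes the local table path as its argument and that a Delta table already exists there:
-
-  ```sql
-  SELECT count()
-  FROM deltaLakeLocal('/var/lib/data/delta/events');
-  ```
-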
-* Add new setting `cast_string_to_date_time_mode` that allows to choose DateTime parsing mode during cast from String. [#80210](https://github.com/ClickHouse/ClickHouse/pull/80210) ([Pavel Kruglov](https://github.com/Avogar)). For example, you can set it to the best effort mode.
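-
-  A sketch, assuming the setting accepts mode names like those of `date_time_input_format` (e.g. `'best_effort'`, the mode mentioned above):
-
-  ```sql
-  -- Basic mode would reject this string; best-effort parsing accepts it.
-  SELECT CAST('2025-03-15T22:30:00Z' AS DateTime)
-  SETTINGS cast_string_to_date_time_mode = 'best_effort';
-  ```
-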
-* Added `bech32Encode` and `bech32Decode` functions for working with Bitcoin's Bech algorithm (issue [#40381](https://github.com/ClickHouse/ClickHouse/issues/40381)). [#80239](https://github.com/ClickHouse/ClickHouse/pull/80239) ([George Larionov](https://github.com/glarik)).
-* Add SQL functions to analyse the names of MergeTree parts. [#80573](https://github.com/ClickHouse/ClickHouse/pull/80573) ([Mikhail Artemenko](https://github.com/Michicosun)).
-* Allow filtering parts selected for query by the disk they reside on by introducing a new virtual column, `_disk_name`. [#80650](https://github.com/ClickHouse/ClickHouse/pull/80650) ([tanner-bruce](https://github.com/tanner-bruce)).
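-
-  For example (the table and the disk name `'default'` are assumptions about your setup):
-
-  ```sql
-  SELECT count()
-  FROM my_table
-  WHERE _disk_name = 'default';
-  ```
-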
-* Add a landing page with the list of embedded web tools. It will open when requested by a browser-like user agent. [#81129](https://github.com/ClickHouse/ClickHouse/pull/81129) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Function `arrayFirst`, `arrayFirstIndex`, `arrayLast` and `arrayLastIndex` will filter away NULL values returned by the filter expression. In previous versions, Nullable filter results were not supported. Fixes [#81113](https://github.com/ClickHouse/ClickHouse/issues/81113). [#81197](https://github.com/ClickHouse/ClickHouse/pull/81197) ([Lennard Eijsackers](https://github.com/Blokje5)).
-* It's now possible to write `USE DATABASE name` instead of `USE name`. [#81307](https://github.com/ClickHouse/ClickHouse/pull/81307) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Added a new system table `system.codecs` to introspect the available codecs. (issue [#81525](https://github.com/ClickHouse/ClickHouse/issues/81525)). [#81600](https://github.com/ClickHouse/ClickHouse/pull/81600) ([Jimmy Aguilar Mena](https://github.com/Ergus)).
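-
-  For example:
-
-  ```sql
-  SELECT * FROM system.codecs;
-  ```
-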
-* Support `lag` and `lead` window functions. Closes [#9887](https://github.com/ClickHouse/ClickHouse/issues/9887). [#82108](https://github.com/ClickHouse/ClickHouse/pull/82108) ([Dmitry Novik](https://github.com/novikd)).
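-
-  A sketch of the newly supported functions (table and column names are illustrative):
-
-  ```sql
-  SELECT
-      ts,
-      value,
-      lag(value) OVER (ORDER BY ts)  AS prev_value,
-      lead(value) OVER (ORDER BY ts) AS next_value
-  FROM metrics;
-  ```
-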
-* Function `tokens` now supports a new tokenizer, named `split`, which is good for logs. [#80195](https://github.com/ClickHouse/ClickHouse/pull/80195) ([Robert Schulze](https://github.com/rschu1ze)).
-* Add support for the `--database` argument in `clickhouse-local`. You can switch to a previously created database. This closes [#44115](https://github.com/ClickHouse/ClickHouse/issues/44115). [#81465](https://github.com/ClickHouse/ClickHouse/pull/81465) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-
-#### Experimental Feature
-* Implement Kafka-rebalance-like logic for `Kafka2` using ClickHouse Keeper. For each replica we support two types of partition locks: permanent locks and temporary locks. The replica tries to hold permanent locks as long as possible; at any given time there are no more than `all_topic_partitions / active_replicas_count` permanent locks on the replica (here `all_topic_partitions` is the number of all partitions and `active_replicas_count` is the number of active replicas); if there are more, the replica releases some partitions. Some partitions are temporarily held by the replica. The maximum number of temporary locks on a replica changes dynamically to give other replicas a chance to take some partitions into permanent locks. When updating temporary locks, the replica releases them all and tries to take some others again. [#78726](https://github.com/ClickHouse/ClickHouse/pull/78726) ([Daria Fomina](https://github.com/sinfillo)).
-* An improvement for the experimental text index: explicit parameters are supported via key-value pairs. Currently, supported parameters are a mandatory `tokenizer` and two optional `max_rows_per_postings_list` and `ngram_size`. [#80262](https://github.com/ClickHouse/ClickHouse/pull/80262) ([Elmi Ahmadov](https://github.com/ahmadov)).
-* Previously, `packed` storage was not supported for the full-text index, because the segment id was updated on the fly by reading and writing the (`.gin_sid`) file on disk. In the case of packed storage, reading a value from an uncommitted file is not supported, which led to an issue. This is now fixed. [#80852](https://github.com/ClickHouse/ClickHouse/pull/80852) ([Elmi Ahmadov](https://github.com/ahmadov)).
-* Experimental indexes of type `gin` (which I don't like because it is an inside joke of PostgreSQL hackers) were renamed to `text`. Existing indexes of type `gin` remain loadable but they will throw an exception (suggesting `text` indexes instead) when one tries to use them in searches. [#80855](https://github.com/ClickHouse/ClickHouse/pull/80855) ([Robert Schulze](https://github.com/rschu1ze)).
-
-#### Performance Improvement
-* Enable multiple-projection filtering support, allowing to use more than one projection for part-level filtering. This addresses [#55525](https://github.com/ClickHouse/ClickHouse/issues/55525). This is the second step to implement projection index, following [#78429](https://github.com/ClickHouse/ClickHouse/issues/78429). [#80343](https://github.com/ClickHouse/ClickHouse/pull/80343) ([Amos Bird](https://github.com/amosbird)).
-* Use `SLRU` cache policy in filesystem cache by default. [#75072](https://github.com/ClickHouse/ClickHouse/pull/75072) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Remove contention in the Resize step in query pipeline. [#77562](https://github.com/ClickHouse/ClickHouse/pull/77562) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
-* Introduced an option to offload (de)compression and (de)serialization of blocks into pipeline threads instead of a single thread associated with a network connection. Controlled by the setting `enable_parallel_blocks_marshalling`. It should speed up distributed queries that transfer significant amounts of data between the initiator and remote nodes. [#78694](https://github.com/ClickHouse/ClickHouse/pull/78694) ([Nikita Taranov](https://github.com/nickitat)).
-* Performance improvements to all bloom filter types. [Video from the OpenHouse conference](https://www.youtube.com/watch?v=yIVz0NKwQvA&pp=ygUQb3BlbmhvdXNlIG9wZW5haQ%3D%3D) [#79800](https://github.com/ClickHouse/ClickHouse/pull/79800) ([Delyan Kratunov](https://github.com/dkratunov)).
-* Introduced a happy path in `UniqExactSet::merge` when one of the sets is empty. Also, now if the LHS set is two-level and the RHS is single-level, we won't do the conversion to two-level for the RHS. [#79971](https://github.com/ClickHouse/ClickHouse/pull/79971) ([Nikita Taranov](https://github.com/nickitat)).
-* Improve memory reuse efficiency and reduce page faults when using two-level hash tables. This is meant to speed up GROUP BY. [#80245](https://github.com/ClickHouse/ClickHouse/pull/80245) ([Jiebin Sun](https://github.com/jiebinn)).
-* Avoid unnecessary update and reduce lock contention in query condition cache. [#80247](https://github.com/ClickHouse/ClickHouse/pull/80247) ([Jiebin Sun](https://github.com/jiebinn)).
-* Trivial optimization for `concatenateBlocks`. Chances are it's good for parallel hash join. [#80328](https://github.com/ClickHouse/ClickHouse/pull/80328) ([李扬](https://github.com/taiyang-li)).
-* When selecting mark ranges from the primary key range, binary search cannot be used if the primary key is wrapped with functions. This PR improves this limitation: binary search can still be applied when the primary key is wrapped with an always monotonic function chain, or when the RPN contains an element that is always true. Closes [#45536](https://github.com/ClickHouse/ClickHouse/issues/45536). [#80597](https://github.com/ClickHouse/ClickHouse/pull/80597) ([zoomxi](https://github.com/zoomxi)).
-* Improve shutdown speed of `Kafka` engine (remove extra 3 seconds delay in case of multiple `Kafka` tables). [#80796](https://github.com/ClickHouse/ClickHouse/pull/80796) ([Azat Khuzhin](https://github.com/azat)).
-* Async inserts: reduce memory usage and improve performance of insert queries. [#80972](https://github.com/ClickHouse/ClickHouse/pull/80972) ([Raúl Marín](https://github.com/Algunenano)).
-* Don't profile processors if the log table is disabled. [#81256](https://github.com/ClickHouse/ClickHouse/pull/81256) ([Raúl Marín](https://github.com/Algunenano)). This speeds up very short queries.
-* Speed up `toFixedString` when the source is exactly what's requested. [#81257](https://github.com/ClickHouse/ClickHouse/pull/81257) ([Raúl Marín](https://github.com/Algunenano)).
-* Don't process quota values if the user is not limited. [#81549](https://github.com/ClickHouse/ClickHouse/pull/81549) ([Raúl Marín](https://github.com/Algunenano)). This speeds up very short queries.
-* Fixed performance regression in memory tracking. [#81694](https://github.com/ClickHouse/ClickHouse/pull/81694) ([Michael Kolupaev](https://github.com/al13n321)).
-* Improve sharding key optimization on distributed query. [#78452](https://github.com/ClickHouse/ClickHouse/pull/78452) ([fhw12345](https://github.com/fhw12345)).
-* Parallel replicas: avoid waiting for slow unused replicas if all read tasks have been assigned to other replicas. [#80199](https://github.com/ClickHouse/ClickHouse/pull/80199) ([Igor Nikonov](https://github.com/devcrafter)).
-* Parallel replicas now use a separate connection timeout; see the `parallel_replicas_connect_timeout_ms` setting. Previously, the `connect_timeout_with_failover_ms`/`connect_timeout_with_failover_secure_ms` settings were used as connection timeout values for parallel replicas queries (1 second by default). [#80421](https://github.com/ClickHouse/ClickHouse/pull/80421) ([Igor Nikonov](https://github.com/devcrafter)).
-* In filesystems with a journal, `mkdir` is written to the filesystem journal, which is persisted to disk. On a slow disk this can take a long time. Move it out of the reserve lock scope. [#81371](https://github.com/ClickHouse/ClickHouse/pull/81371) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Postpone reading of Iceberg manifest files until first reading query. [#81619](https://github.com/ClickHouse/ClickHouse/pull/81619) ([Daniil Ivanik](https://github.com/divanik)).
-* Allow moving `GLOBAL [NOT] IN` predicate to `PREWHERE` clause if applicable. [#79996](https://github.com/ClickHouse/ClickHouse/pull/79996) ([Eduard Karacharov](https://github.com/korowa)).
-
-#### Improvement
-* `EXPLAIN SYNTAX` now uses a new analyzer. It returns an AST built from the query tree. Added the option `query_tree_passes` to control the number of passes to be executed before converting the query tree to the AST. [#74536](https://github.com/ClickHouse/ClickHouse/pull/74536) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Implement flattened serialization for Dynamic and JSON in the Native format, which allows serializing/deserializing Dynamic and JSON data without special structures like a shared variant for Dynamic and shared data for JSON. This serialization can be enabled with the setting `output_format_native_use_flattened_dynamic_and_json_serialization`, and makes it easier to support Dynamic and JSON over the TCP protocol in clients in different languages. [#80499](https://github.com/ClickHouse/ClickHouse/pull/80499) ([Pavel Kruglov](https://github.com/Avogar)).
-* Refresh `S3` credentials after error `AuthenticationRequired`. [#77353](https://github.com/ClickHouse/ClickHouse/pull/77353) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Added dictionary metrics to `system.asynchronous_metrics`: `DictionaryMaxUpdateDelay` - the maximum delay (in seconds) of a dictionary update; `DictionaryTotalFailedUpdates` - the number of errors since the last successful loading in all dictionaries. [#78175](https://github.com/ClickHouse/ClickHouse/pull/78175) ([Vlad](https://github.com/codeworse)).
-* Add a warning about databases that were potentially created to save broken tables. [#78841](https://github.com/ClickHouse/ClickHouse/pull/78841) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* Add `_time` virtual column in `S3Queue`, `AzureQueue` engine. [#78926](https://github.com/ClickHouse/ClickHouse/pull/78926) ([Anton Ivashkin](https://github.com/ianton-ru)).
-* Make settings controlling connection drop on overloaded CPU hot-reloadable. [#79052](https://github.com/ClickHouse/ClickHouse/pull/79052) ([Alexey Katsman](https://github.com/alexkats)).
-* Add container prefix to data paths reported in system.tables for plain disks in Azure blob storage, making reporting consistent with S3 and GCP. [#79241](https://github.com/ClickHouse/ClickHouse/pull/79241) ([Julia Kartseva](https://github.com/jkartseva)).
-* Now, clickhouse-client and local also accept query parameters as `param-` (dash) along with `param_` (underscore). This closes [#63093](https://github.com/ClickHouse/ClickHouse/issues/63093). [#79429](https://github.com/ClickHouse/ClickHouse/pull/79429) ([Engel Danila](https://github.com/aaaengel)).
-* Detailed warning message for the bandwidth discount when copying data from local to remote S3 with checksum enabled. [#79464](https://github.com/ClickHouse/ClickHouse/pull/79464) ([VicoWu](https://github.com/VicoWu)).
-* Previously, when `input_format_parquet_max_block_size = 0` (an invalid value), ClickHouse would get stuck. Now this behaviour is fixed. This closes [#79394](https://github.com/ClickHouse/ClickHouse/issues/79394). [#79601](https://github.com/ClickHouse/ClickHouse/pull/79601) ([abashkeev](https://github.com/abashkeev)).
-* Add `throw_on_error` setting for `startup_scripts`: when `throw_on_error` is true, the server will not start unless all queries complete successfully. By default, `throw_on_error` is false, preserving the previous behavior. [#79732](https://github.com/ClickHouse/ClickHouse/pull/79732) ([Aleksandr Musorin](https://github.com/AVMusorin)).
-* Allow to add `http_response_headers` in `http_handlers` of any kind. [#79975](https://github.com/ClickHouse/ClickHouse/pull/79975) ([Andrey Zvonov](https://github.com/zvonand)).
-* Function `reverse` now supports `Tuple` data type. Closes [#80053](https://github.com/ClickHouse/ClickHouse/issues/80053). [#80083](https://github.com/ClickHouse/ClickHouse/pull/80083) ([flynn](https://github.com/ucasfl)).
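-
-  For example:
-
-  ```sql
-  -- Expected to return the elements in reverse order, i.e. (2.5, 'a', 1).
-  SELECT reverse((1, 'a', 2.5));
-  ```
-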
-* Resolve [#75817](https://github.com/ClickHouse/ClickHouse/issues/75817): allow getting `auxiliary_zookeepers` data from `system.zookeeper` table. [#80146](https://github.com/ClickHouse/ClickHouse/pull/80146) ([Nikolay Govorov](https://github.com/mrdimidium)).
-* Add asynchronous metrics about the server's TCP sockets. This improves the observability. Closes [#80187](https://github.com/ClickHouse/ClickHouse/issues/80187). [#80188](https://github.com/ClickHouse/ClickHouse/pull/80188) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Support `anyLast_respect_nulls` and `any_respect_nulls` as a `SimpleAggregateFunction`. [#80219](https://github.com/ClickHouse/ClickHouse/pull/80219) ([Diskein](https://github.com/Diskein)).
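-
-  A sketch of a table definition using the newly supported combinator (names are illustrative):
-
-  ```sql
-  CREATE TABLE user_last_value
-  (
-      user_id UInt64,
-      last_value SimpleAggregateFunction(anyLast_respect_nulls, Nullable(String))
-  )
-  ENGINE = AggregatingMergeTree
-  ORDER BY user_id;
-  ```
-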
-* Remove unnecessary call `adjustCreateQueryForBackup` for replicated databases. [#80282](https://github.com/ClickHouse/ClickHouse/pull/80282) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Allow extra options (that go after `--` like `-- --config.value='abc'`) in `clickhouse-local` without the equality sign. Closes [#80292](https://github.com/ClickHouse/ClickHouse/issues/80292). [#80293](https://github.com/ClickHouse/ClickHouse/pull/80293) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Highlight metacharacters in `SHOW ... LIKE` queries. This closes [#80275](https://github.com/ClickHouse/ClickHouse/issues/80275). [#80297](https://github.com/ClickHouse/ClickHouse/pull/80297) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Make SQL UDF persistent in `clickhouse-local`. The previously created function will be loaded at startup. This closes [#80085](https://github.com/ClickHouse/ClickHouse/issues/80085). [#80300](https://github.com/ClickHouse/ClickHouse/pull/80300) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
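-
-  For example (the function itself is illustrative):
-
-  ```sql
-  -- Created in one clickhouse-local session:
-  CREATE FUNCTION linear AS (x, k, b) -> k * x + b;
-  -- After a restart of clickhouse-local, the function is loaded again:
-  SELECT linear(1, 2, 3);
-  ```
-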
-* Fix description in explain plan for preliminary DISTINCT step. [#80330](https://github.com/ClickHouse/ClickHouse/pull/80330) ([UnamedRus](https://github.com/UnamedRus)).
-* Allow using named collections in ODBC/JDBC. [#80334](https://github.com/ClickHouse/ClickHouse/pull/80334) ([Andrey Zvonov](https://github.com/zvonand)).
-* Add metrics for the number of read-only and broken disks, and log an indicator when DiskLocalCheckThread is started. [#80391](https://github.com/ClickHouse/ClickHouse/pull/80391) ([VicoWu](https://github.com/VicoWu)).
-* Implement support for `s3_plain_rewritable` storage with projections. In previous versions, metadata objects in S3 referencing projections would not get updated when moved. Closes [#70258](https://github.com/ClickHouse/ClickHouse/issues/70258). [#80393](https://github.com/ClickHouse/ClickHouse/pull/80393) ([Sav](https://github.com/sberss)).
-* The `SYSTEM UNFREEZE` command will not try to look up parts in readonly and write-once disks. This closes [#80430](https://github.com/ClickHouse/ClickHouse/issues/80430). [#80432](https://github.com/ClickHouse/ClickHouse/pull/80432) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Lowered the log level of merged parts messages. [#80476](https://github.com/ClickHouse/ClickHouse/pull/80476) ([Hans Krutzer](https://github.com/hkrutzer)).
-* Change the default behavior of partition pruning for Iceberg tables. [#80583](https://github.com/ClickHouse/ClickHouse/pull/80583) ([Melvyn Peignon](https://github.com/melvynator)).
-* Add two new ProfileEvents for index search algorithm observability: `IndexBinarySearchAlgorithm` and `IndexGenericExclusionSearchAlgorithm`. [#80679](https://github.com/ClickHouse/ClickHouse/pull/80679) ([Pablo Marcos](https://github.com/pamarcos)).
-* Do not complain about unsupported `MADV_POPULATE_WRITE` for older kernels in logs (to avoid logs polluting). [#80704](https://github.com/ClickHouse/ClickHouse/pull/80704) ([Robert Schulze](https://github.com/rschu1ze)).
-* Added support for `Date32` and `DateTime64` in `TTL` expressions. [#80710](https://github.com/ClickHouse/ClickHouse/pull/80710) ([Andrey Zvonov](https://github.com/zvonand)).
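-
-  A sketch (previously the TTL column had to be `Date` or `DateTime`):
-
-  ```sql
-  CREATE TABLE events
-  (
-      event_time DateTime64(3),
-      payload String
-  )
-  ENGINE = MergeTree
-  ORDER BY event_time
-  TTL event_time + INTERVAL 30 DAY;
-  ```
-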
-* Adjust compatibility values for `max_merge_delayed_streams_for_parallel_write`. [#80760](https://github.com/ClickHouse/ClickHouse/pull/80760) ([Azat Khuzhin](https://github.com/azat)).
-* Fix a crash: if an exception is thrown in an attempt to remove a temporary file (they are used for spilling temporary data on disk) in the destructor, the program can terminate. [#80776](https://github.com/ClickHouse/ClickHouse/pull/80776) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add `IF EXISTS` modifier to `SYSTEM SYNC REPLICA`. [#80810](https://github.com/ClickHouse/ClickHouse/pull/80810) ([Raúl Marín](https://github.com/Algunenano)).
-* Extend exception message about "Having zero bytes, but read range is not finished...", add finished_download_time column to `system.filesystem_cache`. [#80849](https://github.com/ClickHouse/ClickHouse/pull/80849) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Add a search algorithm section to `EXPLAIN` output when using it with `indexes = 1`. It shows either "binary search" or "generic exclusion search". [#80881](https://github.com/ClickHouse/ClickHouse/pull/80881) ([Pablo Marcos](https://github.com/pamarcos)).
-* At the beginning of 2024, `prefer_column_name_to_alias` was hardcoded to true for MySQL handler because the new analyzer was not enabled by default. Now, it can be unhardcoded. [#80916](https://github.com/ClickHouse/ClickHouse/pull/80916) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Now `system.iceberg_history` shows history for catalog databases like Glue or Iceberg REST. Also renamed the `table_name` and `database_name` columns to `table` and `database` in `system.iceberg_history` for consistency. [#80975](https://github.com/ClickHouse/ClickHouse/pull/80975) ([alesapin](https://github.com/alesapin)).
-* Allow read-only mode for the `merge` table function, so the `CREATE TEMPORARY TABLE` grant is not required for using it. [#80981](https://github.com/ClickHouse/ClickHouse/pull/80981) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* Better introspection of in-memory caches (expose information about caches in `system.metrics` instead of the incomplete `system.asynchronous_metrics`). Add in-memory cache sizes (in bytes) to `dashboard.html`. `VectorSimilarityIndexCacheSize`/`IcebergMetadataFilesCacheSize` have been renamed to `VectorSimilarityIndexCacheBytes`/`IcebergMetadataFilesCacheBytes`. [#81023](https://github.com/ClickHouse/ClickHouse/pull/81023) ([Azat Khuzhin](https://github.com/azat)).
-* Ignore databases with engines that can't contain `RocksDB` tables while reading from `system.rocksdb`. [#81083](https://github.com/ClickHouse/ClickHouse/pull/81083) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Allow `filesystem_caches` and `named_collections` in the `clickhouse-local` configuration file. [#81105](https://github.com/ClickHouse/ClickHouse/pull/81105) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix highlighting of `PARTITION BY` in `INSERT` queries. In previous versions, `PARTITION BY` was not highlighted as a keyword. [#81106](https://github.com/ClickHouse/ClickHouse/pull/81106) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Two mini improvements in the Web UI: correctly handle queries without output, such as `CREATE` and `INSERT` (until recently, these queries resulted in an infinite spinner); when double-clicking on a table, scroll to the top. [#81131](https://github.com/ClickHouse/ClickHouse/pull/81131) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* `MemoryResidentWithoutPageCache` metric provides the amount of physical memory used by the server process, excluding userspace page cache, in bytes. This provides a more accurate view of actual memory usage when userspace page cache is utilized. When userspace page cache is disabled, this value equals `MemoryResident`. [#81233](https://github.com/ClickHouse/ClickHouse/pull/81233) ([Jayme Bird](https://github.com/jaymebrd)).
-* Mark manually logged exceptions in client, local server, keeper client and disks app as logged, so that they are not logged twice. [#81271](https://github.com/ClickHouse/ClickHouse/pull/81271) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* The settings `use_skip_indexes_if_final` and `use_skip_indexes_if_final_exact_mode` now default to `true`. Queries with the `FINAL` clause will now use skip indexes (if applicable) to shortlist granules, and also read any additional granules corresponding to matching primary key ranges. Users needing the earlier behaviour of approximate/imprecise results can set `use_skip_indexes_if_final_exact_mode` to `false` after careful evaluation. [#81331](https://github.com/ClickHouse/ClickHouse/pull/81331) ([Shankar Iyer](https://github.com/shankar-iyer)).
-* When you have multiple queries in the web UI, it will run the one under the cursor. Continuation of [#80977](https://github.com/ClickHouse/ClickHouse/issues/80977). [#81354](https://github.com/ClickHouse/ClickHouse/pull/81354) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix issues with the implementation of `is_strict` in the monotonicity checks for conversion functions. Previously, some conversion functions, such as `toFloat64(UInt32)` and `toDate(UInt8)`, incorrectly returned `is_strict` as false when they should have returned true. [#81359](https://github.com/ClickHouse/ClickHouse/pull/81359) ([zoomxi](https://github.com/zoomxi)).
-* When checking if a `KeyCondition` matches a continuous range, if the key is wrapped with a non-strict function chain, a `Constraint::POINT` may need to be converted to a `Constraint::RANGE`. For example: `toDate(event_time) = '2025-06-03'` implies a range for `event_time`: ['2025-06-03 00:00:00', '2025-06-04 00:00:00'). This PR fixes this behavior. [#81400](https://github.com/ClickHouse/ClickHouse/pull/81400) ([zoomxi](https://github.com/zoomxi)).
-* `clickhouse`/`ch` aliases will invoke `clickhouse-client` instead of `clickhouse-local` if `--host` or `--port` are specified. Continuation of [#79422](https://github.com/ClickHouse/ClickHouse/issues/79422). Closes [#65252](https://github.com/ClickHouse/ClickHouse/issues/65252). [#81509](https://github.com/ClickHouse/ClickHouse/pull/81509) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Now that we have the keeper response time distribution data, we can tune the histogram buckets for metrics. [#81516](https://github.com/ClickHouse/ClickHouse/pull/81516) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* Add profile event `PageCacheReadBytes`. [#81742](https://github.com/ClickHouse/ClickHouse/pull/81742) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix logical error in filesystem cache: "Having zero bytes but range is not finished". [#81868](https://github.com/ClickHouse/ClickHouse/pull/81868) ([Kseniia Sumarokova](https://github.com/kssenii)).
-
-#### Bug Fix (user-visible misbehavior in an official stable release)
-* Fix parameterized view with SELECT EXCEPT query. Closes [#49447](https://github.com/ClickHouse/ClickHouse/issues/49447). [#57380](https://github.com/ClickHouse/ClickHouse/pull/57380) ([Nikolay Degterinsky](https://github.com/evillique)).
-* Analyzer: Fix column projection name after column type promotion in join. Closes [#63345](https://github.com/ClickHouse/ClickHouse/issues/63345). [#63519](https://github.com/ClickHouse/ClickHouse/pull/63519) ([Dmitry Novik](https://github.com/novikd)).
-* Fixed a logical error in cases of column name clashes when `analyzer_compatibility_join_using_top_level_identifier` is enabled. [#75676](https://github.com/ClickHouse/ClickHouse/pull/75676) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Fix CTE usage in pushed-down predicates when `allow_push_predicate_ast_for_distributed_subqueries` is enabled. Fixes [#75647](https://github.com/ClickHouse/ClickHouse/issues/75647). Fixes [#79672](https://github.com/ClickHouse/ClickHouse/issues/79672). [#77316](https://github.com/ClickHouse/ClickHouse/pull/77316) ([Dmitry Novik](https://github.com/novikd)).
-* Fixes an issue where `SYSTEM SYNC REPLICA LIGHTWEIGHT 'foo'` would report success even when the specified replica didn't exist. The command now properly validates that the replica exists in Keeper before attempting synchronization. [#78405](https://github.com/ClickHouse/ClickHouse/pull/78405) ([Jayme Bird](https://github.com/jaymebrd)).
-* Fix a crash in a very specific situation when the `currentDatabase` function was used in `CONSTRAINT` sections for `ON CLUSTER` queries. Closes [#78100](https://github.com/ClickHouse/ClickHouse/issues/78100). [#79070](https://github.com/ClickHouse/ClickHouse/pull/79070) ([pufit](https://github.com/pufit)).
-* Fix passing of external roles in interserver queries. [#79099](https://github.com/ClickHouse/ClickHouse/pull/79099) ([Andrey Zvonov](https://github.com/zvonand)).
-* Use `IColumn` instead of `Field` in `SingleValueDataGeneric`. This fixes incorrect return values of some aggregate functions, such as `argMax`, for the `Dynamic`/`Variant`/`JSON` types. [#79166](https://github.com/ClickHouse/ClickHouse/pull/79166) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix applying the `use_native_copy` and `allow_azure_native_copy` settings for Azure Blob Storage, and use native copy only when credentials match. Resolves [#78964](https://github.com/ClickHouse/ClickHouse/issues/78964). [#79561](https://github.com/ClickHouse/ClickHouse/pull/79561) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)).
-* Fix logical errors about a column's unknown origin scope produced while checking if this column is correlated. Fixes [#78183](https://github.com/ClickHouse/ClickHouse/issues/78183). Fixes [#79451](https://github.com/ClickHouse/ClickHouse/issues/79451). [#79727](https://github.com/ClickHouse/ClickHouse/pull/79727) ([Dmitry Novik](https://github.com/novikd)).
-* Fix wrong results for grouping sets with ColumnConst and Analyzer. [#79743](https://github.com/ClickHouse/ClickHouse/pull/79743) ([Andrey Zvonov](https://github.com/zvonand)).
-* Fix local shard result duplication when reading from distributed table with local replica being stale. [#79761](https://github.com/ClickHouse/ClickHouse/pull/79761) ([Eduard Karacharov](https://github.com/korowa)).
-* Fix the sorting order of the NaNs with a negative sign bit. [#79847](https://github.com/ClickHouse/ClickHouse/pull/79847) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Now `GROUP BY ALL` doesn't take into account the `GROUPING` part. [#79915](https://github.com/ClickHouse/ClickHouse/pull/79915) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Fixed incorrect state merging for `TopK` / `TopKWeighted` functions that would cause excessive error values even when capacity was not exhausted. [#79939](https://github.com/ClickHouse/ClickHouse/pull/79939) ([Joel Höner](https://github.com/athre0z)).
-* Respect `readonly` setting in `azure_blob_storage` object storage. [#79954](https://github.com/ClickHouse/ClickHouse/pull/79954) ([Julia Kartseva](https://github.com/jkartseva)).
-* Fixed incorrect query results and out-of-memory crashes when using `match(column, '^…')` with backslash-escaped characters. [#79969](https://github.com/ClickHouse/ClickHouse/pull/79969) ([filimonov](https://github.com/filimonov)).
-* Disable Hive partitioning for data lakes. Partially addresses [#79937](https://github.com/ClickHouse/ClickHouse/issues/79937). [#80005](https://github.com/ClickHouse/ClickHouse/pull/80005) ([Daniil Ivanik](https://github.com/divanik)).
-* Skip indexes with lambda expressions could not be applied. Fix the case when high-level functions in the index definition exactly match the one in the query. [#80025](https://github.com/ClickHouse/ClickHouse/pull/80025) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix metadata version during attach part on the replica executing ATTACH_PART command from replication log. [#80038](https://github.com/ClickHouse/ClickHouse/pull/80038) ([Aleksei Filatov](https://github.com/aalexfvk)).
-* Executable user-defined function (eUDF) names were not added to the `used_functions` column of the `system.query_log` table, unlike other functions. The eUDF name is now added if the eUDF was used in the query. [#80073](https://github.com/ClickHouse/ClickHouse/pull/80073) ([Kyamran](https://github.com/nibblerenush)).
-* Fix logical error in Arrow format with LowCardinality(FixedString). [#80156](https://github.com/ClickHouse/ClickHouse/pull/80156) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix reading subcolumns from Merge engine. [#80158](https://github.com/ClickHouse/ClickHouse/pull/80158) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix a bug about the comparison between numeric types in `KeyCondition`. [#80207](https://github.com/ClickHouse/ClickHouse/pull/80207) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Fix AMBIGUOUS_COLUMN_NAME when lazy materialization applied to table with projections. [#80251](https://github.com/ClickHouse/ClickHouse/pull/80251) ([Igor Nikonov](https://github.com/devcrafter)).
-* Fix incorrect count optimization for string prefix filters like LIKE 'ab_c%' when using implicit projections. This fixes [#80250](https://github.com/ClickHouse/ClickHouse/issues/80250). [#80261](https://github.com/ClickHouse/ClickHouse/pull/80261) ([Amos Bird](https://github.com/amosbird)).
-* Fix improper serialization of nested numeric fields as strings in MongoDB documents. Remove maximum depth limit for documents from MongoDB. [#80289](https://github.com/ClickHouse/ClickHouse/pull/80289) ([Kirill Nikiforov](https://github.com/allmazz)).
-* Perform less strict metadata checks for RMT in the Replicated database. Closes [#80296](https://github.com/ClickHouse/ClickHouse/issues/80296). [#80298](https://github.com/ClickHouse/ClickHouse/pull/80298) ([Nikolay Degterinsky](https://github.com/evillique)).
-* Fix text representation of DateTime and DateTime64 for PostgreSQL storage. [#80301](https://github.com/ClickHouse/ClickHouse/pull/80301) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Allow `DateTime` with timezone in `StripeLog` tables. This closes [#44120](https://github.com/ClickHouse/ClickHouse/issues/44120). [#80304](https://github.com/ClickHouse/ClickHouse/pull/80304) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Disable filter-push-down for the predicate with a non-deterministic function in case the query plan step changes the number of rows. Fixes [#40273](https://github.com/ClickHouse/ClickHouse/issues/40273). [#80329](https://github.com/ClickHouse/ClickHouse/pull/80329) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix possible logical errors and crashes in projections with subcolumns. [#80333](https://github.com/ClickHouse/ClickHouse/pull/80333) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix `NOT_FOUND_COLUMN_IN_BLOCK` error caused by filter-push-down optimization of the logical JOIN step in case the `ON` expression is not a trivial equality. Fixes [#79647](https://github.com/ClickHouse/ClickHouse/issues/79647). Fixes [#77848](https://github.com/ClickHouse/ClickHouse/issues/77848). [#80360](https://github.com/ClickHouse/ClickHouse/pull/80360) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix incorrect result when reading reverse-ordered keys in partitioned tables. This fixes [#79987](https://github.com/ClickHouse/ClickHouse/issues/79987). [#80448](https://github.com/ClickHouse/ClickHouse/pull/80448) ([Amos Bird](https://github.com/amosbird)).
-* Fixed wrong sorting in tables with a nullable key and enabled optimize_read_in_order. [#80515](https://github.com/ClickHouse/ClickHouse/pull/80515) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Fixed refreshable materialized view DROP getting stuck if the view was paused using SYSTEM STOP REPLICATED VIEW. [#80543](https://github.com/ClickHouse/ClickHouse/pull/80543) ([Michael Kolupaev](https://github.com/al13n321)).
-* Fix 'Cannot find column' with constant tuple in distributed query. [#80596](https://github.com/ClickHouse/ClickHouse/pull/80596) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Fix `shardNum` function in Distributed tables with `join_use_nulls`. [#80612](https://github.com/ClickHouse/ClickHouse/pull/80612) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* Fix incorrect result during reading column that exists in subset of tables in Merge engine. [#80643](https://github.com/ClickHouse/ClickHouse/pull/80643) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix a possible hang of the SSH protocol (due to a hang in replxx). [#80688](https://github.com/ClickHouse/ClickHouse/pull/80688) ([Azat Khuzhin](https://github.com/azat)).
-* The timestamp in the `iceberg_history` table should now be correct. [#80711](https://github.com/ClickHouse/ClickHouse/pull/80711) ([Melvyn Peignon](https://github.com/melvynator)).
-* Fix a possible crash when dictionary registration fails (when `CREATE DICTIONARY` failed with `CANNOT_SCHEDULE_TASK`, it was possible to leave a dangling pointer in the dictionary registry, which could later lead to a crash). [#80714](https://github.com/ClickHouse/ClickHouse/pull/80714) ([Azat Khuzhin](https://github.com/azat)).
-* Fix handling of enum globs of a single element in object storage table functions. [#80716](https://github.com/ClickHouse/ClickHouse/pull/80716) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Fix wrong result type of comparison functions with Tuple(Dynamic) and String that led to logical error. [#80728](https://github.com/ClickHouse/ClickHouse/pull/80728) ([Pavel Kruglov](https://github.com/Avogar)).
-* Add missing support data type `timestamp_ntz` for unity catalog. Fixes [#79535](https://github.com/ClickHouse/ClickHouse/issues/79535), Fixes [#79875](https://github.com/ClickHouse/ClickHouse/issues/79875). [#80740](https://github.com/ClickHouse/ClickHouse/pull/80740) ([alesapin](https://github.com/alesapin)).
-* Fix `THERE_IS_NO_COLUMN` error for distributed queries with `IN cte`. Fixes [#75032](https://github.com/ClickHouse/ClickHouse/issues/75032). [#80757](https://github.com/ClickHouse/ClickHouse/pull/80757) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix an excessive number of files (which leads to excessive memory usage) for external `ORDER BY`. [#80777](https://github.com/ClickHouse/ClickHouse/pull/80777) ([Azat Khuzhin](https://github.com/azat)).
-* Possibly closes [#80742](https://github.com/ClickHouse/ClickHouse/issues/80742). [#80783](https://github.com/ClickHouse/ClickHouse/pull/80783) ([zoomxi](https://github.com/zoomxi)).
-* Fix a crash in Kafka caused by `get_member_id()` creating a `std::string` from NULL (likely an issue only when the connection to the broker had failed). [#80793](https://github.com/ClickHouse/ClickHouse/pull/80793) ([Azat Khuzhin](https://github.com/azat)).
-* Properly wait for consumers before shutting down the Kafka engine (active consumers after shutdown can trigger various debug assertions and may also read data from brokers in the background after the table has been dropped/detached). [#80795](https://github.com/ClickHouse/ClickHouse/pull/80795) ([Azat Khuzhin](https://github.com/azat)).
-* Fix `NOT_FOUND_COLUMN_IN_BLOCK`, which is caused by `predicate-push-down` optimization. Fixes [#80443](https://github.com/ClickHouse/ClickHouse/issues/80443). [#80834](https://github.com/ClickHouse/ClickHouse/pull/80834) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix logical error when resolving star (*) matcher in table function in JOIN with USING. [#80894](https://github.com/ClickHouse/ClickHouse/pull/80894) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Fix memory accounting for Iceberg metadata files cache. [#80904](https://github.com/ClickHouse/ClickHouse/pull/80904) ([Azat Khuzhin](https://github.com/azat)).
-* Fix wrong partitioning with nullable partition key. [#80913](https://github.com/ClickHouse/ClickHouse/pull/80913) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Fix `Table does not exist` error for distributed queries with pushed-down predicate (`allow_push_predicate_ast_for_distributed_subqueries=1`) when the source table does not exist on the initiator. Fixes [#77281](https://github.com/ClickHouse/ClickHouse/issues/77281). [#80915](https://github.com/ClickHouse/ClickHouse/pull/80915) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix the logical error in the nested functions with named windows. [#80926](https://github.com/ClickHouse/ClickHouse/pull/80926) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Fix extremes for nullable and floating-point columns. [#80970](https://github.com/ClickHouse/ClickHouse/pull/80970) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Fix possible crash while querying from system.tables (likely the case under memory pressure). [#80976](https://github.com/ClickHouse/ClickHouse/pull/80976) ([Azat Khuzhin](https://github.com/azat)).
-* Fix atomic rename with truncate for files whose compression is inferred from their file extension. [#80979](https://github.com/ClickHouse/ClickHouse/pull/80979) ([Pablo Marcos](https://github.com/pamarcos)).
-* Fix `ErrorCodes::getName`. [#81032](https://github.com/ClickHouse/ClickHouse/pull/81032) ([RinChanNOW](https://github.com/RinChanNOWWW)).
-* Fix a bug where a user could not list tables in Unity Catalog without permissions for all of them. Now all tables are listed properly, and an attempt to read from a restricted table throws an exception. [#81044](https://github.com/ClickHouse/ClickHouse/pull/81044) ([alesapin](https://github.com/alesapin)).
-* Now ClickHouse will ignore errors and unexpected responses from data lake catalogs in `SHOW TABLES` query. Fixes [#79725](https://github.com/ClickHouse/ClickHouse/issues/79725). [#81046](https://github.com/ClickHouse/ClickHouse/pull/81046) ([alesapin](https://github.com/alesapin)).
-* Fix parsing of DateTime64 from integers in JSONExtract and JSON type parsing. [#81050](https://github.com/ClickHouse/ClickHouse/pull/81050) ([Pavel Kruglov](https://github.com/Avogar)).
-* Reflect the `date_time_input_format` setting in the schema inference cache. [#81052](https://github.com/ClickHouse/ClickHouse/pull/81052) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix a crash on `INSERT` if the table was dropped after the query started but before the columns were sent. [#81053](https://github.com/ClickHouse/ClickHouse/pull/81053) ([Azat Khuzhin](https://github.com/azat)).
-* Fix use-of-uninitialized-value in quantileDeterministic. [#81062](https://github.com/ClickHouse/ClickHouse/pull/81062) ([Azat Khuzhin](https://github.com/azat)).
-* Fix hardlink count management for `MetadataStorageFromDisk` disk transactions, and add tests. [#81066](https://github.com/ClickHouse/ClickHouse/pull/81066) ([Sema Checherinda](https://github.com/CheSema)).
-* User-defined function (UDF) names were not added to the `system.query_log` table, unlike other functions. The UDF name is now added to one of the two columns `used_executable_user_defined_functions` or `used_sql_user_defined_functions` if a UDF was used in the query. [#81101](https://github.com/ClickHouse/ClickHouse/pull/81101) ([Kyamran](https://github.com/nibblerenush)).
-* Fixed `Too large size ... passed to allocator` errors or possible crashes on inserts via http protocol with text formats (`JSON`, `Values`, ...) and omitted `Enum` fields. [#81145](https://github.com/ClickHouse/ClickHouse/pull/81145) ([Anton Popov](https://github.com/CurtizJ)).
-* Fix `LOGICAL_ERROR` in case of a Sparse column in an `INSERT` block pushed to a non-MergeTree materialized view. [#81161](https://github.com/ClickHouse/ClickHouse/pull/81161) ([Azat Khuzhin](https://github.com/azat)).
-* Fix `Unknown table expression identifier` for `distributed_product_mode_local=local` with cross-replication. [#81162](https://github.com/ClickHouse/ClickHouse/pull/81162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fixed incorrectly caching number of rows in parquet files after filtering. [#81184](https://github.com/ClickHouse/ClickHouse/pull/81184) ([Michael Kolupaev](https://github.com/al13n321)).
-* Fix fs cache max_size_to_total_space setting when used with relative cache path. [#81237](https://github.com/ClickHouse/ClickHouse/pull/81237) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fixed clickhouse-local crashing when outputting const tuples or maps in Parquet format. [#81249](https://github.com/ClickHouse/ClickHouse/pull/81249) ([Michael Kolupaev](https://github.com/al13n321)).
-* Verify array offsets received over network. [#81269](https://github.com/ClickHouse/ClickHouse/pull/81269) ([Azat Khuzhin](https://github.com/azat)).
-* Fix a corner case in queries that join empty tables and use window functions. The bug led to an exploding number of parallel streams, which led to OOMs. [#81299](https://github.com/ClickHouse/ClickHouse/pull/81299) ([Alexander Gololobov](https://github.com/davenger)).
-* Fixes for datalake Cluster functions (`deltaLakeCluster`, `icebergCluster`, etc): (1) fix potential segfault in `DataLakeConfiguration` when using `Cluster` function with old analyzer; (2) remove duplicating data lake metadata updates (extra object storage requests); (3) fix redundant listing in object storage when format is not explicitly specified (which was already done for non-cluster data lake engines). [#81300](https://github.com/ClickHouse/ClickHouse/pull/81300) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Make the `force_restore_data` flag recover lost Keeper metadata. [#81324](https://github.com/ClickHouse/ClickHouse/pull/81324) ([Raúl Marín](https://github.com/Algunenano)).
-* Fix region error in delta-kernel. Fixes [#79914](https://github.com/ClickHouse/ClickHouse/issues/79914). [#81353](https://github.com/ClickHouse/ClickHouse/pull/81353) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Disable incorrect JIT for divideOrNull. [#81370](https://github.com/ClickHouse/ClickHouse/pull/81370) ([Raúl Marín](https://github.com/Algunenano)).
-* Fix insert error when MergeTree table has a long partition column name. [#81390](https://github.com/ClickHouse/ClickHouse/pull/81390) ([hy123q](https://github.com/haoyangqian)).
-* Backported in [#81957](https://github.com/ClickHouse/ClickHouse/issues/81957): Fixed possible crash in `Aggregator` in case of exception during merge. [#81450](https://github.com/ClickHouse/ClickHouse/pull/81450) ([Nikita Taranov](https://github.com/nickitat)).
-* Don't store content of several manifest files in memory. [#81470](https://github.com/ClickHouse/ClickHouse/pull/81470) ([Daniil Ivanik](https://github.com/divanik)).
-* Fix possible crash during shutting down background pools (`background_.*pool_size`). [#81473](https://github.com/ClickHouse/ClickHouse/pull/81473) ([Azat Khuzhin](https://github.com/azat)).
-* Fix out-of-bounds read in the `Npy` format happening when writing to a table with the `URL` engine. This closes [#81356](https://github.com/ClickHouse/ClickHouse/issues/81356). [#81502](https://github.com/ClickHouse/ClickHouse/pull/81502) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix a chance that the Web UI displays `NaN%` (a typical JavaScript problem). [#81507](https://github.com/ClickHouse/ClickHouse/pull/81507) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix `DatabaseReplicated` for `database_replicated_enforce_synchronous_settings=1`. [#81564](https://github.com/ClickHouse/ClickHouse/pull/81564) ([Azat Khuzhin](https://github.com/azat)).
-* Fix sorting order for LowCardinality(Nullable(...)) types. [#81583](https://github.com/ClickHouse/ClickHouse/pull/81583) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* The server no longer preserves an HTTP connection if the request has not been fully read from the socket. [#81595](https://github.com/ClickHouse/ClickHouse/pull/81595) ([Sema Checherinda](https://github.com/CheSema)).
-* Make scalar correlated subqueries return a nullable result of the projection expression. Fix the case when a correlated subquery produces an empty result set. [#81632](https://github.com/ClickHouse/ClickHouse/pull/81632) ([Dmitry Novik](https://github.com/novikd)).
-* Fix `Unexpected relative path for a deduplicated part` during `ATTACH` to `ReplicatedMergeTree`. [#81647](https://github.com/ClickHouse/ClickHouse/pull/81647) ([Azat Khuzhin](https://github.com/azat)).
-* The query setting `use_iceberg_partition_pruning` did not take effect for Iceberg storage, because it used the global context rather than the query context. This was not critical, because its default value is true. It is now fixed. [#81673](https://github.com/ClickHouse/ClickHouse/pull/81673) ([Han Fei](https://github.com/hanfei1991)).
-* Backported in [#82128](https://github.com/ClickHouse/ClickHouse/issues/82128): Fix "Context has expired" during merges when dict used in TTL expression. [#81690](https://github.com/ClickHouse/ClickHouse/pull/81690) ([Azat Khuzhin](https://github.com/azat)).
-* Add validation for the MergeTree setting `merge_max_block_size` to ensure that it is non-zero. [#81693](https://github.com/ClickHouse/ClickHouse/pull/81693) ([Bharat Nallan](https://github.com/bharatnc)).
-* Fix issues with `clickhouse-local` involving stuck `DROP VIEW` queries. [#81705](https://github.com/ClickHouse/ClickHouse/pull/81705) ([Bharat Nallan](https://github.com/bharatnc)).
-* Fix StorageRedis join in some cases. [#81736](https://github.com/ClickHouse/ClickHouse/pull/81736) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Fix crash in `ConcurrentHashJoin` with empty `USING ()` and old analyzer enabled. [#81754](https://github.com/ClickHouse/ClickHouse/pull/81754) ([Nikita Taranov](https://github.com/nickitat)).
-* Keeper fix: block commits of new logs if there is invalid entry in the logs. Previously, if leader applied some logs incorrectly, it would continue to commit new logs, even though the follower would detect digest mismatch and abort. [#81780](https://github.com/ClickHouse/ClickHouse/pull/81780) ([Antonio Andelic](https://github.com/antonio2368)).
-* Fix the issue where required columns are not read during scalar correlated subquery processing. Fixes [#81716](https://github.com/ClickHouse/ClickHouse/issues/81716). [#81805](https://github.com/ClickHouse/ClickHouse/pull/81805) ([Dmitry Novik](https://github.com/novikd)).
-* Someone littered our code with Kusto. Cleaned it up. This closes [#81643](https://github.com/ClickHouse/ClickHouse/issues/81643). [#81885](https://github.com/ClickHouse/ClickHouse/pull/81885) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* In previous versions, the server returned excessive content for requests to `/js`. This closes [#61890](https://github.com/ClickHouse/ClickHouse/issues/61890). [#81895](https://github.com/ClickHouse/ClickHouse/pull/81895) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Previously, `MongoDB` table engine definitions could include a path component in the `host:port` argument, which was silently ignored. The MongoDB integration refuses to load such tables. With this fix *we allow loading such tables and ignore the path component* if the `MongoDB` engine has five arguments, using the database name from the arguments. *Note:* the fix is not applied to newly created tables or queries with the `mongo` table function, nor to dictionary sources and named collections. [#81942](https://github.com/ClickHouse/ClickHouse/pull/81942) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Fixed possible crash in `Aggregator` in case of exception during merge. [#82022](https://github.com/ClickHouse/ClickHouse/pull/82022) ([Nikita Taranov](https://github.com/nickitat)).
-* Fix a copy-paste error in `arraySimilarity` that disallowed the use of `UInt32` and `Int32` weights. Tests and docs updated. [#82103](https://github.com/ClickHouse/ClickHouse/pull/82103) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
-* Fix possible data-race between suggestion thread and main client thread. [#82233](https://github.com/ClickHouse/ClickHouse/pull/82233) ([Azat Khuzhin](https://github.com/azat)).
-
-#### Build/Testing/Packaging Improvement
-* Use `postgres` 16.9. [#81437](https://github.com/ClickHouse/ClickHouse/pull/81437) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Use `openssl` 3.2.4. [#81438](https://github.com/ClickHouse/ClickHouse/pull/81438) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Use `abseil-cpp` 2025-01-27. [#81440](https://github.com/ClickHouse/ClickHouse/pull/81440) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Use `mongo-c-driver` 1.30.4. [#81449](https://github.com/ClickHouse/ClickHouse/pull/81449) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Use `krb5` 1.21.3-final. [#81453](https://github.com/ClickHouse/ClickHouse/pull/81453) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Use `orc` 2.1.2. [#81455](https://github.com/ClickHouse/ClickHouse/pull/81455) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Use `grpc` 1.73.0. [#81629](https://github.com/ClickHouse/ClickHouse/pull/81629) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Use `delta-kernel-rs` v0.12.1. [#81707](https://github.com/ClickHouse/ClickHouse/pull/81707) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Update `c-ares` to `v1.34.5`. [#81159](https://github.com/ClickHouse/ClickHouse/pull/81159) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Upgrade `curl` to 8.14 to address CVE-2025-5025 and CVE-2025-4947. [#81171](https://github.com/ClickHouse/ClickHouse/pull/81171) ([larryluogit](https://github.com/larryluogit)).
-* Upgrade `libarchive` to 3.7.9 to address: CVE-2024-20696 CVE-2025-25724 CVE-2024-48958 CVE-2024-57970 CVE-2025-1632 CVE-2024-48957 CVE-2024-48615. [#81174](https://github.com/ClickHouse/ClickHouse/pull/81174) ([larryluogit](https://github.com/larryluogit)).
-* Upgrade `libxml2` to 2.14.3. [#81187](https://github.com/ClickHouse/ClickHouse/pull/81187) ([larryluogit](https://github.com/larryluogit)).
-* Avoid copying vendored Rust sources to `CARGO_HOME`. [#79560](https://github.com/ClickHouse/ClickHouse/pull/79560) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Remove dependency on the Sentry library by replacing it with our own endpoint. [#80236](https://github.com/ClickHouse/ClickHouse/pull/80236) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Update python dependencies in CI images to address Dependabot alerts. [#80658](https://github.com/ClickHouse/ClickHouse/pull/80658) ([Raúl Marín](https://github.com/Algunenano)).
-* Retry reading of replicated DDL stop flag from Keeper at startup to make tests more robust when fault injection is enabled for Keeper. [#80964](https://github.com/ClickHouse/ClickHouse/pull/80964) ([Alexander Gololobov](https://github.com/davenger)).
-* Use HTTPS for the Ubuntu archive URL. [#81016](https://github.com/ClickHouse/ClickHouse/pull/81016) ([Raúl Marín](https://github.com/Algunenano)).
-* Update python dependencies in test images. [#81042](https://github.com/ClickHouse/ClickHouse/pull/81042) ([dependabot[bot]](https://github.com/apps/dependabot)).
-* Introduce `flake.nix` for Nix builds. [#81463](https://github.com/ClickHouse/ClickHouse/pull/81463) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Fix `delta-kernel-rs` requiring network access during build. Closes [#80609](https://github.com/ClickHouse/ClickHouse/issues/80609). [#81602](https://github.com/ClickHouse/ClickHouse/pull/81602) ([Konstantin Bogdanov](https://github.com/thevar1able)). Read the article [A Year of Rust in ClickHouse](https://clickhouse.com/blog/rust).
-
-
-### ClickHouse release 25.5, 2025-05-22 {#255}
-
-#### Backward Incompatible Change
-* Function `geoToH3` now accepts the input in the order (lat, lon, res) (which is common for other geometric functions). Users who wish to retain the previous argument order (lon, lat, res) can set the setting `geotoh3_argument_order = 'lon_lat'`; see the sketch after this list. [#78852](https://github.com/ClickHouse/ClickHouse/pull/78852) ([Pratima Patel](https://github.com/pratimapatel2008)).
-* Add a filesystem cache setting `allow_dynamic_cache_resize`, by default `false`, to allow dynamic resizing of the filesystem cache. Why: in certain environments (ClickHouse Cloud) all scaling events happen through a restart of the process, and we want this feature to be explicitly disabled there for more control over the behaviour and as a safety measure. This change is marked as backward incompatible, because in older versions dynamic cache resize worked by default without a special setting. [#79148](https://github.com/ClickHouse/ClickHouse/pull/79148) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Removed support for legacy index types `annoy` and `usearch`. Both have been stubs for a long time, i.e. every attempt to use the legacy indexes returned an error anyways. If you still have `annoy` and `usearch` indexes, please drop them. [#79802](https://github.com/ClickHouse/ClickHouse/pull/79802) ([Robert Schulze](https://github.com/rschu1ze)).
-* Remove `format_alter_commands_with_parentheses` server setting. The setting was introduced and disabled by default in 24.2. It was enabled by default in 25.2. As there are no LTS versions that don't support the new format, we can remove the setting. [#79970](https://github.com/ClickHouse/ClickHouse/pull/79970) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* Enable `DeltaLake` storage `delta-kernel-rs` implementation by default. [#79541](https://github.com/ClickHouse/ClickHouse/pull/79541) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* If reading from a `URL` involves multiple redirects, the setting `enable_url_encoding` is now correctly applied across all redirects in the chain. [#79563](https://github.com/ClickHouse/ClickHouse/pull/79563) ([Shankar Iyer](https://github.com/shankar-iyer)). The default value of the setting `enable_url_encoding` is now `false`. [#80088](https://github.com/ClickHouse/ClickHouse/pull/80088) ([Shankar Iyer](https://github.com/shankar-iyer)).
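-
-For the `geoToH3` change above, a quick sketch of the new argument order and the compatibility setting (the coordinates are hypothetical):
-
-```sql
--- New default order: (lat, lon, res).
-SELECT geoToH3(55.71290588, 37.79506683, 15);
-
--- Retain the previous (lon, lat, res) order for a single query:
-SELECT geoToH3(37.79506683, 55.71290588, 15)
-SETTINGS geotoh3_argument_order = 'lon_lat';
-```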
-
-#### New Feature
-* Support scalar correlated subqueries in the WHERE clause; see the sketch after this list. Closes [#6697](https://github.com/ClickHouse/ClickHouse/issues/6697). [#79600](https://github.com/ClickHouse/ClickHouse/pull/79600) ([Dmitry Novik](https://github.com/novikd)). Support correlated subqueries in the projection list in simple cases. [#79925](https://github.com/ClickHouse/ClickHouse/pull/79925) ([Dmitry Novik](https://github.com/novikd)). [#76078](https://github.com/ClickHouse/ClickHouse/pull/76078) ([Dmitry Novik](https://github.com/novikd)). Now it covers 100% of the TPC-H test suite.
-* Vector search using the vector similarity index is now beta (from previously experimental). [#80164](https://github.com/ClickHouse/ClickHouse/pull/80164) ([Robert Schulze](https://github.com/rschu1ze)).
-* Support geo types in `Parquet` format. This closes [#75317](https://github.com/ClickHouse/ClickHouse/issues/75317). [#79777](https://github.com/ClickHouse/ClickHouse/pull/79777) ([scanhex12](https://github.com/scanhex12)).
-* New functions `sparseGrams`, `sparseGramsHashes`, `sparseGramsHashesUTF8`, `sparseGramsUTF8` for calculating "sparse-ngrams" - a robust algorithm for extracting substrings for indexing and search. [#79517](https://github.com/ClickHouse/ClickHouse/pull/79517) ([scanhex12](https://github.com/scanhex12)).
-* `clickhouse-local` (and its shorthand alias, `ch`) now use an implicit `FROM table` when there is input data for processing. This closes [#65023](https://github.com/ClickHouse/ClickHouse/issues/65023). Also enabled format inference in clickhouse-local if `--input-format` is not specified and it processes a regular file. [#79085](https://github.com/ClickHouse/ClickHouse/pull/79085) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add `stringBytesUniq` and `stringBytesEntropy` functions to search for possibly random or encrypted data. [#79350](https://github.com/ClickHouse/ClickHouse/pull/79350) ([Sachin Kumar Singh](https://github.com/sachinkumarsingh092)).
-* Added functions for encoding and decoding base32; see the sketch after this list. [#79809](https://github.com/ClickHouse/ClickHouse/pull/79809) ([Joanna Hulboj](https://github.com/jh0x)).
-* Add the `getServerSetting` and `getMergeTreeSetting` functions; see the sketch after this list. Closes [#78318](https://github.com/ClickHouse/ClickHouse/issues/78318). [#78439](https://github.com/ClickHouse/ClickHouse/pull/78439) ([NamNguyenHoai](https://github.com/NamHoaiNguyen)).
-* Add new `iceberg_enable_version_hint` setting to leverage `version-hint.text` file. [#78594](https://github.com/ClickHouse/ClickHouse/pull/78594) ([Arnaud Briche](https://github.com/arnaudbriche)).
-* Add the ability to truncate specific tables in a database, filtered with the `LIKE` keyword. [#78597](https://github.com/ClickHouse/ClickHouse/pull/78597) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Support the `_part_starting_offset` virtual column in `MergeTree`-family tables. This column represents the cumulative row count of all preceding parts, calculated at query time based on the current part list. The cumulative values are retained throughout query execution and remain effective even after part pruning. Related internal logic has been refactored to support this behavior. A usage sketch is shown after this list. [#79417](https://github.com/ClickHouse/ClickHouse/pull/79417) ([Amos Bird](https://github.com/amosbird)).
-* Add functions `divideOrNull`, `moduloOrNull`, `intDivOrNull`, `positiveModuloOrNull` that return `NULL` when the right argument is zero; see the sketch after this list. [#78276](https://github.com/ClickHouse/ClickHouse/pull/78276) ([kevinyhzou](https://github.com/KevinyhZou)).
-* ClickHouse vector search now supports both pre-filtering and post-filtering, and provides related settings for finer control (issue [#78161](https://github.com/ClickHouse/ClickHouse/issues/78161)). [#79854](https://github.com/ClickHouse/ClickHouse/pull/79854) ([Shankar Iyer](https://github.com/shankar-iyer)).
-* Add [`icebergHash`](https://iceberg.apache.org/spec/#appendix-b-32-bit-hash-requirements) and [`icebergBucket`](https://iceberg.apache.org/spec/#bucket-transform-details) functions. Support data file pruning in `Iceberg` tables partitioned with the [`bucket` transform](https://iceberg.apache.org/spec/#partitioning). [#79262](https://github.com/ClickHouse/ClickHouse/pull/79262) ([Daniil Ivanik](https://github.com/divanik)).
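-
-For the correlated-subqueries feature above, a minimal sketch of a scalar correlated subquery in `WHERE` (the `products` table is hypothetical):
-
-```sql
--- For each outer row, the subquery references the outer column p.category.
-SELECT name
-FROM products AS p
-WHERE p.price > (SELECT avg(price) FROM products WHERE category = p.category);
-```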
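-
-For the base32 functions above, a round-trip sketch (assuming the functions are named `base32Encode` and `base32Decode`):
-
-```sql
-SELECT base32Encode('ClickHouse') AS encoded,
-       base32Decode(base32Encode('ClickHouse')) AS decoded;
-```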
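-
-For the setting-introspection functions above, a usage sketch (assuming each function takes the setting name as a string; the chosen settings are examples):
-
-```sql
-SELECT getServerSetting('max_connections')      AS server_value,
-       getMergeTreeSetting('index_granularity') AS merge_tree_value;
-```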
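-
-For the `_part_starting_offset` virtual column above, a sketch that combines it with the existing `_part_offset` virtual column to derive a query-time global row number (the table name is hypothetical):
-
-```sql
-SELECT _part,
-       _part_starting_offset,
-       _part_starting_offset + _part_offset AS global_row_number
-FROM events;
-```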
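-
-For the `...OrNull` arithmetic functions above, a quick sketch of the NULL-on-zero behaviour:
-
-```sql
--- Each call returns NULL instead of throwing a division-by-zero error.
-SELECT divideOrNull(10, 0) AS d,
-       moduloOrNull(10, 0) AS m,
-       intDivOrNull(10, 0) AS i;
-```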
-
-#### Experimental Feature
-* New `Time`/`Time64` data types: `Time` (HHH:MM:SS) and `Time64` (HHH:MM:SS.`<fractional>`), with some basic cast functions and functions to interact with other data types. Also, the existing function `toTime` was renamed to `toTimeWithFixedDate`, because the name `toTime` is required for the cast function. Closes [#72459](https://github.com/ClickHouse/ClickHouse/issues/72459). [#75735](https://github.com/ClickHouse/ClickHouse/pull/75735) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Hive metastore catalog for Iceberg datalake. [#77677](https://github.com/ClickHouse/ClickHouse/pull/77677) ([scanhex12](https://github.com/scanhex12)).
-* Indexes of type `full_text` were renamed to `gin`. This follows the more familiar terminology of PostgreSQL and other databases. Existing indexes of type `full_text` remain loadable but they will throw an exception (suggesting `gin` indexes instead) when one tries to use them in searches. [#79024](https://github.com/ClickHouse/ClickHouse/pull/79024) ([Robert Schulze](https://github.com/rschu1ze)).
-
-#### Performance Improvement
-* Change the Compact part format to save marks for each substream to be able to read individual subcolumns. Old Compact format is still supported for reads and can be enabled for writes using MergeTree setting `write_marks_for_substreams_in_compact_parts`. It's disabled by default for safer upgrades as it changes the compact parts storage. It will be enabled by default in one of the next releases. [#77940](https://github.com/ClickHouse/ClickHouse/pull/77940) ([Pavel Kruglov](https://github.com/Avogar)).
-* Allow moving conditions with subcolumns to prewhere. [#79489](https://github.com/ClickHouse/ClickHouse/pull/79489) ([Pavel Kruglov](https://github.com/Avogar)).
-* Speed up secondary indices by evaluating their expressions on multiple granules at once. [#64109](https://github.com/ClickHouse/ClickHouse/pull/64109) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Enable `compile_expressions` (JIT compiler for fragments of ordinary expressions) by default. This closes [#51264](https://github.com/ClickHouse/ClickHouse/issues/51264) and [#56386](https://github.com/ClickHouse/ClickHouse/issues/56386) and [#66486](https://github.com/ClickHouse/ClickHouse/issues/66486). [#79907](https://github.com/ClickHouse/ClickHouse/pull/79907) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* New setting introduced: `use_skip_indexes_in_final_exact_mode`. If a query on a `ReplacingMergeTree` table has FINAL clause, reading only table ranges based on skip indexes may produce incorrect result. This setting can ensure that correct results are returned by scanning newer parts that have overlap with primary key ranges returned by the skip index. Set to 0 to disable, 1 to enable. [#78350](https://github.com/ClickHouse/ClickHouse/pull/78350) ([Shankar Iyer](https://github.com/shankar-iyer)).
-* Object storage cluster table functions (e.g. `s3Cluster`) will now assign files to replicas for reading based on consistent hash to improve cache locality. [#77326](https://github.com/ClickHouse/ClickHouse/pull/77326) ([Andrej Hoos](https://github.com/adikus)).
-* Improve performance of `S3Queue`/`AzureQueue` by allowing parallel `INSERT`s (can be enabled with the `parallel_inserts=true` queue setting). Previously, `S3Queue`/`AzureQueue` could only run the first part of the pipeline in parallel (downloading, parsing); `INSERT` was single-threaded, and `INSERT`s are almost always the bottleneck. Now it scales almost linearly with `processing_threads_num`. [#77671](https://github.com/ClickHouse/ClickHouse/pull/77671) ([Azat Khuzhin](https://github.com/azat)). Fairer `max_processed_files_before_commit` in `S3Queue`/`AzureQueue`. [#79363](https://github.com/ClickHouse/ClickHouse/pull/79363) ([Azat Khuzhin](https://github.com/azat)).
-* Introduced threshold (regulated by setting `parallel_hash_join_threshold`) to fall back to the `hash` algorithm when the size of the right table is below the threshold. [#76185](https://github.com/ClickHouse/ClickHouse/pull/76185) ([Nikita Taranov](https://github.com/nickitat)).
-* Now we use the number of replicas to determine task size for reading with parallel replicas enabled. This provides better work distribution between replicas when the amount of data to read is not very big. [#78695](https://github.com/ClickHouse/ClickHouse/pull/78695) ([Nikita Taranov](https://github.com/nickitat)).
-* Allow parallel merging of `uniqExact` states during the final stage of distributed aggregation. [#78703](https://github.com/ClickHouse/ClickHouse/pull/78703) ([Nikita Taranov](https://github.com/nickitat)).
-* Fix possible performance degradation of the parallel merging of `uniqExact` states for aggregation with key. [#78724](https://github.com/ClickHouse/ClickHouse/pull/78724) ([Nikita Taranov](https://github.com/nickitat)).
-* Reduce the number of List Blobs API calls to Azure storage. [#78860](https://github.com/ClickHouse/ClickHouse/pull/78860) ([Julia Kartseva](https://github.com/jkartseva)).
-* Fix performance of the distributed INSERT SELECT with parallel replicas. [#79441](https://github.com/ClickHouse/ClickHouse/pull/79441) ([Azat Khuzhin](https://github.com/azat)).
-* Prevent `LogSeriesLimiter` from doing cleanup on every construction, avoiding lock contention and performance regressions in high-concurrency scenarios. [#79864](https://github.com/ClickHouse/ClickHouse/pull/79864) ([filimonov](https://github.com/filimonov)).
-* Speedup queries with trivial count optimization. [#79945](https://github.com/ClickHouse/ClickHouse/pull/79945) ([Raúl Marín](https://github.com/Algunenano)).
-* Better inlining for some operations with `Decimal`. [#79999](https://github.com/ClickHouse/ClickHouse/pull/79999) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Set `input_format_parquet_bloom_filter_push_down` to true by default. Also, fix a mistake in the settings changes history. [#80058](https://github.com/ClickHouse/ClickHouse/pull/80058) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Optimized `ALTER ... DELETE` mutations for parts in which all rows should be deleted. Now, in such cases, an empty part is created instead of the original one, without executing a mutation. [#79307](https://github.com/ClickHouse/ClickHouse/pull/79307) ([Anton Popov](https://github.com/CurtizJ)).
-* Avoid extra copying of the block during insertion into Compact part when possible. [#79536](https://github.com/ClickHouse/ClickHouse/pull/79536) ([Pavel Kruglov](https://github.com/Avogar)).
-* Add setting `input_format_max_block_size_bytes` to limit the size in bytes of blocks created by input formats. It can help to avoid high memory usage during data import when rows contain large values; see the sketch after this list. [#79495](https://github.com/ClickHouse/ClickHouse/pull/79495) ([Pavel Kruglov](https://github.com/Avogar)).
-* Remove guard pages for threads and `async_socket_for_remote`/`use_hedge_requests`. Change the allocation method in `FiberStack` from `mmap` to `aligned_alloc`, since `mmap` splits VMAs and under heavy load `vm.max_map_count` can be reached. [#79147](https://github.com/ClickHouse/ClickHouse/pull/79147) ([Sema Checherinda](https://github.com/CheSema)).
-* Lazy Materialization with parallel replicas. [#79401](https://github.com/ClickHouse/ClickHouse/pull/79401) ([Igor Nikonov](https://github.com/devcrafter)).
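-
-For the `input_format_max_block_size_bytes` setting above, a sketch of capping input block sizes during import (the table name and threshold are hypothetical):
-
-```sql
--- Limit blocks produced by the input format to roughly 10 MiB each.
-INSERT INTO wide_rows
-SETTINGS input_format_max_block_size_bytes = 10485760
-FORMAT JSONEachRow
-{"id": 1, "payload": "..."}
-```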
-
-#### Improvement
-* Added the ability to apply lightweight deletes on the fly (with settings `lightweight_deletes_sync = 0`, `apply_mutations_on_fly = 1`); see the sketch after this list. [#79281](https://github.com/ClickHouse/ClickHouse/pull/79281) ([Anton Popov](https://github.com/CurtizJ)).
-* If data in the pretty format is displayed in the terminal, and a subsequent block has the same column widths, it can continue from the previous block, gluing itself to the previous block by moving the cursor up. This closes [#79333](https://github.com/ClickHouse/ClickHouse/issues/79333). The feature is controlled by the new setting `output_format_pretty_glue_chunks`. [#79339](https://github.com/ClickHouse/ClickHouse/pull/79339) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Extend the `isIPAddressInRange` function to `String`, `IPv4`, `IPv6`, `Nullable(String)`, `Nullable(IPv4)`, and `Nullable(IPv6)` data types; see the sketch after this list. [#78364](https://github.com/ClickHouse/ClickHouse/pull/78364) ([YjyJeff](https://github.com/YjyJeff)).
-* Allow changing `PostgreSQL` engine connection pooler settings dynamically. [#78414](https://github.com/ClickHouse/ClickHouse/pull/78414) ([Samay Sharma](https://github.com/samay-sharma)).
-* Allow to specify `_part_offset` in a normal projection. This is the first step to build a projection index. It can be used with [#58224](https://github.com/ClickHouse/ClickHouse/issues/58224) and can help improve [#63207](https://github.com/ClickHouse/ClickHouse/issues/63207). [#78429](https://github.com/ClickHouse/ClickHouse/pull/78429) ([Amos Bird](https://github.com/amosbird)).
-* Add new columns (`create_query` and `source`) for `system.named_collections`. Closes [#78179](https://github.com/ClickHouse/ClickHouse/issues/78179). [#78582](https://github.com/ClickHouse/ClickHouse/pull/78582) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
-* Added a new field `condition` to system table `system.query_condition_cache`. It stores the plaintext condition whose hash is used as a key in the query condition cache. [#78671](https://github.com/ClickHouse/ClickHouse/pull/78671) ([Robert Schulze](https://github.com/rschu1ze)).
-* Vector similarity indexes can now be created on top of `BFloat16` columns. [#78850](https://github.com/ClickHouse/ClickHouse/pull/78850) ([Robert Schulze](https://github.com/rschu1ze)).
-* Support Unix timestamps with a fractional part in best-effort `DateTime64` parsing. [#78908](https://github.com/ClickHouse/ClickHouse/pull/78908) ([Pavel Kruglov](https://github.com/Avogar)).
-* In the storage `DeltaLake` delta-kernel implementation, fix for column mapping mode, add tests for schema evolution. [#78921](https://github.com/ClickHouse/ClickHouse/pull/78921) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Improve insert into `Variant` column in Values format by better conversion of values. [#78923](https://github.com/ClickHouse/ClickHouse/pull/78923) ([Pavel Kruglov](https://github.com/Avogar)).
-* The `tokens` function was extended to accept an additional "tokenizer" argument plus further tokenizer-specific arguments. [#79001](https://github.com/ClickHouse/ClickHouse/pull/79001) ([Elmi Ahmadov](https://github.com/ahmadov)).
-* The `SHOW CLUSTER` statement now expands macros (if any) in its argument. [#79006](https://github.com/ClickHouse/ClickHouse/pull/79006) ([arf42](https://github.com/arf42)).
-* Hash functions now support `NULL`s inside arrays, tuples, and maps (issues [#48365](https://github.com/ClickHouse/ClickHouse/issues/48365) and [#48623](https://github.com/ClickHouse/ClickHouse/issues/48623)); see the sketch after this list. [#79008](https://github.com/ClickHouse/ClickHouse/pull/79008) ([Michael Kolupaev](https://github.com/al13n321)).
-* Update cctz to 2025a. [#79043](https://github.com/ClickHouse/ClickHouse/pull/79043) ([Raúl Marín](https://github.com/Algunenano)).
-* Change the default stderr processing for UDFs to "log_last". It's better for usability. [#79066](https://github.com/ClickHouse/ClickHouse/pull/79066) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Make tabs undo-able in the Web UI. This closes [#71284](https://github.com/ClickHouse/ClickHouse/issues/71284). [#79084](https://github.com/ClickHouse/ClickHouse/pull/79084) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Remove settings during `recoverLostReplica`, the same as was done in https://github.com/ClickHouse/ClickHouse/pull/78637. [#79113](https://github.com/ClickHouse/ClickHouse/pull/79113) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Add profile events: `ParquetReadRowGroups` and `ParquetPrunedRowGroups` to profile parquet index prune. [#79180](https://github.com/ClickHouse/ClickHouse/pull/79180) ([flynn](https://github.com/ucasfl)).
-* Support `ALTER`ing database on cluster. [#79242](https://github.com/ClickHouse/ClickHouse/pull/79242) ([Tuan Pham Anh](https://github.com/tuanpach)).
-* Explicitly skip missed runs of statistics collection for QueryMetricLog, otherwise the log will take a long time to catch up with the current time. [#79257](https://github.com/ClickHouse/ClickHouse/pull/79257) ([Mikhail Artemenko](https://github.com/Michicosun)).
-* Some small optimizations for reading `Arrow`-based formats. [#79308](https://github.com/ClickHouse/ClickHouse/pull/79308) ([Bharat Nallan](https://github.com/bharatnc)).
-* The setting `allow_archive_path_syntax` was marked as experimental by mistake. Add a test to prevent having experimental settings enabled by default. [#79320](https://github.com/ClickHouse/ClickHouse/pull/79320) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Made page cache settings adjustable on a per-query level. This is needed for faster experimentation and for the possibility of fine-tuning for high-throughput and low-latency queries. [#79337](https://github.com/ClickHouse/ClickHouse/pull/79337) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Do not print number tips in pretty formats for numbers that look like most of the 64-bit hashes. This closes [#79334](https://github.com/ClickHouse/ClickHouse/issues/79334). [#79338](https://github.com/ClickHouse/ClickHouse/pull/79338) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Colors of graphs on the advanced dashboards will be calculated from the hash of the corresponding query. This makes it easier to remember and locate a graph while scrolling the dashboard. [#79341](https://github.com/ClickHouse/ClickHouse/pull/79341) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add an asynchronous metric, `FilesystemCacheCapacity` - the total capacity in the `cache` virtual filesystem. This is useful for global infrastructure monitoring; see the query sketch after this list. [#79348](https://github.com/ClickHouse/ClickHouse/pull/79348) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Optimize access to system.parts (read columns/indexes size only when requested). [#79352](https://github.com/ClickHouse/ClickHouse/pull/79352) ([Azat Khuzhin](https://github.com/azat)).
-* Calculate only the relevant fields for `SHOW CLUSTER` queries instead of all fields. [#79368](https://github.com/ClickHouse/ClickHouse/pull/79368) ([Tuan Pham Anh](https://github.com/tuanpach)).
-* Allow to specify storage settings for `DatabaseCatalog`. [#79407](https://github.com/ClickHouse/ClickHouse/pull/79407) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Support local storage in `DeltaLake`. [#79416](https://github.com/ClickHouse/ClickHouse/pull/79416) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Add a query level setting to enable delta-kernel-rs: `allow_experimental_delta_kernel_rs`. [#79418](https://github.com/ClickHouse/ClickHouse/pull/79418) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix possible endless loop when listing blobs from Azure/S3 blob storage. [#79425](https://github.com/ClickHouse/ClickHouse/pull/79425) ([Alexander Gololobov](https://github.com/davenger)).
-* Add filesystem cache setting `max_size_ratio_to_total_space`. [#79460](https://github.com/ClickHouse/ClickHouse/pull/79460) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* In `clickhouse-benchmark`, the `reconnect` option now takes 0, 1, or N as a value to control reconnection behavior accordingly. [#79465](https://github.com/ClickHouse/ClickHouse/pull/79465) ([Sachin Kumar Singh](https://github.com/sachinkumarsingh092)).
-* Allow `ALTER TABLE ... MOVE|REPLACE PARTITION` for tables on different `plain_rewritable` disks. [#79566](https://github.com/ClickHouse/ClickHouse/pull/79566) ([Julia Kartseva](https://github.com/jkartseva)).
-* The vector similarity index is now also used if the reference vector is of type `Array(BFloat16)`. [#79745](https://github.com/ClickHouse/ClickHouse/pull/79745) ([Shankar Iyer](https://github.com/shankar-iyer)).
-* Add `last_error_message`, `last_error_trace` and `query_id` to the `system.error_log` table. Related ticket [#75816](https://github.com/ClickHouse/ClickHouse/issues/75816). [#79836](https://github.com/ClickHouse/ClickHouse/pull/79836) ([Andrei Tinikov](https://github.com/Dolso)).
-* Enable sending crash reports by default. This can be turned off in the server's configuration file. [#79838](https://github.com/ClickHouse/ClickHouse/pull/79838) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* System table `system.functions` now shows in which ClickHouse version functions were first introduced. [#79839](https://github.com/ClickHouse/ClickHouse/pull/79839) ([Robert Schulze](https://github.com/rschu1ze)).
-* Added `access_control_improvements.enable_user_name_access_type` setting. This setting allows enabling/disabling of precise grants for users/roles, introduced in https://github.com/ClickHouse/ClickHouse/pull/72246. You may want to turn this setting off in case you have a cluster with the replicas older than 25.1. [#79842](https://github.com/ClickHouse/ClickHouse/pull/79842) ([pufit](https://github.com/pufit)).
-* Proper implementation of `ASTSelectWithUnionQuery::clone()` method now takes into account `is_normalized` field as well. This might help with [#77569](https://github.com/ClickHouse/ClickHouse/issues/77569). [#79909](https://github.com/ClickHouse/ClickHouse/pull/79909) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Fix the inconsistent formatting of certain queries with the EXCEPT operator. If the left-hand side of the EXCEPT operator ends with `*`, the formatted query loses parentheses and is then parsed as a `*` with the `EXCEPT` modifier. These queries are found by the fuzzer and are unlikely to be found in practice. This closes [#79950](https://github.com/ClickHouse/ClickHouse/issues/79950). [#79952](https://github.com/ClickHouse/ClickHouse/pull/79952) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Small improvement in `JSON` type parsing by using cache of variants deserialization order. [#79984](https://github.com/ClickHouse/ClickHouse/pull/79984) ([Pavel Kruglov](https://github.com/Avogar)).
-* Add setting `s3_slow_all_threads_after_network_error`. [#80035](https://github.com/ClickHouse/ClickHouse/pull/80035) ([Vitaly Baranov](https://github.com/vitlibar)).
-* The logging level for the selected parts to merge was wrong (Information). Closes [#80061](https://github.com/ClickHouse/ClickHouse/issues/80061). [#80062](https://github.com/ClickHouse/ClickHouse/pull/80062) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* trace-visualizer: add runtime/share in tooltips and status messages. [#79040](https://github.com/ClickHouse/ClickHouse/pull/79040) ([Sergei Trifonov](https://github.com/serxa)).
-* trace-visualizer: load data from clickhouse server. [#79042](https://github.com/ClickHouse/ClickHouse/pull/79042) ([Sergei Trifonov](https://github.com/serxa)).
-* Add metrics on failing merges. [#79228](https://github.com/ClickHouse/ClickHouse/pull/79228) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* `clickhouse-benchmark` will display percentage based on the max iterations if specified. [#79346](https://github.com/ClickHouse/ClickHouse/pull/79346) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add system.parts table visualizer. [#79437](https://github.com/ClickHouse/ClickHouse/pull/79437) ([Sergei Trifonov](https://github.com/serxa)).
-* Add a tool for query latency analysis. [#79978](https://github.com/ClickHouse/ClickHouse/pull/79978) ([Sergei Trifonov](https://github.com/serxa)).
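-
-For the on-the-fly lightweight deletes above, a minimal sketch (the table and filter are hypothetical):
-
-```sql
-SET lightweight_deletes_sync = 0, apply_mutations_on_fly = 1;
-
-DELETE FROM events WHERE status = 'obsolete';
-
--- The deleted rows are already invisible here, even though the
--- underlying mutation may still be running in the background.
-SELECT count() FROM events;
-```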
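-
-For the extended `isIPAddressInRange` above, a sketch of the newly accepted argument types (the addresses are examples):
-
-```sql
-SELECT isIPAddressInRange('192.168.1.10', '192.168.0.0/16')         AS from_string,
-       isIPAddressInRange(toIPv4('192.168.1.10'), '192.168.0.0/16') AS from_ipv4;
-```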
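-
-For the hash-functions-with-`NULL`s change above, a minimal sketch (`cityHash64` is one example; the change applies to hash functions generally):
-
-```sql
--- Previously, hashing a composite value containing NULL threw an exception.
-SELECT cityHash64([1, NULL, 3]) AS h;
-```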
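-
-The `FilesystemCacheCapacity` metric above can be read from the standard asynchronous metrics table:
-
-```sql
-SELECT value
-FROM system.asynchronous_metrics
-WHERE metric = 'FilesystemCacheCapacity';
-```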
-
-#### Bug Fix (user-visible misbehavior in an official stable release)
-* Fix renames of columns missing in part. [#76346](https://github.com/ClickHouse/ClickHouse/pull/76346) ([Anton Popov](https://github.com/CurtizJ)).
-* Fixed an issue where a materialized view could start too late, e.g. after the Kafka table that streams to it. [#72123](https://github.com/ClickHouse/ClickHouse/pull/72123) ([Ilya Golshtein](https://github.com/ilejn)).
-* Fix `SELECT` query rewriting during `VIEW` creation with the analyzer enabled. Closes [#75956](https://github.com/ClickHouse/ClickHouse/issues/75956). [#76356](https://github.com/ClickHouse/ClickHouse/pull/76356) ([Dmitry Novik](https://github.com/novikd)).
-* Fix applying `async_insert` from the server (via `apply_settings_from_server`); previously it led to `Unknown packet 11 from server` errors on the client. [#77578](https://github.com/ClickHouse/ClickHouse/pull/77578) ([Azat Khuzhin](https://github.com/azat)).
-* Fixed refreshable materialized view in Replicated database not working on newly added replicas. [#77774](https://github.com/ClickHouse/ClickHouse/pull/77774) ([Michael Kolupaev](https://github.com/al13n321)).
-* Fixed refreshable materialized views breaking backups. [#77893](https://github.com/ClickHouse/ClickHouse/pull/77893) ([Michael Kolupaev](https://github.com/al13n321)).
-* Fix old firing logical error for `transform`. [#78247](https://github.com/ClickHouse/ClickHouse/pull/78247) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Fix some cases where secondary index was not applied with analyzer. Fixes [#65607](https://github.com/ClickHouse/ClickHouse/issues/65607) , fixes [#69373](https://github.com/ClickHouse/ClickHouse/issues/69373). [#78485](https://github.com/ClickHouse/ClickHouse/pull/78485) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix dumping profile events (`NetworkSendElapsedMicroseconds`/`NetworkSendBytes`) for the HTTP protocol with compression enabled (the error should not be more than the buffer size, usually around 1MiB). [#78516](https://github.com/ClickHouse/ClickHouse/pull/78516) ([Azat Khuzhin](https://github.com/azat)).
-* Fix the analyzer producing `LOGICAL_ERROR` when `JOIN ... USING` involves an `ALIAS` column - it should produce an appropriate error instead. [#78618](https://github.com/ClickHouse/ClickHouse/pull/78618) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Fix analyzer: `CREATE VIEW ... ON CLUSTER` fails if the `SELECT` contains positional arguments. [#78663](https://github.com/ClickHouse/ClickHouse/pull/78663) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Fix `Block structure mismatch` error in case of `INSERT SELECT` into table a function with schema inference if `SELECT` has scalar subqueries. [#78677](https://github.com/ClickHouse/ClickHouse/pull/78677) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Fix analyzer: with `prefer_global_in_and_join=1`, the `in` function should be replaced by `globalIn` for a Distributed table in a `SELECT` query. [#78749](https://github.com/ClickHouse/ClickHouse/pull/78749) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Fixed several types of `SELECT` queries that read from tables with the `MongoDB` engine or the `mongodb` table function: queries with implicit conversion of a const value in the `WHERE` clause (e.g. `WHERE datetime = '2025-03-10 00:00:00'`); queries with `LIMIT` and `GROUP BY`. Previously, they could return the wrong result. [#78777](https://github.com/ClickHouse/ClickHouse/pull/78777) ([Anton Popov](https://github.com/CurtizJ)).
-* Fix conversion between different JSON types. Now it is performed by a simple cast through conversion to/from `String`. It is less efficient but 100% accurate. [#78807](https://github.com/ClickHouse/ClickHouse/pull/78807) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix a logical error during conversion of the Dynamic type to Interval. [#78813](https://github.com/ClickHouse/ClickHouse/pull/78813) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix column rollback on JSON parsing error. [#78836](https://github.com/ClickHouse/ClickHouse/pull/78836) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix 'bad cast' error when join using constant alias column. [#78848](https://github.com/ClickHouse/ClickHouse/pull/78848) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Don't allow prewhere in materialized view on columns with different types in view and target table. [#78889](https://github.com/ClickHouse/ClickHouse/pull/78889) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix logical error during parsing of bad binary data of Variant column. [#78982](https://github.com/ClickHouse/ClickHouse/pull/78982) ([Pavel Kruglov](https://github.com/Avogar)).
-* Throw an exception when the Parquet batch size is set to 0. Previously, `output_format_parquet_batch_size = 0` caused ClickHouse to hang. [#78991](https://github.com/ClickHouse/ClickHouse/pull/78991) ([daryawessely](https://github.com/daryawessely)).
-* Fix deserialization of variant discriminators with basic format in compact parts. It was introduced in https://github.com/ClickHouse/ClickHouse/pull/55518. [#79000](https://github.com/ClickHouse/ClickHouse/pull/79000) ([Pavel Kruglov](https://github.com/Avogar)).
-* Dictionaries of type `complex_key_ssd_cache` now reject zero or negative `block_size` and `write_buffer_size` parameters (issue [#78314](https://github.com/ClickHouse/ClickHouse/issues/78314)). [#79028](https://github.com/ClickHouse/ClickHouse/pull/79028) ([Elmi Ahmadov](https://github.com/ahmadov)).
-* Avoid using Field for non-aggregated columns in SummingMergeTree. It could lead to unexpected errors with Dynamic/Variant types used in SummingMergeTree. [#79051](https://github.com/ClickHouse/ClickHouse/pull/79051) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix read from Materialized View with Distributed destination table and different header in analyzer. [#79059](https://github.com/ClickHouse/ClickHouse/pull/79059) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fixes a bug where `arrayUnion()` returned extra (incorrect) values on tables that had batch inserts. Fixes [#75057](https://github.com/ClickHouse/ClickHouse/issues/75057). [#79079](https://github.com/ClickHouse/ClickHouse/pull/79079) ([Peter Nguyen](https://github.com/petern48)).
-* Fix segfault in `OpenSSLInitializer`. Closes [#79092](https://github.com/ClickHouse/ClickHouse/issues/79092). [#79097](https://github.com/ClickHouse/ClickHouse/pull/79097) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Always set prefix for S3 ListObject. [#79114](https://github.com/ClickHouse/ClickHouse/pull/79114) ([Azat Khuzhin](https://github.com/azat)).
-* Fixes a bug where arrayUnion() returned extra (incorrect) values on tables that had batch inserts. Fixes [#79157](https://github.com/ClickHouse/ClickHouse/issues/79157). [#79158](https://github.com/ClickHouse/ClickHouse/pull/79158) ([Peter Nguyen](https://github.com/petern48)).
-* Fix logical error after filter pushdown. [#79164](https://github.com/ClickHouse/ClickHouse/pull/79164) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Fix DeltaLake table engine with delta-kernel implementation being used with http based endpoints, fix NOSIGN. Closes [#78124](https://github.com/ClickHouse/ClickHouse/issues/78124). [#79203](https://github.com/ClickHouse/ClickHouse/pull/79203) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Keeper fix: Avoid triggering watches on failed multi requests. [#79247](https://github.com/ClickHouse/ClickHouse/pull/79247) ([Antonio Andelic](https://github.com/antonio2368)).
-* Forbid Dynamic and JSON types in `IN`. With the current implementation of `IN`, they can lead to incorrect results. Proper support of these types in `IN` is complicated and may be added in the future. [#79282](https://github.com/ClickHouse/ClickHouse/pull/79282) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix check for duplicate paths in JSON type parsing. [#79317](https://github.com/ClickHouse/ClickHouse/pull/79317) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix SecureStreamSocket connection issues. [#79383](https://github.com/ClickHouse/ClickHouse/pull/79383) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Fix loading of plain_rewritable disks containing data. [#79439](https://github.com/ClickHouse/ClickHouse/pull/79439) ([Julia Kartseva](https://github.com/jkartseva)).
-* Fix crash in dynamic subcolumns discovery in Wide parts in MergeTree. [#79466](https://github.com/ClickHouse/ClickHouse/pull/79466) ([Pavel Kruglov](https://github.com/Avogar)).
-* Verify the table name's length only for initial create queries. Do not verify this for secondary creates to avoid backward compatibility issues. [#79488](https://github.com/ClickHouse/ClickHouse/pull/79488) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* Fixed error `Block structure mismatch` in several cases with tables with sparse columns. [#79491](https://github.com/ClickHouse/ClickHouse/pull/79491) ([Anton Popov](https://github.com/CurtizJ)).
-* Fix two cases of "Logical Error: Can't set alias of * of Asterisk on alias". [#79505](https://github.com/ClickHouse/ClickHouse/pull/79505) ([Raúl Marín](https://github.com/Algunenano)).
-* Fix using incorrect paths when renaming an Atomic database. [#79569](https://github.com/ClickHouse/ClickHouse/pull/79569) ([Tuan Pham Anh](https://github.com/tuanpach)).
-* Fix order by JSON column with other columns. [#79591](https://github.com/ClickHouse/ClickHouse/pull/79591) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix result duplication when reading from remote with both `use_hedged_requests` and `allow_experimental_parallel_reading_from_replicas` disabled. [#79599](https://github.com/ClickHouse/ClickHouse/pull/79599) ([Eduard Karacharov](https://github.com/korowa)).
-* Fix crash in delta-kernel implementation when using unity catalog. [#79677](https://github.com/ClickHouse/ClickHouse/pull/79677) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Resolve macros for autodiscovery clusters. [#79696](https://github.com/ClickHouse/ClickHouse/pull/79696) ([Anton Ivashkin](https://github.com/ianton-ru)).
-* Handle incorrectly configured `page_cache_limits` gracefully. [#79805](https://github.com/ClickHouse/ClickHouse/pull/79805) ([Bharat Nallan](https://github.com/bharatnc)).
-* Fixes the result of SQL function `formatDateTime` if a variable-size formatter (e.g. `%W`, aka weekday: `Monday`, `Tuesday`, etc.) is followed by a compound formatter (a formatter that prints multiple components at once, e.g. `%D`, aka the American date: `05/04/25`). [#79835](https://github.com/ClickHouse/ClickHouse/pull/79835) ([Robert Schulze](https://github.com/rschu1ze)).
-* Fix `count()` for `IcebergS3Cluster`: `IcebergS3` supports the count optimization, but `IcebergS3Cluster` did not, so the `count()` result returned in cluster mode could be a multiple of the number of replicas. [#79844](https://github.com/ClickHouse/ClickHouse/pull/79844) ([wxybear](https://github.com/wxybear)).
-* Fixes an `AMBIGUOUS_COLUMN_NAME` error with lazy materialization when no columns are used for query execution until projection (for example, `SELECT * FROM t ORDER BY rand() LIMIT 5`). [#79926](https://github.com/ClickHouse/ClickHouse/pull/79926) ([Igor Nikonov](https://github.com/devcrafter)).
-* Hide password for query `CREATE DATABASE datalake ENGINE = DataLakeCatalog(\'http://catalog:8181\', \'admin\', \'password\')`. [#79941](https://github.com/ClickHouse/ClickHouse/pull/79941) ([Han Fei](https://github.com/hanfei1991)).
-* Allow specifying an alias in `JOIN USING`. Specify this alias in case the column was renamed (e.g., because of `ARRAY JOIN`). Fixes [#73707](https://github.com/ClickHouse/ClickHouse/issues/73707). [#79942](https://github.com/ClickHouse/ClickHouse/pull/79942) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Allow materialized views with UNIONs to work correctly on new replicas. [#80037](https://github.com/ClickHouse/ClickHouse/pull/80037) ([Samay Sharma](https://github.com/samay-sharma)).
-* Format specifier `%e` in SQL function `parseDateTime` now recognizes single-digit days (e.g. `3`), whereas it previously required space padding (e.g. ` 3`). This makes its behavior compatible with MySQL. To retain the previous behavior, set setting `parsedatetime_e_requires_space_padding = 1` (issue [#78243](https://github.com/ClickHouse/ClickHouse/issues/78243)). See the sketch after this list. [#80057](https://github.com/ClickHouse/ClickHouse/pull/80057) ([Robert Schulze](https://github.com/rschu1ze)).
-* Fix warnings `Cannot find 'kernel' in '[...]/memory.stat'` in ClickHouse's log (issue [#77410](https://github.com/ClickHouse/ClickHouse/issues/77410)). [#80129](https://github.com/ClickHouse/ClickHouse/pull/80129) ([Robert Schulze](https://github.com/rschu1ze)).
-* Check stack size in FunctionComparison to avoid stack overflow crash. [#78208](https://github.com/ClickHouse/ClickHouse/pull/78208) ([Julia Kartseva](https://github.com/jkartseva)).
-* Fix race during SELECT from `system.workloads`. [#78743](https://github.com/ClickHouse/ClickHouse/pull/78743) ([Sergei Trifonov](https://github.com/serxa)).
-* Fix: lazy materialization in distributed queries. [#78815](https://github.com/ClickHouse/ClickHouse/pull/78815) ([Igor Nikonov](https://github.com/devcrafter)).
-* Fix `Array(Bool)` to `Array(FixedString)` conversion. [#78863](https://github.com/ClickHouse/ClickHouse/pull/78863) ([Nikita Taranov](https://github.com/nickitat)).
-* Make parquet version selection less confusing. [#78818](https://github.com/ClickHouse/ClickHouse/pull/78818) ([Michael Kolupaev](https://github.com/al13n321)).
-* Fix `ReservoirSampler` self-merging. [#79031](https://github.com/ClickHouse/ClickHouse/pull/79031) ([Nikita Taranov](https://github.com/nickitat)).
-* Fix storage of insertion table in client context. [#79046](https://github.com/ClickHouse/ClickHouse/pull/79046) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Fix the destruction order of data members of `AggregatingSortedAlgorithm` and `SummingSortedAlgorithm`. [#79056](https://github.com/ClickHouse/ClickHouse/pull/79056) ([Nikita Taranov](https://github.com/nickitat)).
-* `enable_user_name_access_type` must not affect `DEFINER` access type. [#80026](https://github.com/ClickHouse/ClickHouse/pull/80026) ([pufit](https://github.com/pufit)).
-* Fix a query to the `system` database hanging if the system database metadata is located in Keeper. [#79304](https://github.com/ClickHouse/ClickHouse/pull/79304) ([Mikhail Artemenko](https://github.com/Michicosun)).
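-
-A hedged sketch of the `%e` change above (date values are illustrative, not an authoritative example):
-
-```sql
--- A single-digit day now parses without space padding (MySQL-compatible):
-SELECT parseDateTime('2025-05-3', '%Y-%m-%e');
-
--- Restore the previous space-padded behavior:
-SET parsedatetime_e_requires_space_padding = 1;
-```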
-
-#### Build/Testing/Packaging Improvement
-* Make it possible to reuse the built `chcache` binary instead of always rebuilding it. [#78851](https://github.com/ClickHouse/ClickHouse/pull/78851) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* Add NATS pause waiting. [#78987](https://github.com/ClickHouse/ClickHouse/pull/78987) ([Dmitry Novikov](https://github.com/dmitry-sles-novikov)).
-* Fix for incorrectly publishing ARM build as amd64compat. [#79122](https://github.com/ClickHouse/ClickHouse/pull/79122) ([Alexander Gololobov](https://github.com/davenger)).
-* Use generated ahead of time assembly for OpenSSL. [#79386](https://github.com/ClickHouse/ClickHouse/pull/79386) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Fixes to allow building with `clang20`. [#79588](https://github.com/ClickHouse/ClickHouse/pull/79588) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* `chcache`: Rust caching support. [#78691](https://github.com/ClickHouse/ClickHouse/pull/78691) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Add unwind information for `zstd` assembly files. [#79288](https://github.com/ClickHouse/ClickHouse/pull/79288) ([Michael Kolupaev](https://github.com/al13n321)).
-
-
-### ClickHouse release 25.4, 2025-04-22 {#254}
-
-#### Backward Incompatible Change
-* Check if all columns in a materialized view match the target table when `allow_materialized_view_with_bad_select` is `false`. [#74481](https://github.com/ClickHouse/ClickHouse/pull/74481) ([Christoph Wurm](https://github.com/cwurm)).
-* Fix cases where `dateTrunc` is used with negative Date/DateTime arguments. [#77622](https://github.com/ClickHouse/ClickHouse/pull/77622) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* The legacy `MongoDB` integration has been removed. Server setting `use_legacy_mongodb_integration` became obsolete and now does nothing. [#77895](https://github.com/ClickHouse/ClickHouse/pull/77895) ([Robert Schulze](https://github.com/rschu1ze)).
-* Enhance `SummingMergeTree` validation to skip aggregation for columns used in partition or sort keys. [#78022](https://github.com/ClickHouse/ClickHouse/pull/78022) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-
-#### New Feature
-* Added CPU slot scheduling for workloads, see [the docs](https://clickhouse.com/docs/operations/workload-scheduling#cpu_scheduling) for details. [#77595](https://github.com/ClickHouse/ClickHouse/pull/77595) ([Sergei Trifonov](https://github.com/serxa)).
-* `clickhouse-local` will retain its databases after restart if you specify the `--path` command line argument. This closes [#50647](https://github.com/ClickHouse/ClickHouse/issues/50647). This closes [#49947](https://github.com/ClickHouse/ClickHouse/issues/49947). [#71722](https://github.com/ClickHouse/ClickHouse/pull/71722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Reject queries when the server is overloaded. The decision is made based on the ratio of wait time (`OSCPUWaitMicroseconds`) to busy time (`OSCPUVirtualTimeMicroseconds`). The query is dropped with some probability, when this ratio is between `min_os_cpu_wait_time_ratio_to_throw` and `max_os_cpu_wait_time_ratio_to_throw` (those are query level settings). [#63206](https://github.com/ClickHouse/ClickHouse/pull/63206) ([Alexey Katsman](https://github.com/alexkats)).
-* Time travel in `Iceberg`: add setting to query `Iceberg` tables as of a specific timestamp. [#71072](https://github.com/ClickHouse/ClickHouse/pull/71072) ([Brett Hoerner](https://github.com/bretthoerner)). [#77439](https://github.com/ClickHouse/ClickHouse/pull/77439) ([Daniil Ivanik](https://github.com/divanik)).
-* An in-memory cache for `Iceberg` metadata, which stores manifest files/list and `metadata.json` to speed up queries. [#77156](https://github.com/ClickHouse/ClickHouse/pull/77156) ([Han Fei](https://github.com/hanfei1991)).
-* Support `DeltaLake` table engine for Azure Blob Storage. Fixes [#68043](https://github.com/ClickHouse/ClickHouse/issues/68043). [#74541](https://github.com/ClickHouse/ClickHouse/pull/74541) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)).
-* Added an in-memory cache for deserialized vector similarity indexes. This should make repeated approximate nearest neighbor (ANN) search queries faster. The size of the new cache is controlled by server settings `vector_similarity_index_cache_size` and `vector_similarity_index_cache_max_entries`. This feature supersedes the skipping index cache feature of earlier releases. [#77905](https://github.com/ClickHouse/ClickHouse/pull/77905) ([Shankar Iyer](https://github.com/shankar-iyer)).
-* Support partition pruning in DeltaLake. [#78486](https://github.com/ClickHouse/ClickHouse/pull/78486) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Support for a background refresh in readonly `MergeTree` tables, which allows querying updateable tables with an unlimited number of distributed readers (a ClickHouse-native data lake). [#76467](https://github.com/ClickHouse/ClickHouse/pull/76467) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Support using custom disks to store databases metadata files. Currently it can be configured only on a global server level. [#77365](https://github.com/ClickHouse/ClickHouse/pull/77365) ([Tuan Pham Anh](https://github.com/tuanpach)).
-* Support `ALTER TABLE ... ATTACH|DETACH|MOVE|REPLACE PARTITION` for the plain_rewritable disk. [#77406](https://github.com/ClickHouse/ClickHouse/pull/77406) ([Julia Kartseva](https://github.com/jkartseva)).
-* Add table settings for `SASL` configuration and credentials to the `Kafka` table engine. This allows configuring SASL-based authentication to Kafka and Kafka-compatible systems directly in the CREATE TABLE statement rather than having to use configuration files or named collections. [#78810](https://github.com/ClickHouse/ClickHouse/pull/78810) ([Christoph Wurm](https://github.com/cwurm)).
-* Allow setting `default_compression_codec` for MergeTree tables: it is used when the CREATE query does not explicitly define one for the given columns. This closes [#42005](https://github.com/ClickHouse/ClickHouse/issues/42005). [#66394](https://github.com/ClickHouse/ClickHouse/pull/66394) ([gvoelfin](https://github.com/gvoelfin)).
-* Add `bind_host` setting in the clusters configuration so that ClickHouse can use a specific network for distributed connections. [#74741](https://github.com/ClickHouse/ClickHouse/pull/74741) ([Todd Yocum](https://github.com/toddyocum)).
-* Introduce a new column, `parametrized_view_parameters` in `system.tables`. Closes https://github.com/clickhouse/clickhouse/issues/66756. [#75112](https://github.com/ClickHouse/ClickHouse/pull/75112) ([NamNguyenHoai](https://github.com/NamHoaiNguyen)).
-* Allow changing a database comment. Closes [#73351](https://github.com/ClickHouse/ClickHouse/issues/73351). [#75622](https://github.com/ClickHouse/ClickHouse/pull/75622) ([NamNguyenHoai](https://github.com/NamHoaiNguyen)).
-* Support `SCRAM-SHA-256` authentication in the PostgreSQL compatibility protocol. [#76839](https://github.com/ClickHouse/ClickHouse/pull/76839) ([scanhex12](https://github.com/scanhex12)).
-* Add functions `arrayLevenshteinDistance`, `arrayLevenshteinDistanceWeighted`, and `arraySimilarity`. [#77187](https://github.com/ClickHouse/ClickHouse/pull/77187) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
-* The setting `parallel_distributed_insert_select` now takes effect for `INSERT SELECT` into `ReplicatedMergeTree` (previously it required `Distributed` tables). [#78041](https://github.com/ClickHouse/ClickHouse/pull/78041) ([Igor Nikonov](https://github.com/devcrafter)).
-* Introduce the `toInterval` function. This function accepts 2 arguments (value and unit) and converts the value to a specific `Interval` type; see the sketch after this list. [#78723](https://github.com/ClickHouse/ClickHouse/pull/78723) ([Andrew Davis](https://github.com/pulpdrew)).
-* Add several convenient ways to resolve root `metadata.json` file in an iceberg table function and engine. Closes [#78455](https://github.com/ClickHouse/ClickHouse/issues/78455). [#78475](https://github.com/ClickHouse/ClickHouse/pull/78475) ([Daniil Ivanik](https://github.com/divanik)).
-* Support password-based authentication in the SSH protocol in ClickHouse. [#78586](https://github.com/ClickHouse/ClickHouse/pull/78586) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
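-
-A minimal sketch of `toInterval` (the unit literal `'day'` is an assumption):
-
-```sql
-SELECT toInterval(5, 'day') AS five_days,           -- converts the value to an Interval type
-       now() + toInterval(5, 'day') AS in_five_days; -- intervals compose with date arithmetic
-```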
-
-#### Experimental Feature
-* Support correlated subqueries as an argument of `EXISTS` expressions in the `WHERE` clause; see the sketch after this list. Closes [#72459](https://github.com/ClickHouse/ClickHouse/issues/72459). [#76078](https://github.com/ClickHouse/ClickHouse/pull/76078) ([Dmitry Novik](https://github.com/novikd)).
-* Added functions `sparseGrams` and `sparseGramsHashes` with ASCII and UTF-8 versions. Author: [scanhex12](https://github.com/scanhex12). [#78176](https://github.com/ClickHouse/ClickHouse/pull/78176) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)). Do not use them: the implementation will change in the next versions.
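-
-A sketch of a correlated `EXISTS` subquery (table and column names are hypothetical):
-
-```sql
--- Returns orders that have at least one matching payment:
-SELECT o.id
-FROM orders AS o
-WHERE EXISTS (SELECT 1 FROM payments AS p WHERE p.order_id = o.id);
-```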
-
-#### Performance Improvement
-* Optimize performance with lazy columns, which read the data after `ORDER BY` and `LIMIT`. [#55518](https://github.com/ClickHouse/ClickHouse/pull/55518) ([Xiaozhe Yu](https://github.com/wudidapaopao)).
-* Enabled the query condition cache by default. [#79080](https://github.com/ClickHouse/ClickHouse/pull/79080) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Speed-up building JOIN result by de-virtualizing calls to `col->insertFrom()`. [#77350](https://github.com/ClickHouse/ClickHouse/pull/77350) ([Alexander Gololobov](https://github.com/davenger)).
-* Merge equality conditions from filter query plan step into JOIN condition if possible to allow using them as hash table keys. [#78877](https://github.com/ClickHouse/ClickHouse/pull/78877) ([Dmitry Novik](https://github.com/novikd)).
-* Use dynamic sharding for JOIN if the JOIN key is a prefix of PK for both parts. This optimization is enabled with `query_plan_join_shard_by_pk_ranges` setting (disabled by default). [#74733](https://github.com/ClickHouse/ClickHouse/pull/74733) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Support `Iceberg` data pruning based on lower and upper boundary values for columns. Fixes [#77638](https://github.com/ClickHouse/ClickHouse/issues/77638). [#78242](https://github.com/ClickHouse/ClickHouse/pull/78242) ([alesapin](https://github.com/alesapin)).
-* Implement the trivial count optimization for `Iceberg`. Queries with `count()` and without any filters should now be faster; see the sketch after this list. Closes [#77639](https://github.com/ClickHouse/ClickHouse/issues/77639). [#78090](https://github.com/ClickHouse/ClickHouse/pull/78090) ([alesapin](https://github.com/alesapin)).
-* Add the ability to configure the number of columns that merges can flush in parallel using `max_merge_delayed_streams_for_parallel_write` (this should reduce memory usage for vertical merges to S3 by about 25×). [#77922](https://github.com/ClickHouse/ClickHouse/pull/77922) ([Azat Khuzhin](https://github.com/azat)).
-* Disable `filesystem_cache_prefer_bigger_buffer_size` when the cache is used passively, such as for merges. This lowers memory consumption on merges. [#77898](https://github.com/ClickHouse/ClickHouse/pull/77898) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Now the number of replicas is used to determine the task size for reading with parallel replicas enabled. This provides better work distribution between replicas when the amount of data to read is not very big. [#78695](https://github.com/ClickHouse/ClickHouse/pull/78695) ([Nikita Taranov](https://github.com/nickitat)).
-* Support asynchronous IO prefetch for the `ORC` format, which improves overall performance by hiding remote IO latency. [#70534](https://github.com/ClickHouse/ClickHouse/pull/70534) ([李扬](https://github.com/taiyang-li)).
-* Preallocate memory used by asynchronous inserts to improve performance. [#74945](https://github.com/ClickHouse/ClickHouse/pull/74945) ([Ilya Golshtein](https://github.com/ilejn)).
-* Decrease the amount of Keeper requests by eliminating the use of single `get` requests, which could have caused a significant load on Keeper with the increased number of replicas, in places where `multiRead` is available. [#56862](https://github.com/ClickHouse/ClickHouse/pull/56862) ([Nikolay Degterinsky](https://github.com/evillique)).
-* A marginal optimization for running functions on Nullable arguments. [#76489](https://github.com/ClickHouse/ClickHouse/pull/76489) ([李扬](https://github.com/taiyang-li)).
-* Optimize `arraySort`. [#76850](https://github.com/ClickHouse/ClickHouse/pull/76850) ([李扬](https://github.com/taiyang-li)).
-* Merge marks of the same part and write them to the query condition cache at one time to reduce the consumption of locks. [#77377](https://github.com/ClickHouse/ClickHouse/pull/77377) ([zhongyuankai](https://github.com/zhongyuankai)).
-* Optimize `s3Cluster` performance for queries with one bracket expansion. [#77686](https://github.com/ClickHouse/ClickHouse/pull/77686) ([Tomáš Hromada](https://github.com/gyfis)).
-* Optimize `ORDER BY` on a single Nullable or LowCardinality column. [#77789](https://github.com/ClickHouse/ClickHouse/pull/77789) ([李扬](https://github.com/taiyang-li)).
-* Optimize memory usage of the `Native` format. [#78442](https://github.com/ClickHouse/ClickHouse/pull/78442) ([Azat Khuzhin](https://github.com/azat)).
-* Trivial optimization: do not rewrite `count(if(...))` to `countIf` if a type cast is required. Close [#78564](https://github.com/ClickHouse/ClickHouse/issues/78564). [#78565](https://github.com/ClickHouse/ClickHouse/pull/78565) ([李扬](https://github.com/taiyang-li)).
-* The `hasAll` function can now take advantage of the `tokenbf_v1`, `ngrambf_v1` full-text skipping indices. [#77662](https://github.com/ClickHouse/ClickHouse/pull/77662) ([UnamedRus](https://github.com/UnamedRus)).
-* Vector similarity index could over-allocate main memory by up to 2x. This fix reworks the memory allocation strategy, reducing the memory consumption and improving the effectiveness of the vector similarity index cache. (issue [#78056](https://github.com/ClickHouse/ClickHouse/issues/78056)). [#78394](https://github.com/ClickHouse/ClickHouse/pull/78394) ([Shankar Iyer](https://github.com/shankar-iyer)).
-* Introduce a setting `schema_type` for the `system.metric_log` table. There are three allowed schemas: `wide` -- the current schema, with each metric/event in a separate column (most efficient for reading individual columns); `transposed` -- similar to `system.asynchronous_metric_log`, with metrics/events stored as rows; and the most interesting, `transposed_with_wide_view` -- the underlying table is created with the `transposed` schema, plus a view with the `wide` schema that translates queries to the underlying table. In `transposed_with_wide_view`, subsecond resolution is not supported for the view; `event_time_microseconds` is just an alias for backward compatibility. [#78412](https://github.com/ClickHouse/ClickHouse/pull/78412) ([alesapin](https://github.com/alesapin)).
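-
-A hedged sketch of the trivial count optimization for `Iceberg` (the URL and credentials are placeholders):
-
-```sql
--- A plain count() without filters can now be answered from Iceberg metadata
--- instead of scanning the data files:
-SELECT count()
-FROM icebergS3('https://my-bucket.s3.amazonaws.com/warehouse/db/table/', 'access_key', 'secret_key');
-```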
-
-#### Improvement
-* Serialize the query plan for `Distributed` queries. A new setting `serialize_query_plan` is added. When enabled, queries from a `Distributed` table will use a serialized query plan for remote query execution. This introduces a new packet type to the TCP protocol; the corresponding option must be set to `true` in the server config to allow processing this packet. [#69652](https://github.com/ClickHouse/ClickHouse/pull/69652) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Support `JSON` type and subcolumns reading from views. [#76903](https://github.com/ClickHouse/ClickHouse/pull/76903) ([Pavel Kruglov](https://github.com/Avogar)).
-* Support `ALTER DATABASE ... ON CLUSTER`. [#79242](https://github.com/ClickHouse/ClickHouse/pull/79242) ([Tuan Pham Anh](https://github.com/tuanpach)).
-* Refreshes of refreshable materialized views now appear in `system.query_log`. [#71333](https://github.com/ClickHouse/ClickHouse/pull/71333) ([Michael Kolupaev](https://github.com/al13n321)).
-* User-defined functions (UDFs) can now be marked as deterministic via a new setting in their configuration. Also, the query cache now checks if UDFs called within a query are deterministic. If this is the case, it caches the query result. (Issue [#59988](https://github.com/ClickHouse/ClickHouse/issues/59988)). [#77769](https://github.com/ClickHouse/ClickHouse/pull/77769) ([Jimmy Aguilar Mena](https://github.com/Ergus)).
-* Enabled a backoff logic for all types of replicated tasks. It will provide the ability to reduce CPU usage, memory usage, and log file sizes. Added new settings `max_postpone_time_for_failed_replicated_fetches_ms`, `max_postpone_time_for_failed_replicated_merges_ms` and `max_postpone_time_for_failed_replicated_tasks_ms` which are similar to `max_postpone_time_for_failed_mutations_ms`. [#74576](https://github.com/ClickHouse/ClickHouse/pull/74576) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
-* Add `query_id` to `system.errors`. Closes [#75815](https://github.com/ClickHouse/ClickHouse/issues/75815). [#76581](https://github.com/ClickHouse/ClickHouse/pull/76581) ([Vladimir Baikov](https://github.com/bkvvldmr)).
-* Adding support for converting `UInt128` to `IPv6`. This allows the `bitAnd` operation and arithmetic on `IPv6`, with the result convertible back to `IPv6`. Closes [#76752](https://github.com/ClickHouse/ClickHouse/issues/76752). See also [#57707](https://github.com/ClickHouse/ClickHouse/pull/57707). [#76928](https://github.com/ClickHouse/ClickHouse/pull/76928) ([Muzammil Abdul Rehman](https://github.com/muzammilar)).
-* Don't parse special `Bool` values in text formats inside `Variant` type by default. It can be enabled using setting `allow_special_bool_values_inside_variant`. [#76974](https://github.com/ClickHouse/ClickHouse/pull/76974) ([Pavel Kruglov](https://github.com/Avogar)).
-* Support a configurable per-task waiting time for low-`priority` queries, at the session level and at the server level. [#77013](https://github.com/ClickHouse/ClickHouse/pull/77013) ([VicoWu](https://github.com/VicoWu)).
-* Implement comparison for values of the JSON data type. JSON objects can now be compared similarly to Maps; see the sketch after this list. [#77397](https://github.com/ClickHouse/ClickHouse/pull/77397) ([Pavel Kruglov](https://github.com/Avogar)).
-* Better permission support in `system.kafka_consumers`. Forward internal `librdkafka` errors (worth noting that this library is crap). [#77700](https://github.com/ClickHouse/ClickHouse/pull/77700) ([Ilya Golshtein](https://github.com/ilejn)).
-* Added validation for the settings of the Buffer table engine. [#77840](https://github.com/ClickHouse/ClickHouse/pull/77840) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Add config `enable_hdfs_pread` to enable or disable pread in `HDFS`. [#77885](https://github.com/ClickHouse/ClickHouse/pull/77885) ([kevinyhzou](https://github.com/KevinyhZou)).
-* Add profile events for number of zookeeper `multi` read and write requests. [#77888](https://github.com/ClickHouse/ClickHouse/pull/77888) ([JackyWoo](https://github.com/JackyWoo)).
-* Allow creating and inserting into temporary tables when `disable_insertion_and_mutation` is on. [#77901](https://github.com/ClickHouse/ClickHouse/pull/77901) ([Xu Jia](https://github.com/XuJia0210)).
-* Decrease `max_insert_delayed_streams_for_parallel_write` (to 100). [#77919](https://github.com/ClickHouse/ClickHouse/pull/77919) ([Azat Khuzhin](https://github.com/azat)).
-* Fix parsing of years in Joda syntax (this is from the Java world, if you're wondering), such as `yyy`. [#77973](https://github.com/ClickHouse/ClickHouse/pull/77973) ([李扬](https://github.com/taiyang-li)).
-* Attaching parts of `MergeTree` tables will be performed in their block order, which is important for special merging algorithms, such as `ReplacingMergeTree`. This closes [#71009](https://github.com/ClickHouse/ClickHouse/issues/71009). [#77976](https://github.com/ClickHouse/ClickHouse/pull/77976) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Query masking rules can now throw a `LOGICAL_ERROR` if a match happened. This helps check whether a predefined password is leaking anywhere in the logs. [#78094](https://github.com/ClickHouse/ClickHouse/pull/78094) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Added column `index_length_column` to `information_schema.tables` for better compatibility with MySQL. [#78119](https://github.com/ClickHouse/ClickHouse/pull/78119) ([Paweł Zakrzewski](https://github.com/KrzaQ)).
-* Introduce two new metrics: `TotalMergeFailures` and `NonAbortedMergeFailures`. These metrics are needed to detect the cases where too many merges fail within a short period. [#78150](https://github.com/ClickHouse/ClickHouse/pull/78150) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* Fix incorrect S3 URL parsing when the key is not specified in path-style URLs. [#78185](https://github.com/ClickHouse/ClickHouse/pull/78185) ([Arthur Passos](https://github.com/arthurpassos)).
-* Fix incorrect values of `BlockActiveTime`, `BlockDiscardTime`, `BlockWriteTime`, `BlockQueueTime`, and `BlockReadTime` asynchronous metrics (before the change 1 second was incorrectly reported as 0.001). [#78211](https://github.com/ClickHouse/ClickHouse/pull/78211) ([filimonov](https://github.com/filimonov)).
-* Respect `loading_retries` limit for errors during push to materialized view for StorageS3(Azure)Queue. Before that such errors were retried indefinitely. [#78313](https://github.com/ClickHouse/ClickHouse/pull/78313) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* In DeltaLake with `delta-kernel-rs` implementation, fix performance and progress bar. [#78368](https://github.com/ClickHouse/ClickHouse/pull/78368) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Support `include`, `from_env`, `from_zk` for runtime disks. Closes [#78177](https://github.com/ClickHouse/ClickHouse/issues/78177). [#78470](https://github.com/ClickHouse/ClickHouse/pull/78470) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Add a dynamic warning to the `system.warnings` table for long running mutations. [#78658](https://github.com/ClickHouse/ClickHouse/pull/78658) ([Bharat Nallan](https://github.com/bharatnc)).
-* Added field `condition` to system table `system.query_condition_cache`. It stores the plaintext condition whose hash is used as a key in the query condition cache. [#78671](https://github.com/ClickHouse/ClickHouse/pull/78671) ([Robert Schulze](https://github.com/rschu1ze)).
-* Allow an empty value for Hive partitioning. [#78816](https://github.com/ClickHouse/ClickHouse/pull/78816) ([Arthur Passos](https://github.com/arthurpassos)).
-* Fix `IN` clause type coercion for `BFloat16` (i.e. `SELECT toBFloat16(1) IN [1, 2, 3];` now returns `1`). Closes [#78754](https://github.com/ClickHouse/ClickHouse/issues/78754). [#78839](https://github.com/ClickHouse/ClickHouse/pull/78839) ([Raufs Dunamalijevs](https://github.com/rienath)).
-* Do not check parts on other disks for `MergeTree` if `disk = ...` is set. [#78855](https://github.com/ClickHouse/ClickHouse/pull/78855) ([Azat Khuzhin](https://github.com/azat)).
-* Record data types in `used_data_type_families` in `system.query_log` with their canonical names. [#78972](https://github.com/ClickHouse/ClickHouse/pull/78972) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Clean up settings during `recoverLostReplica`, the same as was done in [#78637](https://github.com/ClickHouse/ClickHouse/pull/78637). [#79113](https://github.com/ClickHouse/ClickHouse/pull/79113) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Use insertion columns for INFILE schema inference. [#78490](https://github.com/ClickHouse/ClickHouse/pull/78490) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
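-
-A minimal sketch of JSON value comparison (values are illustrative):
-
-```sql
--- JSON objects now compare similarly to Maps:
-SELECT '{"a":1}'::JSON = '{"a":1}'::JSON AS eq,
-       '{"a":1}'::JSON < '{"a":2}'::JSON AS lt;
-```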
-
-#### Bug Fix (user-visible misbehavior in an official stable release)
-* Fix incorrect projection analysis when `count(Nullable)` is used in aggregate projections. This fixes [#74495](https://github.com/ClickHouse/ClickHouse/issues/74495) . This PR also adds some logs around projection analysis to clarify why a projection is used or why not. [#74498](https://github.com/ClickHouse/ClickHouse/pull/74498) ([Amos Bird](https://github.com/amosbird)).
-* Fix `Part <...> does not contain in snapshot of previous virtual parts. (PART_IS_TEMPORARILY_LOCKED)` during `DETACH PART`. [#76039](https://github.com/ClickHouse/ClickHouse/pull/76039) ([Aleksei Filatov](https://github.com/aalexfvk)).
-* Fix skip indexes not working with expressions containing literals in the analyzer, and remove trivial casts during index analysis. [#77229](https://github.com/ClickHouse/ClickHouse/pull/77229) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix a bug when `close_session` query parameter didn't have any effect leading to named sessions being closed only after `session_timeout`. [#77336](https://github.com/ClickHouse/ClickHouse/pull/77336) ([Alexey Katsman](https://github.com/alexkats)).
-* Fixed receiving messages from NATS server without attached Materialized Views. [#77392](https://github.com/ClickHouse/ClickHouse/pull/77392) ([Dmitry Novikov](https://github.com/dmitry-sles-novikov)).
-* Fix logical error while reading from empty `FileLog` via `merge` table function, close [#75575](https://github.com/ClickHouse/ClickHouse/issues/75575). [#77441](https://github.com/ClickHouse/ClickHouse/pull/77441) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Use default format settings in `Dynamic` serialization from shared variant. [#77572](https://github.com/ClickHouse/ClickHouse/pull/77572) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix checking if the table data path exists on the local disk. [#77608](https://github.com/ClickHouse/ClickHouse/pull/77608) ([Tuan Pham Anh](https://github.com/tuanpach)).
-* Fix sending constant values to remote for some types. [#77634](https://github.com/ClickHouse/ClickHouse/pull/77634) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix a crash because of expired context in S3/AzureQueue. [#77720](https://github.com/ClickHouse/ClickHouse/pull/77720) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Hide credentials in RabbitMQ, Nats, Redis, AzureQueue table engines. [#77755](https://github.com/ClickHouse/ClickHouse/pull/77755) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix undefined behaviour on `NaN` comparison in `argMin`/`argMax`. [#77756](https://github.com/ClickHouse/ClickHouse/pull/77756) ([Raúl Marín](https://github.com/Algunenano)).
-* Regularly check if merges and mutations were cancelled even in case when the operation doesn't produce any blocks to write. [#77766](https://github.com/ClickHouse/ClickHouse/pull/77766) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* Fixed refreshable materialized view in Replicated database not working on newly added replicas. [#77774](https://github.com/ClickHouse/ClickHouse/pull/77774) ([Michael Kolupaev](https://github.com/al13n321)).
-* Fix possible crash when `NOT_FOUND_COLUMN_IN_BLOCK` error occurs. [#77854](https://github.com/ClickHouse/ClickHouse/pull/77854) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Fix crash that happens in the S3/AzureQueue while filling data. [#77878](https://github.com/ClickHouse/ClickHouse/pull/77878) ([Bharat Nallan](https://github.com/bharatnc)).
-* Disable fuzzy search for history in SSH server (since it requires the skim library). [#78002](https://github.com/ClickHouse/ClickHouse/pull/78002) ([Azat Khuzhin](https://github.com/azat)).
-* Fixes a bug that a vector search query on a non-indexed column was returning incorrect results if there was another vector column in the table with a defined vector similarity index. (Issue [#77978](https://github.com/ClickHouse/ClickHouse/issues/77978)). [#78069](https://github.com/ClickHouse/ClickHouse/pull/78069) ([Shankar Iyer](https://github.com/shankar-iyer)).
-* Fix a minuscule error in the "The requested output format {} is binary... Do you want to output it anyway? [y/N]" prompt. [#78095](https://github.com/ClickHouse/ClickHouse/pull/78095) ([Azat Khuzhin](https://github.com/azat)).
-* Fix a bug in `toStartOfInterval` with a zero origin argument. [#78096](https://github.com/ClickHouse/ClickHouse/pull/78096) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Disallow specifying an empty `session_id` query parameter for HTTP interface. [#78098](https://github.com/ClickHouse/ClickHouse/pull/78098) ([Alexey Katsman](https://github.com/alexkats)).
-* Fix metadata override in `Replicated` database which could have happened due to a `RENAME` query executed right after an `ALTER` query. [#78107](https://github.com/ClickHouse/ClickHouse/pull/78107) ([Nikolay Degterinsky](https://github.com/evillique)).
-* Fix crash in `NATS` engine. [#78108](https://github.com/ClickHouse/ClickHouse/pull/78108) ([Dmitry Novikov](https://github.com/dmitry-sles-novikov)).
-* Do not try to create history_file in embedded client for SSH (in previous versions the creation was always unsuccessful, but attempted). [#78112](https://github.com/ClickHouse/ClickHouse/pull/78112) ([Azat Khuzhin](https://github.com/azat)).
-* Fix `system.detached_tables` displaying incorrect information after `RENAME DATABASE` or `DROP TABLE` queries. [#78126](https://github.com/ClickHouse/ClickHouse/pull/78126) ([Nikolay Degterinsky](https://github.com/evillique)).
-* Fix for checks for too many tables with `Replicated` database after [#77274](https://github.com/ClickHouse/ClickHouse/pull/77274). Also, perform the check before creating the storage to avoid creating unaccounted nodes in Keeper in the case of `ReplicatedMergeTree` or `KeeperMap`. [#78127](https://github.com/ClickHouse/ClickHouse/pull/78127) ([Nikolay Degterinsky](https://github.com/evillique)).
-* Fix possible crash due to concurrent `S3Queue` metadata initialization. [#78131](https://github.com/ClickHouse/ClickHouse/pull/78131) ([Azat Khuzhin](https://github.com/azat)).
-* `groupArray*` functions now produce a `BAD_ARGUMENTS` error for a zero `max_size` argument of a signed integer type, as is already done for unsigned ones, instead of trying to execute with it. [#78140](https://github.com/ClickHouse/ClickHouse/pull/78140) ([Eduard Karacharov](https://github.com/korowa)).
-* Prevent crash on recovering a lost replica if the local table is removed before it's detached. [#78173](https://github.com/ClickHouse/ClickHouse/pull/78173) ([Raúl Marín](https://github.com/Algunenano)).
-* Fix the `alterable` column in `system.s3_queue_settings` always returning `false`. [#78187](https://github.com/ClickHouse/ClickHouse/pull/78187) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Mask the Azure access signature so that it is not visible to users or in logs. [#78189](https://github.com/ClickHouse/ClickHouse/pull/78189) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix prefetching of substreams with prefixes in Wide parts. [#78205](https://github.com/ClickHouse/ClickHouse/pull/78205) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fixed crashes / incorrect result for `mapFromArrays` in case of `LowCardinality(Nullable)` type of keys array. [#78240](https://github.com/ClickHouse/ClickHouse/pull/78240) ([Eduard Karacharov](https://github.com/korowa)).
-* Fix delta-kernel-rs auth options. [#78255](https://github.com/ClickHouse/ClickHouse/pull/78255) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Do not schedule refreshable materialized view tasks if a replica's `disable_insertion_and_mutation` is true. Such a task performs an insertion, which would fail in that case. [#78277](https://github.com/ClickHouse/ClickHouse/pull/78277) ([Xu Jia](https://github.com/XuJia0210)).
-* Validate access to underlying tables for the `Merge` engine. [#78339](https://github.com/ClickHouse/ClickHouse/pull/78339) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)).
-* Fix the `FINAL` modifier being ignored when querying a `Distributed` table. [#78428](https://github.com/ClickHouse/ClickHouse/pull/78428) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* `bitmapMin` returns `UINT32_MAX` when the bitmap is empty (and `UINT64_MAX` when the input type is larger), which matches the behavior of an empty roaring bitmap's minimum. [#78444](https://github.com/ClickHouse/ClickHouse/pull/78444) ([wxybear](https://github.com/wxybear)).
-* Disable parallelization of query processing right after reading `FROM` when `distributed_aggregation_memory_efficient` is enabled, as it could lead to a logical error. Closes [#76934](https://github.com/ClickHouse/ClickHouse/issues/76934). [#78500](https://github.com/ClickHouse/ClickHouse/pull/78500) ([flynn](https://github.com/ucasfl)).
-* Set at least one stream for reading in case there are zero planned streams after applying `max_streams_to_max_threads_ratio` setting. [#78505](https://github.com/ClickHouse/ClickHouse/pull/78505) ([Eduard Karacharov](https://github.com/korowa)).
-* In storage `S3Queue` fix logical error "Cannot unregister: table uuid is not registered". Closes [#78285](https://github.com/ClickHouse/ClickHouse/issues/78285). [#78541](https://github.com/ClickHouse/ClickHouse/pull/78541) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* ClickHouse is now able to figure out its cgroup v2 on systems with both cgroups v1 and v2 enabled. [#78566](https://github.com/ClickHouse/ClickHouse/pull/78566) ([Grigory Korolev](https://github.com/gkorolev)).
-* Fix `-Cluster` table functions failing when used with table-level settings. [#78587](https://github.com/ClickHouse/ClickHouse/pull/78587) ([Daniil Ivanik](https://github.com/divanik)).
-* Better checks on `INSERT` when transactions are not supported by `ReplicatedMergeTree`. [#78633](https://github.com/ClickHouse/ClickHouse/pull/78633) ([Azat Khuzhin](https://github.com/azat)).
-* Cleanup query settings during attach. [#78637](https://github.com/ClickHouse/ClickHouse/pull/78637) ([Raúl Marín](https://github.com/Algunenano)).
-* Fix a crash when an invalid path was specified in `iceberg_metadata_file_path`. [#78688](https://github.com/ClickHouse/ClickHouse/pull/78688) ([alesapin](https://github.com/alesapin)).
-* In the `DeltaLake` table engine with the delta-kernel-rs implementation, fix the case when the read schema is different from the table schema and there are partition columns at the same time, leading to a "not found column" error. [#78690](https://github.com/ClickHouse/ClickHouse/pull/78690) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix a problem when after scheduling to close a named session (but before timeout expiration), creation of a new named session with the same name led to it being closed at a time point when the first session was scheduled to close. [#78698](https://github.com/ClickHouse/ClickHouse/pull/78698) ([Alexey Katsman](https://github.com/alexkats)).
-* Fixed several types of `SELECT` queries that read from tables with the `MongoDB` engine or the `mongodb` table function: queries with implicit conversion of a const value in the `WHERE` clause (e.g. `WHERE datetime = '2025-03-10 00:00:00'`); queries with `LIMIT` and `GROUP BY`. Previously, they could return the wrong result. [#78777](https://github.com/ClickHouse/ClickHouse/pull/78777) ([Anton Popov](https://github.com/CurtizJ)).
-* Don't block table shutdown while running `CHECK TABLE`. [#78782](https://github.com/ClickHouse/ClickHouse/pull/78782) ([Raúl Marín](https://github.com/Algunenano)).
-* Keeper fix: fix ephemeral count in all cases. [#78799](https://github.com/ClickHouse/ClickHouse/pull/78799) ([Antonio Andelic](https://github.com/antonio2368)).
-* Fix bad cast in `StorageDistributed` when using table functions other than `view`. Closes [#78464](https://github.com/ClickHouse/ClickHouse/issues/78464). [#78828](https://github.com/ClickHouse/ClickHouse/pull/78828) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Fix the consistency of formatting for `tupleElement(*, 1)`. Closes [#78639](https://github.com/ClickHouse/ClickHouse/issues/78639). [#78832](https://github.com/ClickHouse/ClickHouse/pull/78832) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Dictionaries of type `ssd_cache` now reject zero or negative `block_size` and `write_buffer_size` parameters (issue [#78314](https://github.com/ClickHouse/ClickHouse/issues/78314)). [#78854](https://github.com/ClickHouse/ClickHouse/pull/78854) ([Elmi Ahmadov](https://github.com/ahmadov)).
-* Fix a crash in a refreshable `MATERIALIZED VIEW` in the case of `ALTER` after an incorrect shutdown. [#78858](https://github.com/ClickHouse/ClickHouse/pull/78858) ([Azat Khuzhin](https://github.com/azat)).
-* Fix parsing of bad `DateTime` values in `CSV` format. [#78919](https://github.com/ClickHouse/ClickHouse/pull/78919) ([Pavel Kruglov](https://github.com/Avogar)).
-* Keeper fix: Avoid triggering watches on failed multi requests. [#79247](https://github.com/ClickHouse/ClickHouse/pull/79247) ([Antonio Andelic](https://github.com/antonio2368)).
-* Fix reading an Iceberg table failing when a min-max value is specified explicitly but is `NULL`. The Go Iceberg library was noted for generating such atrocious files. Closes [#78740](https://github.com/ClickHouse/ClickHouse/issues/78740). [#78764](https://github.com/ClickHouse/ClickHouse/pull/78764) ([flynn](https://github.com/ucasfl)).
-
-#### Build/Testing/Packaging Improvement
-* Respect CPU target features in Rust and enable LTO in all crates. [#78590](https://github.com/ClickHouse/ClickHouse/pull/78590) ([Raúl Marín](https://github.com/Algunenano)).
-
-
-### ClickHouse release 25.3 LTS, 2025-03-20 {#253}
-
-#### Backward Incompatible Change
-* Disallow truncating replicated databases. [#76651](https://github.com/ClickHouse/ClickHouse/pull/76651) ([Bharat Nallan](https://github.com/bharatnc)).
-* Skipping index cache is reverted. [#77447](https://github.com/ClickHouse/ClickHouse/pull/77447) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-
-#### New Feature
-* The `JSON` data type is production-ready. See https://jsonbench.com/. The `Dynamic` and `Variant` data types are production-ready as well. See the example after this list. [#77785](https://github.com/ClickHouse/ClickHouse/pull/77785) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Introduce the SSH protocol for clickhouse-server. Now, it is possible to connect to ClickHouse using any SSH client. This closes: [#74340](https://github.com/ClickHouse/ClickHouse/issues/74340). [#74989](https://github.com/ClickHouse/ClickHouse/pull/74989) ([George Gamezardashvili](https://github.com/Infjoker)).
-* Replace table functions with their -Cluster alternatives if parallel replicas are enabled. Fixes [#65024](https://github.com/ClickHouse/ClickHouse/issues/65024). [#70659](https://github.com/ClickHouse/ClickHouse/pull/70659) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* A new implementation of the Userspace Page Cache, which allows caching data in in-process memory instead of relying on the OS page cache. This is useful when the data is stored on a remote virtual filesystem without backing by the local filesystem cache. [#70509](https://github.com/ClickHouse/ClickHouse/pull/70509) ([Michael Kolupaev](https://github.com/al13n321)).
-* Added `concurrent_threads_scheduler` server setting that governs how CPU slots are distributed among concurrent queries. Could be set to `round_robin` (previous behavior) or `fair_round_robin` to address the issue of unfair CPU distribution between INSERTs and SELECTs. [#75949](https://github.com/ClickHouse/ClickHouse/pull/75949) ([Sergei Trifonov](https://github.com/serxa)).
-* Add `estimateCompressionRatio` aggregate function [#70801](https://github.com/ClickHouse/ClickHouse/issues/70801). [#76661](https://github.com/ClickHouse/ClickHouse/pull/76661) ([Tariq Almawash](https://github.com/talmawash)).
-* Added function `arraySymmetricDifference`. It returns all elements from multiple array arguments which do not occur in all arguments. Example: `SELECT arraySymmetricDifference([1, 2], [2, 3])` returns `[1, 3]`. (issue [#61673](https://github.com/ClickHouse/ClickHouse/issues/61673)). [#76231](https://github.com/ClickHouse/ClickHouse/pull/76231) ([Filipp Abapolov](https://github.com/pheepa)).
-* Allow explicitly specifying the metadata file to read for Iceberg with the storage/table function setting `iceberg_metadata_file_path`. Fixes [#47412](https://github.com/ClickHouse/ClickHouse/issues/47412). [#77318](https://github.com/ClickHouse/ClickHouse/pull/77318) ([alesapin](https://github.com/alesapin)).
-* Added the `keccak256` hash function, commonly used in blockchain implementations, especially in EVM-based systems. [#76669](https://github.com/ClickHouse/ClickHouse/pull/76669) ([Arnaud Briche](https://github.com/arnaudbriche)).
-* Add three new functions: `icebergTruncate` (according to the specification: https://iceberg.apache.org/spec/#truncate-transform-details), `toYearNumSinceEpoch`, and `toMonthNumSinceEpoch`. Support the `truncate` transform in partition pruning for the `Iceberg` engine. [#77403](https://github.com/ClickHouse/ClickHouse/pull/77403) ([alesapin](https://github.com/alesapin)).
-* Support `LowCardinality(Decimal)` data types [#72256](https://github.com/ClickHouse/ClickHouse/issues/72256). [#72833](https://github.com/ClickHouse/ClickHouse/pull/72833) ([zhanglistar](https://github.com/zhanglistar)).
-* `FilterTransformPassedRows` and `FilterTransformPassedBytes` profile events will show the number of rows and number of bytes filtered during the query execution. [#76662](https://github.com/ClickHouse/ClickHouse/pull/76662) ([Onkar Deshpande](https://github.com/onkar)).
-* Support for the histogram metric type. The interface closely mirrors the Prometheus client, where you simply call `observe(value)` to increment the counter in the bucket corresponding to the value. The histogram metrics are exposed via `system.histogram_metrics`. [#75736](https://github.com/ClickHouse/ClickHouse/pull/75736) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* Support non-constant `CASE` for switching on explicit values. [#77399](https://github.com/ClickHouse/ClickHouse/pull/77399) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
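-
-A minimal sketch of the production-ready `JSON` type (table and data are illustrative):
-
-```sql
-CREATE TABLE events (data JSON) ENGINE = MergeTree ORDER BY tuple();
-
-INSERT INTO events VALUES ('{"user": {"id": 42, "name": "alice"}}');
-
--- Paths are read as subcolumns:
-SELECT data.user.id, data.user.name FROM events;
-```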
-
-#### Experimental Feature
-* Add support [for Unity Catalog](https://www.databricks.com/product/unity-catalog) for DeltaLake tables on top of AWS S3 and local filesystem. [#76988](https://github.com/ClickHouse/ClickHouse/pull/76988) ([alesapin](https://github.com/alesapin)).
-* Introduce experimental integration with AWS Glue service catalog for Iceberg tables. [#77257](https://github.com/ClickHouse/ClickHouse/pull/77257) ([alesapin](https://github.com/alesapin)).
-* Added support for dynamic cluster autodiscovery. This extends the existing _node_ autodiscovery feature. ClickHouse can now automatically detect and register new _clusters_ under a common ZooKeeper path using ``. [#76001](https://github.com/ClickHouse/ClickHouse/pull/76001) ([Anton Ivashkin](https://github.com/ianton-ru)).
-* Allow automatic cleanup merges of entire partitions after a configurable timeout with the new setting `enable_replacing_merge_with_cleanup_for_min_age_to_force_merge`; see the sketch after this list. [#76440](https://github.com/ClickHouse/ClickHouse/pull/76440) ([Christoph Wurm](https://github.com/cwurm)).
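-
-A hedged sketch of the cleanup-merge setting (schema and values are illustrative; enabling the experimental cleanup merges may additionally be required):
-
-```sql
-CREATE TABLE kv
-(
-    key UInt64,
-    value String,
-    version UInt64,
-    deleted UInt8
-)
-ENGINE = ReplacingMergeTree(version, deleted)
-ORDER BY key
-SETTINGS min_age_to_force_merge_seconds = 3600,  -- force a merge after one hour
-         enable_replacing_merge_with_cleanup_for_min_age_to_force_merge = 1;
-```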
-
-#### Performance Improvement
-* Implement a query condition cache to improve performance of queries with repeated conditions. The range of the portion of data that does not meet the condition is remembered as a temporary index in memory, and subsequent queries use this index; see the sketch after this list. Closes [#67768](https://github.com/ClickHouse/ClickHouse/issues/67768). [#69236](https://github.com/ClickHouse/ClickHouse/pull/69236) ([zhongyuankai](https://github.com/zhongyuankai)).
-* Actively evict data from the cache on parts removal. Do not let the cache grow to the maximum size if the amount of data is less. [#76641](https://github.com/ClickHouse/ClickHouse/pull/76641) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Replace Int256 and UInt256 with the clang builtin i256 in arithmetic calculations, which gives a performance improvement [#70502](https://github.com/ClickHouse/ClickHouse/issues/70502). [#73658](https://github.com/ClickHouse/ClickHouse/pull/73658) ([李扬](https://github.com/taiyang-li)).
-* In some cases (e.g. an empty array column), data parts can contain empty files. We now skip writing empty blobs to object storage and only store metadata for such files when the table resides on a disk with separate metadata and object storages. [#75860](https://github.com/ClickHouse/ClickHouse/pull/75860) ([Alexander Gololobov](https://github.com/davenger)).
-* Improve min/max performance for Decimal32/Decimal64/DateTime64. [#76570](https://github.com/ClickHouse/ClickHouse/pull/76570) ([李扬](https://github.com/taiyang-li)).
-* Query compilation (setting `compile_expressions`) now considers the machine type. This speeds up such queries significantly. [#76753](https://github.com/ClickHouse/ClickHouse/pull/76753) ([ZhangLiStar](https://github.com/zhanglistar)).
-* Optimize `arraySort`. [#76850](https://github.com/ClickHouse/ClickHouse/pull/76850) ([李扬](https://github.com/taiyang-li)).
-* Disable `filesystem_cache_prefer_bigger_buffer_size` when the cache is used passively, such as for merges. [#77898](https://github.com/ClickHouse/ClickHouse/pull/77898) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Apply the `preserve_most` attribute in some places in the code, which allows slightly better code generation. [#67778](https://github.com/ClickHouse/ClickHouse/pull/67778) ([Nikita Taranov](https://github.com/nickitat)).
-* Faster ClickHouse server shutdown (removes a 2.5-second delay). [#76550](https://github.com/ClickHouse/ClickHouse/pull/76550) ([Azat Khuzhin](https://github.com/azat)).
-* Avoid excess allocation in `ReadBufferFromS3` and other remote reading buffers, halving their memory consumption. [#76692](https://github.com/ClickHouse/ClickHouse/pull/76692) ([Sema Checherinda](https://github.com/CheSema)).
-* Update zstd from 1.5.5 to 1.5.7 which could lead to some [performance improvements](https://github.com/facebook/zstd/releases/tag/v1.5.7). [#77137](https://github.com/ClickHouse/ClickHouse/pull/77137) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
-* Reduce memory usage during prefetches of JSON column in Wide parts. This is relevant when ClickHouse is used on top of a shared storage, such as in ClickHouse Cloud. [#77640](https://github.com/ClickHouse/ClickHouse/pull/77640) ([Pavel Kruglov](https://github.com/Avogar)).
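-
-A minimal sketch of the query condition cache described in the first item of this list. The table and columns are hypothetical, and the setting name `use_query_condition_cache` is an assumption rather than something stated in the entry:
-
-```sql
--- The first run remembers which granule ranges do not match the condition.
-SELECT count() FROM logs WHERE status = 500
-SETTINGS use_query_condition_cache = 1;
-
--- A later query with the same condition can skip the non-matching ranges.
-SELECT avg(latency_ms) FROM logs WHERE status = 500
-SETTINGS use_query_condition_cache = 1;
-```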
-
-#### Improvement
-* Support atomic rename when `TRUNCATE` is used with `INTO OUTFILE`. Resolves [#70323](https://github.com/ClickHouse/ClickHouse/issues/70323). [#77181](https://github.com/ClickHouse/ClickHouse/pull/77181) ([Onkar Deshpande](https://github.com/onkar)).
-* It is no longer possible to use `NaN` or `inf` as values for float settings; such values never made sense. [#77546](https://github.com/ClickHouse/ClickHouse/pull/77546) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Disable parallel replicas by default when the analyzer is disabled, regardless of the `compatibility` setting. It is still possible to change this behavior by explicitly setting `parallel_replicas_only_with_analyzer` to `false`. [#77115](https://github.com/ClickHouse/ClickHouse/pull/77115) ([Igor Nikonov](https://github.com/devcrafter)).
-* Add the ability to define a list of headers that are forwarded from the headers of the client request to the external HTTP authenticator. [#77054](https://github.com/ClickHouse/ClickHouse/pull/77054) ([inv2004](https://github.com/inv2004)).
-* Respect case-insensitive column matching for fields in tuple columns. Closes https://github.com/apache/incubator-gluten/issues/8324. [#73780](https://github.com/ClickHouse/ClickHouse/pull/73780) ([李扬](https://github.com/taiyang-li)).
-* Parameters for the `Gorilla` codec will now always be saved in the table metadata in the .sql file. This closes: [#70072](https://github.com/ClickHouse/ClickHouse/issues/70072). [#74814](https://github.com/ClickHouse/ClickHouse/pull/74814) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Implemented parsing enhancements for certain data lakes: added functionality to parse sequence identifiers in manifest files, and redesigned the Avro metadata parser to be easily extendable for future enhancements. [#75010](https://github.com/ClickHouse/ClickHouse/pull/75010) ([Daniil Ivanik](https://github.com/divanik)).
-* Remove trace_id from default ORDER BY for `system.opentelemetry_span_log`. [#75907](https://github.com/ClickHouse/ClickHouse/pull/75907) ([Azat Khuzhin](https://github.com/azat)).
-* Encryption (the attribute `encrypted_by`) can now be applied to any configuration file (config.xml, users.xml, nested configuration files). Previously, it worked only for the top-level config.xml file. [#75911](https://github.com/ClickHouse/ClickHouse/pull/75911) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
-* Improve the `system.warnings` table with dynamic warning messages that can be added, updated, or removed. [#76029](https://github.com/ClickHouse/ClickHouse/pull/76029) ([Bharat Nallan](https://github.com/bharatnc)).
-* Queries like `ALTER USER user1 ADD PROFILES a, DROP ALL PROFILES` are no longer possible, because all `DROP` operations must come first in the order. [#76242](https://github.com/ClickHouse/ClickHouse/pull/76242) ([pufit](https://github.com/pufit)).
-* Various enhancements for SYNC REPLICA (better error messages, better tests, sanity checks). [#76307](https://github.com/ClickHouse/ClickHouse/pull/76307) ([Azat Khuzhin](https://github.com/azat)).
-* Use the correct fallback when a multipart copy to S3 fails with Access Denied during a backup. A multipart copy can generate an Access Denied error when the backup is performed between buckets that have different credentials. [#76515](https://github.com/ClickHouse/ClickHouse/pull/76515) ([Antonio Andelic](https://github.com/antonio2368)).
-* Upgraded librdkafka to version 2.8.0 and improved the shutdown sequence for Kafka tables, reducing delays during table drops and server restarts. The `engine=Kafka` no longer explicitly leaves the consumer group when a table is dropped. Instead, the consumer remains in the group until it is automatically removed after `session_timeout_ms` (default: 45 seconds) of inactivity. [#76621](https://github.com/ClickHouse/ClickHouse/pull/76621) ([filimonov](https://github.com/filimonov)).
-* Fix validation of S3 request settings. [#76658](https://github.com/ClickHouse/ClickHouse/pull/76658) ([Vitaly Baranov](https://github.com/vitlibar)).
-* System tables like `server_settings` or `settings` have a convenient `default` value column. Add it to `merge_tree_settings` and `replicated_merge_tree_settings` as well. [#76942](https://github.com/ClickHouse/ClickHouse/pull/76942) ([Diego Nieto](https://github.com/lesandie)).
-* Added `ProfileEvents::QueryPreempted`, which has a similar logic to `CurrentMetrics::QueryPreempted`. [#77015](https://github.com/ClickHouse/ClickHouse/pull/77015) ([VicoWu](https://github.com/VicoWu)).
-* Previously, a Replicated database could print credentials specified in a query to logs. This behaviour is fixed. This closes: [#77123](https://github.com/ClickHouse/ClickHouse/issues/77123). [#77133](https://github.com/ClickHouse/ClickHouse/pull/77133) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Allow `ALTER TABLE DROP PARTITION` for the `plain_rewritable` disk. [#77138](https://github.com/ClickHouse/ClickHouse/pull/77138) ([Julia Kartseva](https://github.com/jkartseva)).
-* The backup/restore setting `allow_s3_native_copy` now supports three possible values: `false` disables S3 native copy; `true` (the old default) makes ClickHouse try S3 native copy first, falling back to the reading+writing approach if it fails; `'auto'` (the new default) makes ClickHouse compare the source and destination credentials first. If they are the same, ClickHouse tries S3 native copy and may fall back to the reading+writing approach; if they are different, it goes directly to the reading+writing approach. See the sketch after this list. [#77401](https://github.com/ClickHouse/ClickHouse/pull/77401) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Support aws session token and environment credentials usage in delta kernel for DeltaLake table engine. [#77661](https://github.com/ClickHouse/ClickHouse/pull/77661) ([Kseniia Sumarokova](https://github.com/kssenii)).
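-
-A minimal sketch of choosing the copy mode per backup, as referenced in the `allow_s3_native_copy` item above; the endpoint and credentials are placeholders:
-
-```sql
-BACKUP TABLE db1.table1
-TO S3('https://dst-bucket.s3.amazonaws.com/backups/table1', '<key-id>', '<secret>')
-SETTINGS allow_s3_native_copy = 'auto';
-```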
-
-#### Bug Fix (user-visible misbehavior in an official stable release)
-* Fix a hang while processing a pending batch for asynchronous distributed INSERTs (e.g., due to `No such file or directory`). [#72939](https://github.com/ClickHouse/ClickHouse/pull/72939) ([Azat Khuzhin](https://github.com/azat)).
-* Improved datetime conversion during index analysis by enforcing saturating behavior for implicit Date to DateTime conversions. This resolves potential index analysis inaccuracies caused by datetime range limitations. This fixes [#73307](https://github.com/ClickHouse/ClickHouse/issues/73307). It also fixes explicit `toDateTime` conversion when `date_time_overflow_behavior = 'ignore'` which is the default value. [#73326](https://github.com/ClickHouse/ClickHouse/pull/73326) ([Amos Bird](https://github.com/amosbird)).
-* Fix various bugs caused by a race between UUIDs and table names. For instance, this fixes the race between `RENAME` and `RESTART REPLICA`: with a concurrent `RENAME` and `SYSTEM RESTART REPLICA`, you could end up restarting the wrong replica and/or leaving one of the tables in a `Table X is being restarted` state. [#76308](https://github.com/ClickHouse/ClickHouse/pull/76308) ([Azat Khuzhin](https://github.com/azat)).
-* Fix data loss with async inserts from `INSERT INTO ... FROM FILE ...` with unequal block sizes: if the first block was smaller than `async_max_size` but the second block was larger, the second block was not inserted and its data was left in the squashing buffer. [#76343](https://github.com/ClickHouse/ClickHouse/pull/76343) ([Han Fei](https://github.com/hanfei1991)).
-* Renamed field 'marks' to 'marks_bytes' in `system.data_skipping_indices`. [#76374](https://github.com/ClickHouse/ClickHouse/pull/76374) ([Robert Schulze](https://github.com/rschu1ze)).
-* Fix dynamic filesystem cache resize handling unexpected errors during eviction. [#76466](https://github.com/ClickHouse/ClickHouse/pull/76466) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fixed `used_flag` initialization in parallel hash. It might cause a server crash. [#76580](https://github.com/ClickHouse/ClickHouse/pull/76580) ([Nikita Taranov](https://github.com/nickitat)).
-* Fix a logical error when calling `defaultProfiles` function inside a projection. [#76627](https://github.com/ClickHouse/ClickHouse/pull/76627) ([pufit](https://github.com/pufit)).
-* Do not request interactive basic auth in the browser in Web UI. Closes [#76319](https://github.com/ClickHouse/ClickHouse/issues/76319). [#76637](https://github.com/ClickHouse/ClickHouse/pull/76637) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix THERE_IS_NO_COLUMN exception when selecting boolean literal from distributed tables. [#76656](https://github.com/ClickHouse/ClickHouse/pull/76656) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* The subpath inside the table directory is now chosen in a more robust way. [#76681](https://github.com/ClickHouse/ClickHouse/pull/76681) ([Daniil Ivanik](https://github.com/divanik)).
-* Fix an error `Not found column in block` after altering a table with a subcolumn in PK. After https://github.com/ClickHouse/ClickHouse/pull/72644, requires https://github.com/ClickHouse/ClickHouse/pull/74403. [#76686](https://github.com/ClickHouse/ClickHouse/pull/76686) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Add performance tests for null short-circuit evaluation and fix bugs. [#76708](https://github.com/ClickHouse/ClickHouse/pull/76708) ([李扬](https://github.com/taiyang-li)).
-* Flush output write buffers before finalizing them. Fix `LOGICAL_ERROR` generated during the finalization of some output format, e.g. `JSONEachRowWithProgressRowOutputFormat`. [#76726](https://github.com/ClickHouse/ClickHouse/pull/76726) ([Antonio Andelic](https://github.com/antonio2368)).
-* Added support for MongoDB's binary UUID ([#74452](https://github.com/ClickHouse/ClickHouse/issues/74452)). Fixed WHERE pushdown to MongoDB when using the table function ([#72210](https://github.com/ClickHouse/ClickHouse/issues/72210)). Changed the MongoDB to ClickHouse type mapping so that MongoDB's binary UUID can only be parsed to ClickHouse's UUID, which should avoid ambiguities and surprises in the future. Fixed OID mapping, preserving backward compatibility. [#76762](https://github.com/ClickHouse/ClickHouse/pull/76762) ([Kirill Nikiforov](https://github.com/allmazz)).
-* Fix exception handling in parallel prefixes deserialization of JSON subcolumns. [#76809](https://github.com/ClickHouse/ClickHouse/pull/76809) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix lgamma function behavior for negative integers. [#76840](https://github.com/ClickHouse/ClickHouse/pull/76840) ([Ilya Kataev](https://github.com/IlyaKataev)).
-* Fix reverse key analysis for explicitly defined primary keys. Similar to [#76654](https://github.com/ClickHouse/ClickHouse/issues/76654). [#76846](https://github.com/ClickHouse/ClickHouse/pull/76846) ([Amos Bird](https://github.com/amosbird)).
-* Fix pretty print of Bool values in JSON format. [#76905](https://github.com/ClickHouse/ClickHouse/pull/76905) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix possible crash because of bad JSON column rollback on error during async inserts. [#76908](https://github.com/ClickHouse/ClickHouse/pull/76908) ([Pavel Kruglov](https://github.com/Avogar)).
-* Previously, `multiIf` could return different types of columns during planning and main execution. This resulted in code producing undefined behavior from the C++ perspective. [#76914](https://github.com/ClickHouse/ClickHouse/pull/76914) ([Nikita Taranov](https://github.com/nickitat)).
-* Fixed incorrect serialization of constant nullable keys in MergeTree. This fixes [#76939](https://github.com/ClickHouse/ClickHouse/issues/76939). [#76985](https://github.com/ClickHouse/ClickHouse/pull/76985) ([Amos Bird](https://github.com/amosbird)).
-* Fix sorting of `BFloat16` values. This closes [#75487](https://github.com/ClickHouse/ClickHouse/issues/75487). This closes [#75669](https://github.com/ClickHouse/ClickHouse/issues/75669). [#77000](https://github.com/ClickHouse/ClickHouse/pull/77000) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix a bug with JSON's `Variant` subcolumn by adding a check to skip ephemeral subcolumns in the part consistency check. [#72187](https://github.com/ClickHouse/ClickHouse/issues/72187). [#77034](https://github.com/ClickHouse/ClickHouse/pull/77034) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)).
-* Fix crash in template parsing in Values format in case of types mismatch. [#77071](https://github.com/ClickHouse/ClickHouse/pull/77071) ([Pavel Kruglov](https://github.com/Avogar)).
-* Don't allow creating an EmbeddedRocksDB table with a subcolumn in the primary key. Previously, such a table could be created, but SELECT queries failed. [#77074](https://github.com/ClickHouse/ClickHouse/pull/77074) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix an illegal comparison in distributed queries caused by predicate pushdown to remote servers not respecting literal types. [#77093](https://github.com/ClickHouse/ClickHouse/pull/77093) ([Duc Canh Le](https://github.com/canhld94)).
-* Fix a crash when Kafka table creation fails with an exception. [#77121](https://github.com/ClickHouse/ClickHouse/pull/77121) ([Pavel Kruglov](https://github.com/Avogar)).
-* Support JSON and subcolumns in Kafka and RabbitMQ engines. [#77122](https://github.com/ClickHouse/ClickHouse/pull/77122) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix exception stack unwinding on macOS. [#77126](https://github.com/ClickHouse/ClickHouse/pull/77126) ([Eduard Karacharov](https://github.com/korowa)).
-* Fix reading the `null` subcolumn in the `getSubcolumn` function. [#77163](https://github.com/ClickHouse/ClickHouse/pull/77163) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix the bloom filter index with `Array` columns and unsupported functions. [#77271](https://github.com/ClickHouse/ClickHouse/pull/77271) ([Pavel Kruglov](https://github.com/Avogar)).
-* Check the restriction on the number of tables only during the initial CREATE query. [#77274](https://github.com/ClickHouse/ClickHouse/pull/77274) ([Nikolay Degterinsky](https://github.com/evillique)).
-* Not a bug: `SELECT toBFloat16(-0.0) == toBFloat16(0.0)` now correctly returns `true` (previously `false`). This makes the behavior consistent with `Float32` and `Float64`; see the worked example after this list. [#77290](https://github.com/ClickHouse/ClickHouse/pull/77290) ([Shankar Iyer](https://github.com/shankar-iyer)).
-* Fix a possible incorrect reference to the uninitialized `key_index` variable, which may lead to a crash in debug builds (this uninitialized reference won't cause issues in release builds because subsequent code is likely to throw errors). [#77305](https://github.com/ClickHouse/ClickHouse/pull/77305) ([wxybear](https://github.com/wxybear)).
-* Fix name for partition with a Bool value. It was broken in https://github.com/ClickHouse/ClickHouse/pull/74533. [#77319](https://github.com/ClickHouse/ClickHouse/pull/77319) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix comparison between tuples with nullable elements inside and strings. As an example, before the change comparison between a Tuple `(1, null)` and a String `'(1,null)'` would result in an error. Another example would be a comparison between a Tuple `(1, a)`, where `a` is a Nullable column, and a String `'(1, 2)'`. This change addresses these issues. [#77323](https://github.com/ClickHouse/ClickHouse/pull/77323) ([Alexey Katsman](https://github.com/alexkats)).
-* Fix a crash in ObjectStorageQueueSource, introduced in https://github.com/ClickHouse/ClickHouse/pull/76358. [#77325](https://github.com/ClickHouse/ClickHouse/pull/77325) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix `async_insert` with `input`. [#77340](https://github.com/ClickHouse/ClickHouse/pull/77340) ([Azat Khuzhin](https://github.com/azat)).
-* Fix: `WITH FILL` could fail with `NOT_FOUND_COLUMN_IN_BLOCK` when the sorting column was removed by the planner. Also fix a similar issue related to an inconsistent DAG calculated for the `INTERPOLATE` expression. [#77343](https://github.com/ClickHouse/ClickHouse/pull/77343) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Fix several LOGICAL_ERRORs around setting alias of invalid AST nodes. [#77445](https://github.com/ClickHouse/ClickHouse/pull/77445) ([Raúl Marín](https://github.com/Algunenano)).
-* Fix error processing during file segment writes in the filesystem cache implementation. [#77471](https://github.com/ClickHouse/ClickHouse/pull/77471) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Make DatabaseIceberg use correct metadata file provided by catalog. Closes [#75187](https://github.com/ClickHouse/ClickHouse/issues/75187). [#77486](https://github.com/ClickHouse/ClickHouse/pull/77486) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* The query cache now assumes that UDFs are non-deterministic. Accordingly, results of queries with UDFs are no longer cached. Previously, users were able to define non-deterministic UDFs whose results would erroneously be cached (issue [#77553](https://github.com/ClickHouse/ClickHouse/issues/77553)). [#77633](https://github.com/ClickHouse/ClickHouse/pull/77633) ([Jimmy Aguilar Mena](https://github.com/Ergus)).
-* Fix `system.filesystem_cache_log` working only under the setting `enable_filesystem_cache_log`. [#77650](https://github.com/ClickHouse/ClickHouse/pull/77650) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix a logical error when calling `defaultRoles` function inside a projection. Follow-up for [#76627](https://github.com/ClickHouse/ClickHouse/issues/76627). [#77667](https://github.com/ClickHouse/ClickHouse/pull/77667) ([pufit](https://github.com/pufit)).
-* A second argument of type `Nullable` for the function `arrayResize` is now disallowed. Previously, anything from errors to wrong results could happen with `Nullable` as the second argument (issue [#48398](https://github.com/ClickHouse/ClickHouse/issues/48398)). [#77724](https://github.com/ClickHouse/ClickHouse/pull/77724) ([Manish Gill](https://github.com/mgill25)).
-* Regularly check whether merges and mutations have been cancelled, even when the operation doesn't produce any blocks to write. [#77766](https://github.com/ClickHouse/ClickHouse/pull/77766) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
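-
-A worked example of the `BFloat16` zero comparison mentioned earlier in this list; the expected results follow from that entry:
-
-```sql
-SELECT
-    toBFloat16(-0.0) == toBFloat16(0.0) AS bfloat16_zeros_equal, -- now true
-    toFloat32(-0.0) == toFloat32(0.0) AS float32_zeros_equal;    -- true, consistent behavior
-```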
-
-#### Build/Testing/Packaging Improvement
-* `clickhouse-odbc-bridge` and `clickhouse-library-bridge` are moved to a separate repository, https://github.com/ClickHouse/odbc-bridge/. [#76225](https://github.com/ClickHouse/ClickHouse/pull/76225) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix Rust cross-compilation and allow disabling Rust completely. [#76921](https://github.com/ClickHouse/ClickHouse/pull/76921) ([Raúl Marín](https://github.com/Algunenano)).
-
-
-### ClickHouse release 25.2, 2025-02-27 {#252}
-
-#### Backward Incompatible Change
-* Completely enable `async_load_databases` by default (even for those installations that do not upgrade `config.xml`). [#74772](https://github.com/ClickHouse/ClickHouse/pull/74772) ([Azat Khuzhin](https://github.com/azat)).
-* Add `JSONCompactEachRowWithProgress` and `JSONCompactStringsEachRowWithProgress` formats. Continuation of [#69989](https://github.com/ClickHouse/ClickHouse/issues/69989). The `JSONCompactWithNames` and `JSONCompactWithNamesAndTypes` no longer output "totals" - apparently, it was a mistake in the implementation. [#75037](https://github.com/ClickHouse/ClickHouse/pull/75037) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Change the `format_alter_operations_with_parentheses` default to true to disambiguate the ALTER commands list (see https://github.com/ClickHouse/ClickHouse/pull/59532). This breaks replication with clusters prior to 24.3. If you are upgrading a cluster that uses older releases, turn off the setting in the server config or upgrade to 24.3 first. [#75302](https://github.com/ClickHouse/ClickHouse/pull/75302) ([Raúl Marín](https://github.com/Algunenano)).
-* Remove the possibility of filtering log messages using regular expressions. The implementation introduced a data race, so it has to be removed. [#75577](https://github.com/ClickHouse/ClickHouse/pull/75577) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* The setting `min_chunk_bytes_for_parallel_parsing` cannot be zero anymore. This fixes: [#71110](https://github.com/ClickHouse/ClickHouse/issues/71110). [#75239](https://github.com/ClickHouse/ClickHouse/pull/75239) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Validate settings in the cache configuration. Previously, non-existent settings were ignored; now they throw an error and should be removed. [#75452](https://github.com/ClickHouse/ClickHouse/pull/75452) ([Kseniia Sumarokova](https://github.com/kssenii)).
-
-#### New Feature
-* Support type `Nullable(JSON)`. [#73556](https://github.com/ClickHouse/ClickHouse/pull/73556) ([Pavel Kruglov](https://github.com/Avogar)).
-* Support subcolumns in the DEFAULT and MATERIALIZED expressions (see the sketch after this list). [#74403](https://github.com/ClickHouse/ClickHouse/pull/74403) ([Pavel Kruglov](https://github.com/Avogar)).
-* Support writing Parquet bloom filters using the `output_format_parquet_write_bloom_filter` setting (enabled by default). [#71681](https://github.com/ClickHouse/ClickHouse/pull/71681) ([Michael Kolupaev](https://github.com/al13n321)).
-* Web UI now has interactive database navigation. [#75777](https://github.com/ClickHouse/ClickHouse/pull/75777) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Allow the combination of read-only and read-write disks in the storage policy (as multiple volumes or multiple disks). This allows data to be read from the entire volume, while inserts will prefer the writable disk (i.e., Copy-on-Write storage policy). [#75862](https://github.com/ClickHouse/ClickHouse/pull/75862) ([Azat Khuzhin](https://github.com/azat)).
-* Add a new database engine, `DatabaseBackup`, that allows instantly attaching a table or database from a backup. [#75725](https://github.com/ClickHouse/ClickHouse/pull/75725) ([Maksim Kita](https://github.com/kitaisreal)).
-* Support prepared statements in the Postgres wire protocol. [#75035](https://github.com/ClickHouse/ClickHouse/pull/75035) ([scanhex12](https://github.com/scanhex12)).
-* Add the ability to ATTACH tables without a database layer, which is useful for MergeTree tables located on Web, S3, and similar external virtual filesystems. [#75788](https://github.com/ClickHouse/ClickHouse/pull/75788) ([Azat Khuzhin](https://github.com/azat)).
-* Add a new string comparison function, `compareSubstrings` to compare parts of two strings. Example: `SELECT compareSubstrings('Saxony', 'Anglo-Saxon', 0, 6, 5) AS result` means "compare 6 bytes of strings 'Saxon' and 'Anglo-Saxon' lexicographically, starting at offset 0 in the first string, offset 5 in the second string". [#74070](https://github.com/ClickHouse/ClickHouse/pull/74070) ([lgbo](https://github.com/lgbo-ustc)).
-* A new function, `initialQueryStartTime`, is added. It returns the start time of the current query; the value is the same across all shards during a distributed query. [#75087](https://github.com/ClickHouse/ClickHouse/pull/75087) ([Roman Lomonosov](https://github.com/lomik)).
-* Add support for SSL authentication with named collections for MySQL. Closes [#59111](https://github.com/ClickHouse/ClickHouse/issues/59111). [#59452](https://github.com/ClickHouse/ClickHouse/pull/59452) ([Nikolay Degterinsky](https://github.com/evillique)).
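-
-A minimal sketch of a subcolumn used in a DEFAULT expression, as referenced in the DEFAULT/MATERIALIZED subcolumns item above; the table and column names are hypothetical:
-
-```sql
-CREATE TABLE events
-(
-    payload Tuple(user_id UInt64, action String),
-    -- the tuple subcolumn payload.user_id feeds the DEFAULT expression
-    user_id UInt64 DEFAULT payload.user_id
-)
-ENGINE = MergeTree
-ORDER BY user_id;
-```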
-
-#### Experimental Features
-* Added a new setting, `enable_adaptive_memory_spill_scheduler`, that allows multiple grace hash joins in the same query to monitor their combined memory footprint and adaptively trigger spilling into external storage to prevent MEMORY_LIMIT_EXCEEDED (see the sketch after this list). [#72728](https://github.com/ClickHouse/ClickHouse/pull/72728) ([lgbo](https://github.com/lgbo-ustc)).
-* Make the new, experimental `Kafka` table engine fully respect Keeper feature flags. [#76004](https://github.com/ClickHouse/ClickHouse/pull/76004) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* Restore the (Intel) QPL codec, which was removed in v24.10 due to licensing issues. [#76021](https://github.com/ClickHouse/ClickHouse/pull/76021) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Added support for the `dfs.client.use.datanode.hostname` configuration option in the HDFS integration. [#74635](https://github.com/ClickHouse/ClickHouse/pull/74635) ([Mikhail Tiukavkin](https://github.com/freshertm)).
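-
-A minimal sketch of enabling the adaptive spill scheduler for a session with grace hash joins, as referenced in the first item of this list; the tables are hypothetical:
-
-```sql
-SET enable_adaptive_memory_spill_scheduler = 1;
-SET join_algorithm = 'grace_hash';
-
-SELECT count()
-FROM big_table AS l
-JOIN other_big_table AS r ON l.key = r.key;
-```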
-
-#### Performance Improvement
-* Improve performance of reading a whole JSON column in Wide parts from S3 by adding prefetches for subcolumn prefix deserialization, a cache of deserialized prefixes, and parallel deserialization of subcolumn prefixes. This improves reading of the JSON column from S3 by about 4 times in queries like `SELECT data FROM table` and about 10 times in queries like `SELECT data FROM table LIMIT 10`. [#74827](https://github.com/ClickHouse/ClickHouse/pull/74827) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fixed unnecessary contention in `parallel_hash` when `max_rows_in_join = max_bytes_in_join = 0`. [#75155](https://github.com/ClickHouse/ClickHouse/pull/75155) ([Nikita Taranov](https://github.com/nickitat)).
-* Fixed double preallocation in `ConcurrentHashJoin` in case join sides are swapped by the optimizer. [#75149](https://github.com/ClickHouse/ClickHouse/pull/75149) ([Nikita Taranov](https://github.com/nickitat)).
-* Slight improvement in some join scenarios: precalculate the number of output rows and reserve memory for them. [#75376](https://github.com/ClickHouse/ClickHouse/pull/75376) ([Alexander Gololobov](https://github.com/davenger)).
-* For queries like `WHERE a < b AND b < c AND c < 5`, we can infer new comparing conditions (`a < 5 AND b < 5`) to have better filter ability. [#73164](https://github.com/ClickHouse/ClickHouse/pull/73164) ([Shichao Jin](https://github.com/jsc0218)).
-* Keeper improvement: disable digest calculation when committing to in-memory storage for better performance. It can be enabled with `keeper_server.digest_enabled_on_commit` config. Digest is still calculated when preprocessing requests. [#75490](https://github.com/ClickHouse/ClickHouse/pull/75490) ([Antonio Andelic](https://github.com/antonio2368)).
-* Push down filter expression from JOIN ON when possible. [#75536](https://github.com/ClickHouse/ClickHouse/pull/75536) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Calculate column and index sizes lazily in MergeTree. [#75938](https://github.com/ClickHouse/ClickHouse/pull/75938) ([Pavel Kruglov](https://github.com/Avogar)).
-* Reintroduce respecting `ttl_only_drop_parts` on `MATERIALIZE TTL`; only read the columns necessary to recalculate TTL and drop parts by replacing them with empty ones. [#72751](https://github.com/ClickHouse/ClickHouse/pull/72751) ([Andrey Zvonov](https://github.com/zvonand)).
-* Reduce the write buffer size for `plain_rewritable` metadata files. [#75758](https://github.com/ClickHouse/ClickHouse/pull/75758) ([Julia Kartseva](https://github.com/jkartseva)).
-* Reduce memory usage for some window functions. [#65647](https://github.com/ClickHouse/ClickHouse/pull/65647) ([lgbo](https://github.com/lgbo-ustc)).
-* Evaluate parquet bloom filters and min/max indexes together. Necessary to properly support: `x = 3 or x > 5` where data = [1, 2, 4, 5]. [#71383](https://github.com/ClickHouse/ClickHouse/pull/71383) ([Arthur Passos](https://github.com/arthurpassos)).
-* Queries passed to the `Executable` storage are no longer limited to a single-threaded execution. [#70084](https://github.com/ClickHouse/ClickHouse/pull/70084) ([yawnt](https://github.com/yawnt)).
-* Fetch parts in parallel in `ALTER TABLE FETCH PARTITION` (the thread pool size is controlled with `max_fetch_partition_thread_pool_size`); a sketch of the statement follows this list. [#74978](https://github.com/ClickHouse/ClickHouse/pull/74978) ([Azat Khuzhin](https://github.com/azat)).
-* Allow to move predicates with the `indexHint` function to `PREWHERE`. [#74987](https://github.com/ClickHouse/ClickHouse/pull/74987) ([Anton Popov](https://github.com/CurtizJ)).
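-
-A minimal sketch of the statement the parallel fetch speeds up, as referenced above; the table name, partition value, and ZooKeeper path are hypothetical:
-
-```sql
-ALTER TABLE db1.table1
-FETCH PARTITION 'A'
-FROM '/clickhouse/tables/01/table1';
-```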
-
-#### Improvement
-* Fixed calculation of size in memory for `LowCardinality` columns. [#74688](https://github.com/ClickHouse/ClickHouse/pull/74688) ([Nikita Taranov](https://github.com/nickitat)).
-* `processors_profile_log` table now has a default configuration with a TTL of 30 days. [#66139](https://github.com/ClickHouse/ClickHouse/pull/66139) ([Ilya Yatsishin](https://github.com/qoega)).
-* Allow naming shards in the cluster configuration. [#72276](https://github.com/ClickHouse/ClickHouse/pull/72276) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
-* Change Prometheus remote write response success status from 200/OK to 204/NoContent. [#74170](https://github.com/ClickHouse/ClickHouse/pull/74170) ([Michael Dempsey](https://github.com/bluestealth)).
-* Add the ability to reload `max_remote_read_network_bandwidth_for_server` and `max_remote_write_network_bandwidth_for_server` on the fly without restarting the server. [#74206](https://github.com/ClickHouse/ClickHouse/pull/74206) ([Kai Zhu](https://github.com/nauu)).
-* Allow using blob paths to calculate checksums while making a backup. [#74729](https://github.com/ClickHouse/ClickHouse/pull/74729) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Added a query ID column to `system.query_cache` (closes [#68205](https://github.com/ClickHouse/ClickHouse/issues/68205)). [#74982](https://github.com/ClickHouse/ClickHouse/pull/74982) ([NamHoaiNguyen](https://github.com/NamHoaiNguyen)).
-* `ALTER TABLE ... FREEZE ...` queries can now be cancelled with `KILL QUERY` or automatically by a timeout (`max_execution_time`). [#75016](https://github.com/ClickHouse/ClickHouse/pull/75016) ([Kirill](https://github.com/kirillgarbar)).
-* Add support for `groupUniqArrayArrayMap` as `SimpleAggregateFunction`. [#75034](https://github.com/ClickHouse/ClickHouse/pull/75034) ([Miel Donkers](https://github.com/mdonkers)).
-* Hide catalog credential settings in database engine `Iceberg`. Closes [#74559](https://github.com/ClickHouse/ClickHouse/issues/74559). [#75080](https://github.com/ClickHouse/ClickHouse/pull/75080) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* `intExp2` / `intExp10`: define previously undefined behavior: return 0 for too small an argument, `18446744073709551615` for too large an argument, and throw an exception if the argument is `nan`; see the worked example after this list. [#75312](https://github.com/ClickHouse/ClickHouse/pull/75312) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Support `s3.endpoint` natively from catalog config in `DatabaseIceberg`. Closes [#74558](https://github.com/ClickHouse/ClickHouse/issues/74558). [#75375](https://github.com/ClickHouse/ClickHouse/pull/75375) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Don't fail silently if a user executing `SYSTEM DROP REPLICA` doesn't have enough permissions. [#75377](https://github.com/ClickHouse/ClickHouse/pull/75377) ([Bharat Nallan](https://github.com/bharatnc)).
-* Add a ProfileEvent about the number of times any of the system logs have failed to flush. [#75466](https://github.com/ClickHouse/ClickHouse/pull/75466) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add a check and extra logging for decrypting and decompressing. [#75471](https://github.com/ClickHouse/ClickHouse/pull/75471) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Added support for the micro sign (U+00B5) in the `parseTimeDelta` function. Now both the micro sign (U+00B5) and the Greek letter mu (U+03BC) are recognized as valid representations for microseconds, aligning ClickHouse's behavior with Go’s implementation ([see time.go](https://github.com/golang/go/blob/ad7b46ee4ac1cee5095d64b01e8cf7fcda8bee5e/src/time/time.go#L983C19-L983C20) and [time/format.go](https://github.com/golang/go/blob/ad7b46ee4ac1cee5095d64b01e8cf7fcda8bee5e/src/time/format.go#L1608-L1609)). [#75472](https://github.com/ClickHouse/ClickHouse/pull/75472) ([Vitaly Orlov](https://github.com/orloffv)).
-* Replace server setting (`send_settings_to_client`) with client setting (`apply_settings_from_server`) that controls whether client-side code (e.g., parsing INSERT data and formatting query output) should use settings from server's `users.xml` and user profile. Otherwise, only settings from the client command line, session, and query are used. Note that this only applies to native client (not e.g. HTTP), and doesn't apply to most of query processing (which happens on the server). [#75478](https://github.com/ClickHouse/ClickHouse/pull/75478) ([Michael Kolupaev](https://github.com/al13n321)).
-* Better error messages for syntax errors. Previously, if the query was too large, and the token whose length exceeds the limit is a very large string literal, the message about the reason was lost in the middle of two examples of this very long token. Fix the issue when a query with UTF-8 was cut incorrectly in the error message. Fix excessive quoting of query fragments. This closes [#75473](https://github.com/ClickHouse/ClickHouse/issues/75473). [#75561](https://github.com/ClickHouse/ClickHouse/pull/75561) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add profile events in storage `S3(Azure)Queue`. [#75618](https://github.com/ClickHouse/ClickHouse/pull/75618) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Disable sending settings from server to client (`send_settings_to_client=false`) for compatibility (This feature will be re-implemented as a client setting later for better usability). [#75648](https://github.com/ClickHouse/ClickHouse/pull/75648) ([Michael Kolupaev](https://github.com/al13n321)).
-* Add a config `memory_worker_correct_memory_tracker` to enable correction of the internal memory tracker with information from different sources read in the background thread periodically. [#75714](https://github.com/ClickHouse/ClickHouse/pull/75714) ([Antonio Andelic](https://github.com/antonio2368)).
-* Add column `normalized_query_hash` into `system.processes`. Note: while it can be easily calculated on the fly with the `normalizedQueryHash` function, this is needed to prepare for subsequent changes. [#75756](https://github.com/ClickHouse/ClickHouse/pull/75756) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Querying `system.tables` will not throw even if there is a `Merge` table created over a database that no longer exists. Remove the `getTotalRows` method from `Hive` tables because we don't allow it to do complex work. [#75772](https://github.com/ClickHouse/ClickHouse/pull/75772) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Store `start_time`/`end_time` for backups with microsecond precision. [#75929](https://github.com/ClickHouse/ClickHouse/pull/75929) ([Aleksandr Musorin](https://github.com/AVMusorin)).
-* Add `MemoryTrackingUncorrected` metric showing the value of the internal global memory tracker which is not corrected by RSS. [#75935](https://github.com/ClickHouse/ClickHouse/pull/75935) ([Antonio Andelic](https://github.com/antonio2368)).
-* Allow parsing endpoints like `localhost:1234/handle` in `PostgreSQL` or `MySQL` table functions. This fixes a regression introduced in https://github.com/ClickHouse/ClickHouse/pull/52503. [#75944](https://github.com/ClickHouse/ClickHouse/pull/75944) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Added a server setting `throw_on_unknown_workload` that allows choosing the behavior for queries with the `workload` setting set to an unknown value: either allow unlimited access (the default) or throw a `RESOURCE_ACCESS_DENIED` error. This is useful to force all queries to use workload scheduling. [#75999](https://github.com/ClickHouse/ClickHouse/pull/75999) ([Sergei Trifonov](https://github.com/serxa)).
-* Don't rewrite subcolumns to `getSubcolumn` in `ARRAY JOIN` if not necessary. [#76018](https://github.com/ClickHouse/ClickHouse/pull/76018) ([Pavel Kruglov](https://github.com/Avogar)).
-* Retry coordination errors when loading tables. [#76020](https://github.com/ClickHouse/ClickHouse/pull/76020) ([Alexander Tokmakov](https://github.com/tavplubix)).
-* Support flushing individual logs in `SYSTEM FLUSH LOGS`. [#76132](https://github.com/ClickHouse/ClickHouse/pull/76132) ([Raúl Marín](https://github.com/Algunenano)).
-* Improved the server's `/binary` page: use the Hilbert curve instead of the Morton curve; display 512 MB worth of addresses in the square, which fills it better (in previous versions, addresses filled only half of the square); color addresses closer to the library name rather than the function name; and allow scrolling a bit more outside of the area. [#76192](https://github.com/ClickHouse/ClickHouse/pull/76192) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Retry ON CLUSTER queries in case of TOO_MANY_SIMULTANEOUS_QUERIES. [#76352](https://github.com/ClickHouse/ClickHouse/pull/76352) ([Patrick Galbraith](https://github.com/CaptTofu)).
-* Add the `CPUOverload` asynchronous metric, which calculates the server's relative CPU deficit. [#76404](https://github.com/ClickHouse/ClickHouse/pull/76404) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Changed the default value of `output_format_pretty_max_rows` from 10000 to 1000, which is better for usability. [#76407](https://github.com/ClickHouse/ClickHouse/pull/76407) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
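-
-A worked example of the `intExp2` / `intExp10` behavior defined earlier in this list; the argument values are illustrative and the results follow from that entry:
-
-```sql
-SELECT
-    intExp10(-100), -- too small an argument: returns 0
-    intExp10(100);  -- too large an argument: returns 18446744073709551615
-```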
-
-#### Bug Fix (user-visible misbehavior in an official stable release)
-* Fix formatting of exceptions using a custom format if they appear during query interpretation. In previous versions, exceptions were formatted using the default format rather than the format specified in the query. This closes [#55422](https://github.com/ClickHouse/ClickHouse/issues/55422). [#74994](https://github.com/ClickHouse/ClickHouse/pull/74994) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix type mapping for SQLite (integer types into `int64`, floating points into `float64`). [#73853](https://github.com/ClickHouse/ClickHouse/pull/73853) ([Joanna Hulboj](https://github.com/jh0x)).
-* Fix identifier resolution from parent scopes. Allow the use of aliases to expressions in the WITH clause. Fixes [#58994](https://github.com/ClickHouse/ClickHouse/issues/58994). Fixes [#62946](https://github.com/ClickHouse/ClickHouse/issues/62946). Fixes [#63239](https://github.com/ClickHouse/ClickHouse/issues/63239). Fixes [#65233](https://github.com/ClickHouse/ClickHouse/issues/65233). Fixes [#71659](https://github.com/ClickHouse/ClickHouse/issues/71659). Fixes [#71828](https://github.com/ClickHouse/ClickHouse/issues/71828). Fixes [#68749](https://github.com/ClickHouse/ClickHouse/issues/68749). [#66143](https://github.com/ClickHouse/ClickHouse/pull/66143) ([Dmitry Novik](https://github.com/novikd)).
-* Fix `negate` function monotonicity. In previous versions, the query `select * from a where -x = -42;`, where `x` is the primary key, could return a wrong result. [#71440](https://github.com/ClickHouse/ClickHouse/pull/71440) ([Michael Kolupaev](https://github.com/al13n321)).
-* Fix empty tuple handling in arrayIntersect. This fixes [#72578](https://github.com/ClickHouse/ClickHouse/issues/72578). [#72581](https://github.com/ClickHouse/ClickHouse/pull/72581) ([Amos Bird](https://github.com/amosbird)).
-* Fix reading JSON sub-object subcolumns with incorrect prefix. [#73182](https://github.com/ClickHouse/ClickHouse/pull/73182) ([Pavel Kruglov](https://github.com/Avogar)).
-* Propagate Native format settings properly for client-server communication. [#73924](https://github.com/ClickHouse/ClickHouse/pull/73924) ([Pavel Kruglov](https://github.com/Avogar)).
-* Check for unsupported types in some storages. [#74218](https://github.com/ClickHouse/ClickHouse/pull/74218) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix crash with query `INSERT INTO SELECT` over PostgreSQL interface on macOS (issue [#72938](https://github.com/ClickHouse/ClickHouse/issues/72938)). [#74231](https://github.com/ClickHouse/ClickHouse/pull/74231) ([Artem Yurov](https://github.com/ArtemYurov)).
-* Fixed uninitialized max_log_ptr in the replicated database. [#74336](https://github.com/ClickHouse/ClickHouse/pull/74336) ([Konstantin Morozov](https://github.com/k-morozov)).
-* Fix crash when inserting interval (issue [#74299](https://github.com/ClickHouse/ClickHouse/issues/74299)). [#74478](https://github.com/ClickHouse/ClickHouse/pull/74478) ([NamHoaiNguyen](https://github.com/NamHoaiNguyen)).
-* Fix formatting of constant JSON literals. Previously, it could lead to syntax errors when sending the query to another server. [#74533](https://github.com/ClickHouse/ClickHouse/pull/74533) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix a broken create query when using constant partition expressions with implicit projections enabled. This fixes [#74596](https://github.com/ClickHouse/ClickHouse/issues/74596). [#74634](https://github.com/ClickHouse/ClickHouse/pull/74634) ([Amos Bird](https://github.com/amosbird)).
-* Avoid leaving connection in broken state after INSERT finishes with exception. [#74740](https://github.com/ClickHouse/ClickHouse/pull/74740) ([Azat Khuzhin](https://github.com/azat)).
-* Avoid reusing connections that had been left in the intermediate state. [#74749](https://github.com/ClickHouse/ClickHouse/pull/74749) ([Azat Khuzhin](https://github.com/azat)).
-* Fix crash during JSON type declaration parsing when type name is not uppercase. [#74784](https://github.com/ClickHouse/ClickHouse/pull/74784) ([Pavel Kruglov](https://github.com/Avogar)).
-* Keeper: fix a logical error when the connection was terminated before being established. [#74844](https://github.com/ClickHouse/ClickHouse/pull/74844) ([Michael Kolupaev](https://github.com/al13n321)).
-* Fix a behavior where the server couldn't start up when there was a table using `AzureBlobStorage`. Tables are now loaded without any requests to Azure. [#74880](https://github.com/ClickHouse/ClickHouse/pull/74880) ([Alexey Katsman](https://github.com/alexkats)).
-* Fix missing `used_privileges` and `missing_privileges` fields in `query_log` for BACKUP and RESTORE operations. [#74887](https://github.com/ClickHouse/ClickHouse/pull/74887) ([Alexey Katsman](https://github.com/alexkats)).
-* HDFS: refresh the Kerberos ticket if there is a SASL error during an HDFS select request. [#74930](https://github.com/ClickHouse/ClickHouse/pull/74930) ([inv2004](https://github.com/inv2004)).
-* Fix queries to Replicated database in startup_scripts. [#74942](https://github.com/ClickHouse/ClickHouse/pull/74942) ([Azat Khuzhin](https://github.com/azat)).
-* Fix issues with aliased expression types in the JOIN ON clause when a null-safe comparison is used. [#74970](https://github.com/ClickHouse/ClickHouse/pull/74970) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Revert a part's state from deleting back to outdated when the remove operation has failed. [#74985](https://github.com/ClickHouse/ClickHouse/pull/74985) ([Sema Checherinda](https://github.com/CheSema)).
-* In previous versions, when there was a scalar subquery, we started writing the progress (accumulated from processing the subquery) during the initialization of the data format, which was before HTTP headers were written. This led to the loss of HTTP headers, such as X-ClickHouse-QueryId and X-ClickHouse-Format, as well as Content-Type. [#74991](https://github.com/ClickHouse/ClickHouse/pull/74991) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix `CREATE TABLE AS...` queries for `database_replicated_allow_replicated_engine_arguments=0`. [#75000](https://github.com/ClickHouse/ClickHouse/pull/75000) ([Bharat Nallan](https://github.com/bharatnc)).
-* Fix leaving connection in a bad state in client after INSERT exceptions. [#75030](https://github.com/ClickHouse/ClickHouse/pull/75030) ([Azat Khuzhin](https://github.com/azat)).
-* Fix crash due to uncaught exception in PSQL replication. [#75062](https://github.com/ClickHouse/ClickHouse/pull/75062) ([Azat Khuzhin](https://github.com/azat)).
-* SASL can fail any RPC call; the fix retries the call if the krb5 ticket has expired. [#75063](https://github.com/ClickHouse/ClickHouse/pull/75063) ([inv2004](https://github.com/inv2004)).
-* Fixed usage of indexes (primary and secondary) for `Array`, `Map` and `Nullable(..)` columns with enabled setting `optimize_function_to_subcolumns`. Previously, indexes for these columns could have been ignored. [#75081](https://github.com/ClickHouse/ClickHouse/pull/75081) ([Anton Popov](https://github.com/CurtizJ)).
-* Disable `flatten_nested` when creating materialized views with inner tables since it will not be possible to use such flattened columns. [#75085](https://github.com/ClickHouse/ClickHouse/pull/75085) ([Christoph Wurm](https://github.com/cwurm)).
-* Fix wrong interpretation of some IPv6 addresses (such as `::ffff:1.1.1.1`) in the `forwarded_for` field, which resulted in client disconnects with an exception. [#75133](https://github.com/ClickHouse/ClickHouse/pull/75133) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Fix null-safe JOIN handling for the LowCardinality nullable data type. Previously, JOIN ON with a null-safe comparison, such as `IS NOT DISTINCT FROM`, `<=>`, or `a IS NULL AND b IS NULL OR a == b`, didn't work correctly with LowCardinality columns. [#75143](https://github.com/ClickHouse/ClickHouse/pull/75143) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Check that `key_condition` is not specified when counting `total_number_of_rows` for `NumRowsCache`. [#75164](https://github.com/ClickHouse/ClickHouse/pull/75164) ([Daniil Ivanik](https://github.com/divanik)).
-* Fix queries with unused interpolation with the new analyzer. [#75173](https://github.com/ClickHouse/ClickHouse/pull/75173) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* Fix a crash when using a CTE with INSERT. [#75188](https://github.com/ClickHouse/ClickHouse/pull/75188) ([Shichao Jin](https://github.com/jsc0218)).
-* Keeper fix: avoid writing to broken changelogs when rolling back logs. [#75197](https://github.com/ClickHouse/ClickHouse/pull/75197) ([Antonio Andelic](https://github.com/antonio2368)).
-* Use `BFloat16` as a supertype where appropriate. This closes: [#74404](https://github.com/ClickHouse/ClickHouse/issues/74404). [#75236](https://github.com/ClickHouse/ClickHouse/pull/75236) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Fix unexpected defaults in the join result with `any_join_distinct_right_table_keys` and `OR` in JOIN ON. [#75262](https://github.com/ClickHouse/ClickHouse/pull/75262) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Mask `AzureBlobStorage` table engine credentials. [#75319](https://github.com/ClickHouse/ClickHouse/pull/75319) ([Garrett Thomas](https://github.com/garrettthomaskth)).
-* Fixed behavior when ClickHouse may erroneously do a filter pushdown to an external database like PostgreSQL, MySQL, or SQLite. This closes: [#71423](https://github.com/ClickHouse/ClickHouse/issues/71423). [#75320](https://github.com/ClickHouse/ClickHouse/pull/75320) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Fix crash in protobuf schema cache that can happen during output in Protobuf format and parallel query `SYSTEM DROP FORMAT SCHEMA CACHE`. [#75357](https://github.com/ClickHouse/ClickHouse/pull/75357) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix a possible logical error or uninitialized memory issue when a filter from `HAVING` is pushed down with parallel replicas. [#75363](https://github.com/ClickHouse/ClickHouse/pull/75363) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* Hide sensitive info for `icebergS3`, `icebergAzure` table functions and table engines. [#75378](https://github.com/ClickHouse/ClickHouse/pull/75378) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Function `TRIM` with computed empty trim characters is now correctly handled. Example: `SELECT TRIM(LEADING concat('') FROM 'foo')` (issue [#69922](https://github.com/ClickHouse/ClickHouse/issues/69922)). [#75399](https://github.com/ClickHouse/ClickHouse/pull/75399) ([Manish Gill](https://github.com/mgill25)).
-* Fix data race in IOutputFormat. [#75448](https://github.com/ClickHouse/ClickHouse/pull/75448) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix possible error `Elements ... and ... of Nested data structure ... (Array columns) have different array sizes` when JSON subcolumns with Array type are used in JOIN over distributed tables. [#75512](https://github.com/ClickHouse/ClickHouse/pull/75512) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix data corruption with `CODEC(ZSTD, DoubleDelta)`. Closes [#70031](https://github.com/ClickHouse/ClickHouse/issues/70031). [#75548](https://github.com/ClickHouse/ClickHouse/pull/75548) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Fix the interaction between the `allow_feature_tier` and `compatibility` MergeTree settings. [#75635](https://github.com/ClickHouse/ClickHouse/pull/75635) ([Raúl Marín](https://github.com/Algunenano)).
-* Fix an incorrect `processed_rows` value in `system.s3queue_log` when a file was retried. [#75666](https://github.com/ClickHouse/ClickHouse/pull/75666) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Respect `materialized_views_ignore_errors` when a materialized view writes to a URL engine and there is a connectivity issue. [#75679](https://github.com/ClickHouse/ClickHouse/pull/75679) ([Christoph Wurm](https://github.com/cwurm)).
-* Fixed rare crashes while reading from `MergeTree` table after multiple asynchronous `RENAME` queries (with `alter_sync = 0`) between columns with different types. [#75693](https://github.com/ClickHouse/ClickHouse/pull/75693) ([Anton Popov](https://github.com/CurtizJ)).
-* Fix `Block structure mismatch in QueryPipeline stream` error for some queries with `UNION ALL`. [#75715](https://github.com/ClickHouse/ClickHouse/pull/75715) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Rebuild a projection on ALTER MODIFY of its PK column. Previously, this could lead to `CANNOT_READ_ALL_DATA` errors during selects after an ALTER MODIFY of the column used in the projection PK. [#75720](https://github.com/ClickHouse/ClickHouse/pull/75720) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix incorrect result of `ARRAY JOIN` for scalar subqueries (with analyzer). [#75732](https://github.com/ClickHouse/ClickHouse/pull/75732) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fixed null pointer dereference in `DistinctSortedStreamTransform`. [#75734](https://github.com/ClickHouse/ClickHouse/pull/75734) ([Nikita Taranov](https://github.com/nickitat)).
-* Fix `allow_suspicious_ttl_expressions` behaviour. [#75771](https://github.com/ClickHouse/ClickHouse/pull/75771) ([Aleksei Filatov](https://github.com/aalexfvk)).
-* Fix uninitialized memory read in function `translate`. This closes [#75592](https://github.com/ClickHouse/ClickHouse/issues/75592). [#75794](https://github.com/ClickHouse/ClickHouse/pull/75794) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Propagate format settings to JSON as string formatting in Native format. [#75832](https://github.com/ClickHouse/ClickHouse/pull/75832) ([Pavel Kruglov](https://github.com/Avogar)).
-* Recorded the default enablement of parallel hash as join algorithm in v24.12 in the settings change history. This means that ClickHouse will continue to join using non-parallel hash if an older compatibility level than v24.12 is configured. [#75870](https://github.com/ClickHouse/ClickHouse/pull/75870) ([Robert Schulze](https://github.com/rschu1ze)).
-* Fixed a bug that tables with implicitly added min-max indices could not be copied into a new table (issue [#75677](https://github.com/ClickHouse/ClickHouse/issues/75677)). [#75877](https://github.com/ClickHouse/ClickHouse/pull/75877) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)).
-* `clickhouse-library-bridge` allows opening arbitrary libraries from the filesystem, which makes it safe to run only inside an isolated environment. To prevent a vulnerability when it is run near the clickhouse-server, we will limit the paths of libraries to a location, provided in the configuration. This vulnerability was found with the [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by **Arseniy Dugin**. [#75954](https://github.com/ClickHouse/ClickHouse/pull/75954) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* We happened to use JSON serialization for some metadata, which was a mistake, because JSON does not support binary data inside string literals, including zero bytes. SQL queries can contain binary data and invalid UTF-8, so we have to support this in our metadata files as well. At the same time, ClickHouse's `JSONEachRow` and similar formats work around that by deviating from the JSON standard in favor of a perfect roundtrip for the binary data. See the motivation here: https://github.com/ClickHouse/ClickHouse/pull/73668#issuecomment-2560501790. The solution is to make `Poco::JSON` library consistent with the JSON format serialization in ClickHouse. This closes [#73668](https://github.com/ClickHouse/ClickHouse/issues/73668). [#75963](https://github.com/ClickHouse/ClickHouse/pull/75963) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Fix check for commit limits in storage `S3Queue`. [#76104](https://github.com/ClickHouse/ClickHouse/pull/76104) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix attaching MergeTree tables with auto indexes (`add_minmax_index_for_numeric_columns`/`add_minmax_index_for_string_columns`). [#76139](https://github.com/ClickHouse/ClickHouse/pull/76139) ([Azat Khuzhin](https://github.com/azat)).
-* Fixed an issue where stack traces from parent threads of a job (the `enable_job_stack_trace` setting) were not printed. Also fixed the `enable_job_stack_trace` setting not being properly propagated to threads, so stack trace content did not always respect the setting. [#76191](https://github.com/ClickHouse/ClickHouse/pull/76191) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Fix incorrect permission check where `ALTER RENAME` required `CREATE USER` grant. Closes [#74372](https://github.com/ClickHouse/ClickHouse/issues/74372). [#76241](https://github.com/ClickHouse/ClickHouse/pull/76241) ([pufit](https://github.com/pufit)).
-* Fix reinterpretAs with FixedString on big-endian architecture. [#76253](https://github.com/ClickHouse/ClickHouse/pull/76253) ([Azat Khuzhin](https://github.com/azat)).
-* Fix logical error in S3Queue "Expected current processor {} to be equal to {} for bucket {}". [#76358](https://github.com/ClickHouse/ClickHouse/pull/76358) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix deadlock for ALTER with Memory database. [#76359](https://github.com/ClickHouse/ClickHouse/pull/76359) ([Azat Khuzhin](https://github.com/azat)).
-* Fix logical error in index analysis if condition in `WHERE` has `pointInPolygon` function. [#76360](https://github.com/ClickHouse/ClickHouse/pull/76360) ([Anton Popov](https://github.com/CurtizJ)).
-* Fix potentially unsafe call in signal handler. [#76549](https://github.com/ClickHouse/ClickHouse/pull/76549) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Fix reverse key support in PartsSplitter. This fixes [#73400](https://github.com/ClickHouse/ClickHouse/issues/73400). [#73418](https://github.com/ClickHouse/ClickHouse/pull/73418) ([Amos Bird](https://github.com/amosbird)).
-
-#### Build/Testing/Packaging Improvement
-* Support building HDFS on both ARM and Intel Macs. [#74244](https://github.com/ClickHouse/ClickHouse/pull/74244) ([Yan Xin](https://github.com/yxheartipp)).
-* Enable ICU and GRPC when cross-compiling for Darwin. [#75922](https://github.com/ClickHouse/ClickHouse/pull/75922) ([Raúl Marín](https://github.com/Algunenano)).
-* Update to embedded LLVM 19. [#75148](https://github.com/ClickHouse/ClickHouse/pull/75148) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Disable network access for user default in the docker image. [#75259](https://github.com/ClickHouse/ClickHouse/pull/75259) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). Make all clickhouse-server related actions a function, and execute them only when launching the default binary in `entrypoint.sh`. A long-postponed improvement was suggested in [#50724](https://github.com/ClickHouse/ClickHouse/issues/50724). Added switch `--users` to `clickhouse-extract-from-config` to get values from the `users.xml`. [#75643](https://github.com/ClickHouse/ClickHouse/pull/75643) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
-* Remove about 20MB of dead code from the binary. [#76226](https://github.com/ClickHouse/ClickHouse/pull/76226) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-
-### ClickHouse release 25.1, 2025-01-28 {#251}
-
-#### Backward Incompatible Change
-* `JSONEachRowWithProgress` will write the progress whenever progress happens. In previous versions, the progress was shown only after each block of the result, which made it useless. Change the way progress is displayed: it will not show zero values. This closes [#70800](https://github.com/ClickHouse/ClickHouse/issues/70800). [#73834](https://github.com/ClickHouse/ClickHouse/pull/73834) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* `Merge` tables will unify the structure of underlying tables by using a union of their columns and deriving common types. This closes [#64864](https://github.com/ClickHouse/ClickHouse/issues/64864). In certain cases, this change could be backward incompatible. One example is when there is no common type between tables, but conversion to the type of the first table is still possible, such as in the case of UInt64 and Int64 or any numeric type and String. If you want to return to the old behavior, set `merge_table_max_tables_to_look_for_schema_inference` to `1` or set `compatibility` to `24.12` or earlier. [#73956](https://github.com/ClickHouse/ClickHouse/pull/73956) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Parquet output format converts Date and DateTime columns to date/time types supported by Parquet, instead of writing them as raw numbers. `DateTime` becomes `DateTime64(3)` (was: `UInt32`); setting `output_format_parquet_datetime_as_uint32` brings back the old behavior. `Date` becomes `Date32` (was: `UInt16`). [#70950](https://github.com/ClickHouse/ClickHouse/pull/70950) ([Michael Kolupaev](https://github.com/al13n321)).
-* Don't allow non-comparable types (like `JSON`/`Object`/`AggregateFunction`) in `ORDER BY` and the comparison functions `less`/`greater`/`equal`, etc. by default. [#73276](https://github.com/ClickHouse/ClickHouse/pull/73276) ([Pavel Kruglov](https://github.com/Avogar)).
-* The obsolete `MaterializedMySQL` database engine has been removed and is no longer available. [#73879](https://github.com/ClickHouse/ClickHouse/pull/73879) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* The `mysql` dictionary source no longer does a `SHOW TABLE STATUS` query, because it does not provide any value for InnoDB tables, nor for any recent MySQL version. This closes [#72636](https://github.com/ClickHouse/ClickHouse/issues/72636). This change is backward compatible, but it is listed in this category so you have a chance to notice it. [#73914](https://github.com/ClickHouse/ClickHouse/pull/73914) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* `CHECK TABLE` queries now require a separate `CHECK` grant. In previous versions, it was enough to have the `SHOW TABLES` grant to run these queries. But a `CHECK TABLE` query can be heavy, and the usual query complexity limits for `SELECT` queries don't apply to it, which created a potential for DoS. [#74471](https://github.com/ClickHouse/ClickHouse/pull/74471) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Function `h3ToGeo()` now returns the results in the order `(lat, lon)` (which is the standard order for geometric functions). Users who wish to retain the legacy result order `(lon, lat)` can set the setting `h3togeo_lon_lat_result_order = true`. [#74719](https://github.com/ClickHouse/ClickHouse/pull/74719) ([Manish Gill](https://github.com/mgill25)).
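-
-  For example (the H3 index value below is arbitrary):
-
-  ```sql
-  SELECT h3ToGeo(644325524701193974) AS coordinates; -- now returns (lat, lon)
-  ```
-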
-* A new MongoDB driver is now the default. Users who wish to continue using the legacy driver can set the server setting `use_legacy_mongodb_integration` to true. [#73359](https://github.com/ClickHouse/ClickHouse/pull/73359) ([Robert Schulze](https://github.com/rschu1ze)).
-
-#### New Feature
-* Added the ability to apply unfinished mutations (not yet materialized by the background process) during the execution of `SELECT` queries immediately after they are submitted. It can be enabled by the setting `apply_mutations_on_fly`. [#74877](https://github.com/ClickHouse/ClickHouse/pull/74877) ([Anton Popov](https://github.com/CurtizJ)).
-* Implement partition pruning for `Iceberg` tables with time-related transform partition operations. [#72044](https://github.com/ClickHouse/ClickHouse/pull/72044) ([Daniil Ivanik](https://github.com/divanik)).
-* Support subcolumns in MergeTree sorting key and skip indexes. [#72644](https://github.com/ClickHouse/ClickHouse/pull/72644) ([Pavel Kruglov](https://github.com/Avogar)).
-* Support reading `HALF_FLOAT` values from `Apache Arrow`/`Parquet`/`ORC` (they are read into `Float32`). This closes [#72960](https://github.com/ClickHouse/ClickHouse/issues/72960). Keep in mind that IEEE-754 half float is not the same as `BFloat16`. Closes [#73835](https://github.com/ClickHouse/ClickHouse/issues/73835). [#73836](https://github.com/ClickHouse/ClickHouse/pull/73836) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* The `system.trace_log` table will contain two new columns, `symbols` and `lines`, containing a symbolized stack trace. This allows for easy collection and export of profile information. It is controlled by the server configuration value `symbolize` inside `trace_log` and is enabled by default. [#73896](https://github.com/ClickHouse/ClickHouse/pull/73896) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add a new function, `generateSerialID`, which can be used to generate auto-incremental numbers in tables. Continuation of [#64310](https://github.com/ClickHouse/ClickHouse/issues/64310) by [kazalika](https://github.com/kazalika). This closes [#62485](https://github.com/ClickHouse/ClickHouse/issues/62485). [#73950](https://github.com/ClickHouse/ClickHouse/pull/73950) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
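-
-  A minimal sketch of usage (the table and the series name `'ids'` are hypothetical; the single-argument signature is an assumption based on the linked PR):
-
-  ```sql
-  CREATE TABLE events
-  (
-      id UInt64 DEFAULT generateSerialID('ids'), -- auto-incremental counter named 'ids'
-      payload String
-  )
-  ENGINE = MergeTree
-  ORDER BY id;
-
-  INSERT INTO events (payload) VALUES ('a'), ('b'); -- ids 1 and 2 are assigned
-  ```
-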
-* Add syntax `query1 PARALLEL WITH query2 PARALLEL WITH query3 ... PARALLEL WITH queryN` for DDL queries. That means subqueries `{query1, query2, ... queryN}` are allowed to run in parallel with each other (and it's preferable). [#73983](https://github.com/ClickHouse/ClickHouse/pull/73983) ([Vitaly Baranov](https://github.com/vitlibar)).
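-
-  For example, a minimal sketch with two hypothetical tables:
-
-  ```sql
-  DROP TABLE IF EXISTS t1 PARALLEL WITH DROP TABLE IF EXISTS t2;
-  ```
-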
-* Added an in-memory cache for deserialized skipping index granules. This should make repeated queries that use skipping indexes faster. The size of the new cache is controlled by the server settings `skipping_index_cache_size` and `skipping_index_cache_max_entries`. The original motivation for the cache was vector similarity indexes, which are now a lot faster. [#70102](https://github.com/ClickHouse/ClickHouse/pull/70102) ([Robert Schulze](https://github.com/rschu1ze)).
-* Now, the embedded Web UI has a progress bar during query runtime. It allows cancelling queries. It displays the total number of records and the extended information about the speed. The table can be rendered incrementally as soon as data arrives. Enable HTTP compression. Rendering of the table became faster. The table header became sticky. It allows selecting cells and navigating them by arrow keys. Fix the issue when the outline of the selected cell makes it smaller. Cells no longer expand on mouse hover but only on selection. The moment to stop rendering the incoming data is decided on the client rather than on the server side. Highlight digit groups for numbers. The overall design was refreshed and became bolder. It checks if the server is reachable and the correctness of credentials and displays the server version and uptime. The cloud icon is contoured in every font, even in Safari. Big integers inside nested data types will be rendered better. It will display inf/nan correctly. It will display data types when the mouse is over a column header. [#74204](https://github.com/ClickHouse/ClickHouse/pull/74204) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add the ability to create min-max (skipping) indices by default for columns managed by MergeTree using settings `add_minmax_index_for_numeric_columns` (for numeric columns) and `add_minmax_index_for_string_columns` (for string columns). For now, both settings are disabled, so there is no behavior change yet. [#74266](https://github.com/ClickHouse/ClickHouse/pull/74266) ([Smita Kulkarni](https://github.com/SmitaRKulkarni)).
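-
-  A sketch of how enabling them could look once you opt in (hypothetical table):
-
-  ```sql
-  CREATE TABLE t
-  (
-      id UInt64,
-      s String
-  )
-  ENGINE = MergeTree
-  ORDER BY id
-  SETTINGS add_minmax_index_for_numeric_columns = 1,
-           add_minmax_index_for_string_columns = 1;
-  ```
-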
-* Add `script_query_number` and `script_line_number` fields to `system.query_log`, to the ClientInfo in the native protocol, and to server logs. This closes [#67542](https://github.com/ClickHouse/ClickHouse/issues/67542). Credits to [pinsvin00](https://github.com/pinsvin00) for kicking off this feature earlier in [#68133](https://github.com/ClickHouse/ClickHouse/issues/68133). [#74477](https://github.com/ClickHouse/ClickHouse/pull/74477) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Added the aggregation function `sequenceMatchEvents`, which returns the timestamps of matched events for the longest chain of events in the pattern. [#72349](https://github.com/ClickHouse/ClickHouse/pull/72349) ([UnamedRus](https://github.com/UnamedRus)).
-* Added function `arrayNormalizedGini`. [#72823](https://github.com/ClickHouse/ClickHouse/pull/72823) ([flynn](https://github.com/ucasfl)).
-* Add minus operator support for `DateTime64`, to allow subtraction between `DateTime64` values, as well as with `DateTime` values. [#74482](https://github.com/ClickHouse/ClickHouse/pull/74482) ([Li Yin](https://github.com/liyinsg)).
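-
-  For example (assuming the result is the difference in seconds):
-
-  ```sql
-  SELECT toDateTime64('2025-01-28 00:00:10.500', 3)
-       - toDateTime64('2025-01-28 00:00:00.000', 3) AS diff; -- 10.5
-  ```
-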
-
-#### Experimental Features
-* The `BFloat16` data type is production-ready. [#73840](https://github.com/ClickHouse/ClickHouse/pull/73840) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-
-#### Performance Improvement
-* Optimized function `indexHint`. Now, columns that are only used as arguments of function `indexHint` are not read from the table. [#74314](https://github.com/ClickHouse/ClickHouse/pull/74314) ([Anton Popov](https://github.com/CurtizJ)). If the `indexHint` function is a central piece of your enterprise data architecture, this optimization will save your life.
-* More accurate accounting for `max_joined_block_size_rows` setting for `parallel_hash` JOIN algorithm. Helps to avoid increased memory consumption compared to `hash` algorithm. [#74630](https://github.com/ClickHouse/ClickHouse/pull/74630) ([Nikita Taranov](https://github.com/nickitat)).
-* Support predicate push down optimization on the query plan level for the `MergingAggregated` step. It improves performance for some queries with the analyzer. [#74073](https://github.com/ClickHouse/ClickHouse/pull/74073) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Splitting of left table blocks by hash was removed from the probe phase of the `parallel_hash` JOIN algorithm. [#73089](https://github.com/ClickHouse/ClickHouse/pull/73089) ([Nikita Taranov](https://github.com/nickitat)).
-* Optimize RowBinary input format. Closes [#63805](https://github.com/ClickHouse/ClickHouse/issues/63805). [#65059](https://github.com/ClickHouse/ClickHouse/pull/65059) ([Pavel Kruglov](https://github.com/Avogar)).
-* Write parts with level 1 if `optimize_on_insert` is enabled. It allows using several optimizations of queries with `FINAL` for freshly written parts. [#73132](https://github.com/ClickHouse/ClickHouse/pull/73132) ([Anton Popov](https://github.com/CurtizJ)).
-* Speed up string deserialization with some low-level optimizations. [#65948](https://github.com/ClickHouse/ClickHouse/pull/65948) ([Nikita Taranov](https://github.com/nickitat)).
-* When running an equality comparison between records, such as during merges, start to compare rows from most likely unequal columns first. [#63780](https://github.com/ClickHouse/ClickHouse/pull/63780) ([UnamedRus](https://github.com/UnamedRus)).
-* Improve grace hash join performance by re-ranking the right join table by keys. [#72237](https://github.com/ClickHouse/ClickHouse/pull/72237) ([kevinyhzou](https://github.com/KevinyhZou)).
-* Allow `arrayROCAUC` and `arrayAUCPR` to compute partial area of the whole curve, so that its calculation can be parallelized over huge datasets. [#72904](https://github.com/ClickHouse/ClickHouse/pull/72904) ([Emmanuel](https://github.com/emmanuelsdias)).
-* Avoid spawning too many idle threads. [#72920](https://github.com/ClickHouse/ClickHouse/pull/72920) ([Guo Wangyang](https://github.com/guowangy)).
-* Don't list blob storage keys if we only have curly bracket expansion in the table function. Closes [#73333](https://github.com/ClickHouse/ClickHouse/issues/73333). [#73518](https://github.com/ClickHouse/ClickHouse/pull/73518) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Short circuit optimization for functions executed over Nullable arguments. [#73820](https://github.com/ClickHouse/ClickHouse/pull/73820) ([李扬](https://github.com/taiyang-li)).
-* Do not apply `maskedExecute` on non-function columns, improve the performance of short circuit execution. [#73965](https://github.com/ClickHouse/ClickHouse/pull/73965) ([lgbo](https://github.com/lgbo-ustc)).
-* Disable the autodetection of headers in input formats for `Kafka`/`NATS`/`RabbitMQ`/`FileLog` to improve performance. [#74006](https://github.com/ClickHouse/ClickHouse/pull/74006) ([Azat Khuzhin](https://github.com/azat)).
-* Execute pipeline with a higher degree of parallelism after aggregation with grouping sets. [#74082](https://github.com/ClickHouse/ClickHouse/pull/74082) ([Nikita Taranov](https://github.com/nickitat)).
-* Reduce critical section in `MergeTreeReadPool`. [#74202](https://github.com/ClickHouse/ClickHouse/pull/74202) ([Guo Wangyang](https://github.com/guowangy)).
-* Parallel replicas performance improvement. Packet deserialization on the query initiator, for packets not related to the parallel replicas protocol, now always happens in a pipeline thread. Before, it could happen in the thread responsible for pipeline scheduling, which could make the initiator less responsive and delay pipeline execution. [#74398](https://github.com/ClickHouse/ClickHouse/pull/74398) ([Igor Nikonov](https://github.com/devcrafter)).
-* Improve performance of larger multi requests in Keeper. [#74849](https://github.com/ClickHouse/ClickHouse/pull/74849) ([Antonio Andelic](https://github.com/antonio2368)).
-* Use log wrappers by value and don't allocate them on the heap. [#74034](https://github.com/ClickHouse/ClickHouse/pull/74034) ([Mikhail Artemenko](https://github.com/Michicosun)).
-* Reestablish connections to MySQL and Postgres dictionary replicas in the background, so that they don't delay requests to the corresponding dictionaries. [#71101](https://github.com/ClickHouse/ClickHouse/pull/71101) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Parallel replicas used historical information about replica availability to improve replica selection but did not update the replica's error count when the connection was unavailable. This PR updates the replica's error count when unavailable. [#72666](https://github.com/ClickHouse/ClickHouse/pull/72666) ([zoomxi](https://github.com/zoomxi)).
-* Added a merge tree setting `materialize_skip_indexes_on_merge` which suppresses the creation of skip indexes during merge. This allows users to control explicitly (via `ALTER TABLE [..] MATERIALIZE INDEX [...]`) when skip indexes are created. This can be useful if skip indexes are expensive to build (e.g. vector similarity indexes). [#74401](https://github.com/ClickHouse/ClickHouse/pull/74401) ([Robert Schulze](https://github.com/rschu1ze)).
-* Optimize keeper requests in Storage(S3/Azure)Queue. [#74410](https://github.com/ClickHouse/ClickHouse/pull/74410) ([Kseniia Sumarokova](https://github.com/kssenii)). [#74538](https://github.com/ClickHouse/ClickHouse/pull/74538) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Use up to `1000` parallel replicas by default. [#74504](https://github.com/ClickHouse/ClickHouse/pull/74504) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Improve HTTP session reuse when reading from s3 disk ([#72401](https://github.com/ClickHouse/ClickHouse/issues/72401)). [#74548](https://github.com/ClickHouse/ClickHouse/pull/74548) ([Julian Maicher](https://github.com/jmaicher)).
-
-#### Improvement
-* Support SETTINGS in a CREATE TABLE query with an implicit ENGINE and support mixing engine and query settings. [#73120](https://github.com/ClickHouse/ClickHouse/pull/73120) ([Raúl Marín](https://github.com/Algunenano)).
-* Enable `use_hive_partitioning` by default. [#71636](https://github.com/ClickHouse/ClickHouse/pull/71636) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Support CAST and ALTER between JSON types with different parameters. [#72303](https://github.com/ClickHouse/ClickHouse/pull/72303) ([Pavel Kruglov](https://github.com/Avogar)).
-* Support equality comparison for values of the JSON column. [#72991](https://github.com/ClickHouse/ClickHouse/pull/72991) ([Pavel Kruglov](https://github.com/Avogar)).
-* Improve formatting of identifiers with JSON subcolumns to avoid unnecessary back quotes. [#73085](https://github.com/ClickHouse/ClickHouse/pull/73085) ([Pavel Kruglov](https://github.com/Avogar)).
-* Interactive metrics improvements. Fix metrics from parallel replicas not being fully displayed. Display the metrics in order of the most recent update, then lexicographically by name. Do not display stale metrics. [#71631](https://github.com/ClickHouse/ClickHouse/pull/71631) ([Julia Kartseva](https://github.com/jkartseva)).
-* Make JSON output format pretty by default. Add new setting `output_format_json_pretty_print` to control it and enable it by default. [#72148](https://github.com/ClickHouse/ClickHouse/pull/72148) ([Pavel Kruglov](https://github.com/Avogar)).
-* Allow `LowCardinality(UUID)` by default. This has proven practical among ClickHouse Cloud customers. [#73826](https://github.com/ClickHouse/ClickHouse/pull/73826) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Better message during installation. [#73827](https://github.com/ClickHouse/ClickHouse/pull/73827) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Better message about password reset for ClickHouse Cloud. [#73831](https://github.com/ClickHouse/ClickHouse/pull/73831) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Improve the error message with a File table that cannot perform appends into a file. [#73832](https://github.com/ClickHouse/ClickHouse/pull/73832) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Ask for confirmation when a user accidentally requests output in a binary format (such as Native, Parquet, Avro) in the terminal. This closes [#59524](https://github.com/ClickHouse/ClickHouse/issues/59524). [#73833](https://github.com/ClickHouse/ClickHouse/pull/73833) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Highlight trailing spaces in Pretty and Vertical formats in the terminal for better clarity. This is controlled with the `output_format_pretty_highlight_trailing_spaces` setting. Initial implementation by [Braden Burns](https://github.com/bradenburns) from [#72996](https://github.com/ClickHouse/ClickHouse/issues/72996). Closes [#71590](https://github.com/ClickHouse/ClickHouse/issues/71590). [#73847](https://github.com/ClickHouse/ClickHouse/pull/73847) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* `clickhouse-client` and `clickhouse-local` will autodetect compression of stdin when it is redirected from a file. This closes [#70865](https://github.com/ClickHouse/ClickHouse/issues/70865). [#73848](https://github.com/ClickHouse/ClickHouse/pull/73848) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Cut too long column names in pretty formats by default. This is controlled by the `output_format_pretty_max_column_name_width_cut_to` and `output_format_pretty_max_column_name_width_min_chars_to_cut` settings. This is the continuation of the work of [tanmaydatta](https://github.com/tanmaydatta) in [#66502](https://github.com/ClickHouse/ClickHouse/issues/66502). This closes [#65968](https://github.com/ClickHouse/ClickHouse/issues/65968). [#73851](https://github.com/ClickHouse/ClickHouse/pull/73851) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Make `Pretty` formats prettier: squash blocks if not much time passed since the output of the previous block. This is controlled by new settings `output_format_pretty_squash_consecutive_ms` (50 ms by default) and `output_format_pretty_squash_max_wait_ms` (1000 ms by default). Continuation of [#49537](https://github.com/ClickHouse/ClickHouse/issues/49537). This closes [#49153](https://github.com/ClickHouse/ClickHouse/issues/49153). [#73852](https://github.com/ClickHouse/ClickHouse/pull/73852) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add a metric on the number of currently merging source parts. This closes [#70809](https://github.com/ClickHouse/ClickHouse/issues/70809). [#73868](https://github.com/ClickHouse/ClickHouse/pull/73868) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Highlight columns in the `Vertical` format if the output is to a terminal. This can be disabled with the `output_format_pretty_color` setting. [#73898](https://github.com/ClickHouse/ClickHouse/pull/73898) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Enhanced MySQL compatibility to the level that `mysqlsh` (a rich MySQL CLI from Oracle) can now connect to ClickHouse. This is needed to facilitate testing. [#73912](https://github.com/ClickHouse/ClickHouse/pull/73912) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Pretty formats can render multi-line fields inside a table cell, which improves readability. This is enabled by default and can be controlled by the setting `output_format_pretty_multiline_fields`. Continuation of the work by [Volodyachan](https://github.com/Volodyachan) in [#64094](https://github.com/ClickHouse/ClickHouse/issues/64094). This closes [#56912](https://github.com/ClickHouse/ClickHouse/issues/56912). [#74032](https://github.com/ClickHouse/ClickHouse/pull/74032) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Expose X-ClickHouse HTTP headers to JavaScript in the browser. It makes writing applications more convenient. [#74180](https://github.com/ClickHouse/ClickHouse/pull/74180) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* The `JSONEachRowWithProgress` format will include events with metadata, as well as totals and extremes. It also includes `rows_before_limit_at_least` and `rows_before_aggregation`. The format prints the exception properly if it arrives after partial results. The progress now includes elapsed nanoseconds. One final progress event is emitted at the end. The progress during query runtime will be printed no more frequently than the value of the `interactive_delay` setting. [#74181](https://github.com/ClickHouse/ClickHouse/pull/74181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Hourglass will rotate smoothly in Play UI. [#74182](https://github.com/ClickHouse/ClickHouse/pull/74182) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Even if the HTTP response is compressed, send packets as soon as they arrive. This allows the browser to receive progress packets and compressed data. [#74201](https://github.com/ClickHouse/ClickHouse/pull/74201) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* If the number of output records is larger than N = `output_format_pretty_max_rows`, instead of displaying only the first N rows, we will cut the output table in the middle, displaying N/2 first rows and N/2 last rows. Continuation of [#64200](https://github.com/ClickHouse/ClickHouse/issues/64200). This closes [#59502](https://github.com/ClickHouse/ClickHouse/issues/59502). [#73929](https://github.com/ClickHouse/ClickHouse/pull/73929) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Allow more general join planning algorithm when hash join algorithm is enabled. [#71926](https://github.com/ClickHouse/ClickHouse/pull/71926) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* Allow creating a `bloom_filter` index on columns with data type `DateTime64`. [#66416](https://github.com/ClickHouse/ClickHouse/pull/66416) ([Yutong Xiao](https://github.com/YutSean)).
-* When `min_age_to_force_merge_seconds` and `min_age_to_force_merge_on_partition_only` are both enabled, the part merging will ignore the max bytes limit. [#73656](https://github.com/ClickHouse/ClickHouse/pull/73656) ([Kai Zhu](https://github.com/nauu)).
-* Added HTTP headers to OpenTelemetry span logs table for enhanced traceability. [#70516](https://github.com/ClickHouse/ClickHouse/pull/70516) ([jonymohajanGmail](https://github.com/jonymohajanGmail)).
-* Support writing `orc` files with a custom time zone instead of always using the `GMT` time zone. [#70615](https://github.com/ClickHouse/ClickHouse/pull/70615) ([kevinyhzou](https://github.com/KevinyhZou)).
-* Respect IO scheduling settings when writing backups across clouds. [#71093](https://github.com/ClickHouse/ClickHouse/pull/71093) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
-* Add an alias `name` for the `metric` column in `system.asynchronous_metrics`. [#71164](https://github.com/ClickHouse/ClickHouse/pull/71164) ([megao](https://github.com/jetgm)).
-* Historically, for some reason, the query `ALTER TABLE MOVE PARTITION TO TABLE` checked `SELECT` and `ALTER DELETE` rights instead of the dedicated `ALTER_MOVE_PARTITION`. This PR makes use of this access type. For compatibility, this permission will also be granted implicitly if `SELECT` and `ALTER DELETE` are granted, but this behavior will be removed in future releases. Closes [#16403](https://github.com/ClickHouse/ClickHouse/issues/16403). [#71632](https://github.com/ClickHouse/ClickHouse/pull/71632) ([pufit](https://github.com/pufit)).
-* Throw an exception when trying to materialize a column in the sort key instead of allowing it to break the sort order. [#71891](https://github.com/ClickHouse/ClickHouse/pull/71891) ([Peter Nguyen](https://github.com/petern48)).
-* Hide secrets in `EXPLAIN QUERY TREE`. [#72025](https://github.com/ClickHouse/ClickHouse/pull/72025) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Support parquet integer logical types in the "native" reader. [#72105](https://github.com/ClickHouse/ClickHouse/pull/72105) ([Arthur Passos](https://github.com/arthurpassos)).
-* Interactively request credentials in the browser if the default user requires a password. In previous versions, the server returned HTTP 403; now, it returns HTTP 401. [#72198](https://github.com/ClickHouse/ClickHouse/pull/72198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Convert access types `CREATE_USER`, `ALTER_USER`, `DROP_USER`, `CREATE_ROLE`, `ALTER_ROLE`, `DROP_ROLE` from global to parameterized. This means users can now grant access management privileges more precisely. [#72246](https://github.com/ClickHouse/ClickHouse/pull/72246) ([pufit](https://github.com/pufit)).
-* Add the `latest_fail_error_code_name` column to `system.mutations`. We need this column to introduce a new metric on stuck mutations and use it to build graphs of the errors encountered in the cloud as well as, optionally, adding a new less-noisy alert. [#72398](https://github.com/ClickHouse/ClickHouse/pull/72398) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* Reduce amount of allocation in the `ATTACH PARTITION` query. [#72583](https://github.com/ClickHouse/ClickHouse/pull/72583) ([Konstantin Morozov](https://github.com/k-morozov)).
-* Make the `max_bytes_before_external_sort` limit depend on total query memory consumption (previously it was the number of bytes in the sorting block for one sorting thread; now it has the same meaning as `max_bytes_before_external_group_by`: a total limit for the whole query memory across all threads). Also, one more setting was added to control the on-disk block size: `min_external_sort_block_bytes`. [#72598](https://github.com/ClickHouse/ClickHouse/pull/72598) ([Azat Khuzhin](https://github.com/azat)).
-* Ignore memory restrictions in the trace collector. [#72606](https://github.com/ClickHouse/ClickHouse/pull/72606) ([Azat Khuzhin](https://github.com/azat)).
-* Add server settings `dictionaries_lazy_load` and `wait_dictionaries_load_at_startup` to `system.server_settings`. [#72664](https://github.com/ClickHouse/ClickHouse/pull/72664) ([Christoph Wurm](https://github.com/cwurm)).
-* Add the setting `max_backup_bandwidth` to the list of settings that can be specified as part of `BACKUP`/`RESTORE` queries. [#72665](https://github.com/ClickHouse/ClickHouse/pull/72665) ([Christoph Wurm](https://github.com/cwurm)).
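-
-  For example, a sketch assuming a backup destination disk named `backups` is configured:
-
-  ```sql
-  BACKUP TABLE db1.table1 TO Disk('backups', 'table1.zip')
-  SETTINGS max_backup_bandwidth = 10000000; -- throttle to ~10 MB/s
-  ```
-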
-* Reduce the log level for appearing replicated parts in the `ReplicatedMergeTree` engine to help minimize the volume of logs generated in a replicated cluster. [#72876](https://github.com/ClickHouse/ClickHouse/pull/72876) ([mor-akamai](https://github.com/morkalfon)).
-* Improve extraction of common expression in disjunctions. Allow simplifying the resulting filter expression even if there's no common subexpression for all the disjuncts. Continuation of [#71537](https://github.com/ClickHouse/ClickHouse/issues/71537). [#73271](https://github.com/ClickHouse/ClickHouse/pull/73271) ([Dmitry Novik](https://github.com/novikd)).
-* In Storage `S3Queue`/`AzureQueue`, allow adding settings to a table that was created without settings. [#73283](https://github.com/ClickHouse/ClickHouse/pull/73283) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Introduce a setting `least_greatest_legacy_null_behavior` (default: `false`) which controls if functions `least` and `greatest` handle `NULL` arguments by unconditionally returning `NULL` (if `true`) or by ignoring them (if `false`). [#73344](https://github.com/ClickHouse/ClickHouse/pull/73344) ([Robert Schulze](https://github.com/rschu1ze)).
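-
-  For example:
-
-  ```sql
-  SELECT least(1, NULL) SETTINGS least_greatest_legacy_null_behavior = 0; -- 1 (NULL is ignored)
-  SELECT least(1, NULL) SETTINGS least_greatest_legacy_null_behavior = 1; -- NULL
-  ```
-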
-* Use Keeper multi requests in the cleanup thread of ObjectStorageQueueMetadata. [#73357](https://github.com/ClickHouse/ClickHouse/pull/73357) ([Antonio Andelic](https://github.com/antonio2368)).
-* When ClickHouse runs under a cgroup we will still collect system-wide asynchronous metrics related to system load, process scheduling, memory etc. They might provide useful signals when ClickHouse is the only process on the host with high resource consumption. [#73369](https://github.com/ClickHouse/ClickHouse/pull/73369) ([Nikita Taranov](https://github.com/nickitat)).
-* In storage `S3Queue`, allow transferring old ordered tables created before 24.6 to the new structure with buckets. [#73467](https://github.com/ClickHouse/ClickHouse/pull/73467) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Add `system.azure_queue` similar to existing `system.s3queue`. [#73477](https://github.com/ClickHouse/ClickHouse/pull/73477) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Function `parseDateTime64` (and its variants) now produces correct results for input dates before 1970 / after 2106. Example: `SELECT parseDateTime64InJodaSyntax('2200-01-01 00:00:00.000', 'yyyy-MM-dd HH:mm:ss.SSS')`. [#73594](https://github.com/ClickHouse/ClickHouse/pull/73594) ([zhanglistar](https://github.com/zhanglistar)).
-* Address some `clickhouse-disks` usability issues reported by users. Closes [#67136](https://github.com/ClickHouse/ClickHouse/issues/67136). [#73616](https://github.com/ClickHouse/ClickHouse/pull/73616) ([Daniil Ivanik](https://github.com/divanik)).
-* Allow altering commit settings in storage S3(Azure)Queue. (The commit settings are: `max_processed_files_before_commit`, `max_processed_rows_before_commit`, `max_processed_bytes_before_commit`, `max_processing_time_sec_before_commit`). [#73635](https://github.com/ClickHouse/ClickHouse/pull/73635) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* In storage S3(Azure)Queue, aggregate progress across sources when comparing against the commit limit settings. [#73641](https://github.com/ClickHouse/ClickHouse/pull/73641) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Support core settings in `BACKUP`/`RESTORE` query. [#73650](https://github.com/ClickHouse/ClickHouse/pull/73650) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Take into account the `output_format_compression_level` on Parquet output. [#73651](https://github.com/ClickHouse/ClickHouse/pull/73651) ([Arthur Passos](https://github.com/arthurpassos)).
-* Adds reading Apache Arrow's `fixed_size_list` as an `Array` instead of treating it as an unsupported type. [#73654](https://github.com/ClickHouse/ClickHouse/pull/73654) ([Julian Meyers](https://github.com/J-Meyers)).
-* Add two backup engines: `Memory` (keeps backups inside the current user session) and `Null` (doesn't keep backups anywhere; intended for testing). [#73690](https://github.com/ClickHouse/ClickHouse/pull/73690) ([Vitaly Baranov](https://github.com/vitlibar)).
-* `concurrent_threads_soft_limit_num` and `concurrent_threads_soft_limit_num_ratio_to_cores` can now be changed without a server restart. [#73713](https://github.com/ClickHouse/ClickHouse/pull/73713) ([Sergei Trifonov](https://github.com/serxa)).
-* Add support for extended numeric types (`Decimal`, big integers) in `formatReadable` functions. [#73765](https://github.com/ClickHouse/ClickHouse/pull/73765) ([Raúl Marín](https://github.com/Algunenano)).
-* Support TLS for Postgres wire protocol compatibility. [#73812](https://github.com/ClickHouse/ClickHouse/pull/73812) ([scanhex12](https://github.com/scanhex12)).
-* The function `isIPv4String` returned `true` if a correct IPv4 address was followed by a zero byte, while it should return `false` in this case. Continuation of [#65387](https://github.com/ClickHouse/ClickHouse/issues/65387). [#73946](https://github.com/ClickHouse/ClickHouse/pull/73946) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Make the error code in the MySQL wire protocol compatible with MySQL. Continuation of [#56831](https://github.com/ClickHouse/ClickHouse/issues/56831). Closes [#50957](https://github.com/ClickHouse/ClickHouse/issues/50957). [#73948](https://github.com/ClickHouse/ClickHouse/pull/73948) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
-* Add setting `validate_enum_literals_in_operators` to validate enum literals in operators like `IN`, `NOT IN` against the enum type and throw an exception if the literal is not a valid enum value. [#73985](https://github.com/ClickHouse/ClickHouse/pull/73985) ([Vladimir Cherkasov](https://github.com/vdimir)).
-* In Storage `S3(Azure)Queue`, commit all files (in a single batch defined by the commit settings) in a single Keeper transaction. [#73991](https://github.com/ClickHouse/ClickHouse/pull/73991) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Disable header detection for executable UDFs and dictionaries (it could lead to errors like `Function 'X': wrong result, expected Y row(s), actual Y-1`). [#73992](https://github.com/ClickHouse/ClickHouse/pull/73992) ([Azat Khuzhin](https://github.com/azat)).
-* Add the `distributed` option for `EXPLAIN PLAN`. Now, `EXPLAIN distributed=1 ...` appends the remote plan to `ReadFromParallelRemote*` steps. [#73994](https://github.com/ClickHouse/ClickHouse/pull/73994) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
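-
-  A minimal sketch (the `remote()` target is arbitrary):
-
-  ```sql
-  EXPLAIN distributed = 1
-  SELECT count() FROM remote('127.0.0.{1,2}', system.one);
-  ```
-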
-* Use the correct return type for `not`/`xor` with `Dynamic` arguments. [#74013](https://github.com/ClickHouse/ClickHouse/pull/74013) ([Pavel Kruglov](https://github.com/Avogar)).
-* Allow changing `add_implicit_sign_column_constraint_for_collapsing_engine` after table creation. [#74014](https://github.com/ClickHouse/ClickHouse/pull/74014) ([Christoph Wurm](https://github.com/cwurm)).
-* Support subcolumns in materialized view select query. [#74030](https://github.com/ClickHouse/ClickHouse/pull/74030) ([Pavel Kruglov](https://github.com/Avogar)).
-* There are now three simple ways to set a custom prompt in `clickhouse-client`: 1. via command-line parameter `--prompt`, 2. in the configuration file, via settings `[...]`, and 3. also in the configuration file, via per-connection settings `[...]`. [#74168](https://github.com/ClickHouse/ClickHouse/pull/74168) ([Christoph Wurm](https://github.com/cwurm)).
-* Autodetect secure connection based on connecting to port 9440 in ClickHouse Client. [#74212](https://github.com/ClickHouse/ClickHouse/pull/74212) ([Christoph Wurm](https://github.com/cwurm)).
-* Authenticate users with the username only for `http_handlers` (previously, the user was required to provide the password as well). [#74221](https://github.com/ClickHouse/ClickHouse/pull/74221) ([Azat Khuzhin](https://github.com/azat)).
-* Support for the alternative query languages PRQL and KQL was marked experimental. To use them, specify settings `allow_experimental_prql_dialect = 1` and `allow_experimental_kusto_dialect = 1`. [#74224](https://github.com/ClickHouse/ClickHouse/pull/74224) ([Robert Schulze](https://github.com/rschu1ze)).
-* Support returning the default Enum type in more aggregate functions. [#74272](https://github.com/ClickHouse/ClickHouse/pull/74272) ([Raúl Marín](https://github.com/Algunenano)).
-* In `OPTIMIZE TABLE`, it is now possible to specify keyword `FORCE` as an alternative to existing keyword `FINAL`. [#74342](https://github.com/ClickHouse/ClickHouse/pull/74342) ([Robert Schulze](https://github.com/rschu1ze)).
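-
-  For example, the two statements below are equivalent (hypothetical table):
-
-  ```sql
-  OPTIMIZE TABLE t FINAL;
-  OPTIMIZE TABLE t FORCE;
-  ```
-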
-* Add the `IsServerShuttingDown` metric, which is needed to trigger an alert when the server shutdown takes too much time. [#74429](https://github.com/ClickHouse/ClickHouse/pull/74429) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
-* Added Iceberg table names to `EXPLAIN`. [#74485](https://github.com/ClickHouse/ClickHouse/pull/74485) ([alekseev-maksim](https://github.com/alekseev-maksim)).
-* Provide a better error message when using RECURSIVE CTE with the old analyzer. [#74523](https://github.com/ClickHouse/ClickHouse/pull/74523) ([Raúl Marín](https://github.com/Algunenano)).
-* Show extended error messages in `system.errors`. [#74574](https://github.com/ClickHouse/ClickHouse/pull/74574) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Allow using a password for client communication with clickhouse-keeper. This feature is not very useful if you specify a proper SSL configuration for the server and client, but it can still be useful in some cases. The password cannot be longer than 16 characters. It is not connected with the Keeper Auth model. [#74673](https://github.com/ClickHouse/ClickHouse/pull/74673) ([alesapin](https://github.com/alesapin)).
-* Add error code for config reloader. [#74746](https://github.com/ClickHouse/ClickHouse/pull/74746) ([Garrett Thomas](https://github.com/garrettthomaskth)).
-* Added support for IPv6 addresses in MySQL and PostgreSQL table functions and engines. [#74796](https://github.com/ClickHouse/ClickHouse/pull/74796) ([Mikhail Koviazin](https://github.com/mkmkme)).
-* Implement short circuit optimization for `divideDecimal`. Fixes [#74280](https://github.com/ClickHouse/ClickHouse/issues/74280). [#74843](https://github.com/ClickHouse/ClickHouse/pull/74843) ([Kevin Mingtarja](https://github.com/kevinmingtarja)).
-* Now users can be specified inside the startup scripts. [#74894](https://github.com/ClickHouse/ClickHouse/pull/74894) ([pufit](https://github.com/pufit)).
-* Add support for Azure SAS Tokens. [#72959](https://github.com/ClickHouse/ClickHouse/pull/72959) ([Azat Khuzhin](https://github.com/azat)).
-
-#### Bug Fix (user-visible misbehavior in an official stable release)
-* Set parquet compression level only if compression codec supports it. [#74659](https://github.com/ClickHouse/ClickHouse/pull/74659) ([Arthur Passos](https://github.com/arthurpassos)).
-* Fixed a regression where using collation locales with modifiers threw an error. As an example, `SELECT arrayJoin(['kk 50', 'KK 01', ' KK 2', ' KK 3', 'kk 1', 'x9y99', 'x9y100']) item ORDER BY item ASC COLLATE 'tr-u-kn-true-ka-shifted'` now works. [#73544](https://github.com/ClickHouse/ClickHouse/pull/73544) ([Robert Schulze](https://github.com/rschu1ze)).
-* Fix being unable to create a SEQUENTIAL node with keeper-client. [#64177](https://github.com/ClickHouse/ClickHouse/pull/64177) ([Duc Canh Le](https://github.com/canhld94)).
-* Fix incorrect character counting in the position functions. [#71003](https://github.com/ClickHouse/ClickHouse/pull/71003) ([思维](https://github.com/heymind)).
-* `RESTORE` operations for access entities required more permission than necessary because of unhandled partial revokes. This PR fixes the issue. Closes [#71853](https://github.com/ClickHouse/ClickHouse/issues/71853). [#71958](https://github.com/ClickHouse/ClickHouse/pull/71958) ([pufit](https://github.com/pufit)).
-* Avoid pause after `ALTER TABLE REPLACE/MOVE PARTITION FROM/TO TABLE`. Retrieve correct settings for background task scheduling. [#72024](https://github.com/ClickHouse/ClickHouse/pull/72024) ([Aleksei Filatov](https://github.com/aalexfvk)).
-* Fix handling of empty tuples in some input and output formats (e.g. Parquet, Arrow). [#72616](https://github.com/ClickHouse/ClickHouse/pull/72616) ([Michael Kolupaev](https://github.com/al13n321)).
-* Column-level GRANT SELECT/INSERT statements on wildcard databases/tables now throw an error. [#72646](https://github.com/ClickHouse/ClickHouse/pull/72646) ([Johann Gan](https://github.com/johanngan)).
-* Fix the situation when a user can't run `REVOKE ALL ON *.*` because of implicit grants in the target access entity. [#72872](https://github.com/ClickHouse/ClickHouse/pull/72872) ([pufit](https://github.com/pufit)).
-* Fix positive timezone formatting in the `formatDateTime` scalar function. [#73091](https://github.com/ClickHouse/ClickHouse/pull/73091) ([ollidraese](https://github.com/ollidraese)).
-* Fix to correctly reflect the source port when the connection is made through PROXYv1 and `auth_use_forwarded_address` is set; previously, the proxy port was incorrectly used. Add the `currentQueryID()` function. [#73095](https://github.com/ClickHouse/ClickHouse/pull/73095) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Propagate format settings to NativeWriter in TCPHandler, so settings like `output_format_native_write_json_as_string` are applied correctly. [#73179](https://github.com/ClickHouse/ClickHouse/pull/73179) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix a crash in StorageObjectStorageQueue. [#73274](https://github.com/ClickHouse/ClickHouse/pull/73274) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix rare crash in refreshable materialized view during server shutdown. [#73323](https://github.com/ClickHouse/ClickHouse/pull/73323) ([Michael Kolupaev](https://github.com/al13n321)).
-* The `%f` placeholder of function `formatDateTime` now unconditionally generates six (sub-second) digits. This makes the behavior compatible with MySQL `DATE_FORMAT` function. The previous behavior can be restored using setting `formatdatetime_f_prints_scale_number_of_digits = 1`. [#73324](https://github.com/ClickHouse/ClickHouse/pull/73324) ([ollidraese](https://github.com/ollidraese)).
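-
-  For example (with the new behavior, `%f` always prints six digits):
-
-  ```sql
-  SELECT formatDateTime(toDateTime64('2025-01-28 12:34:56.789', 3), '%H:%M:%S.%f') AS s;
-  -- 12:34:56.789000
-  ```
-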
-* Fixed filtering by `_etag` column while reading from `s3` storage and table function. [#73353](https://github.com/ClickHouse/ClickHouse/pull/73353) ([Anton Popov](https://github.com/CurtizJ)).
-* Fix `Not-ready Set is passed as the second argument for function 'in'` error when `IN (subquery)` is used in `JOIN ON` expression, with the old analyzer. [#73382](https://github.com/ClickHouse/ClickHouse/pull/73382) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix preparing for squashing for Dynamic and JSON columns. Previously, in some cases, new types could be inserted into the shared variant/shared data even when the limit on types/paths was not reached. [#73388](https://github.com/ClickHouse/ClickHouse/pull/73388) ([Pavel Kruglov](https://github.com/Avogar)).
-* Check for corrupted sizes during types binary decoding to avoid overly large allocations. [#73390](https://github.com/ClickHouse/ClickHouse/pull/73390) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fixed a logical error when reading from single-replica cluster with parallel replicas enabled. [#73403](https://github.com/ClickHouse/ClickHouse/pull/73403) ([Michael Kolupaev](https://github.com/al13n321)).
-* Fix ObjectStorageQueue with ZooKeeper and older Keeper. [#73420](https://github.com/ClickHouse/ClickHouse/pull/73420) ([Antonio Andelic](https://github.com/antonio2368)).
-* Implement a fix needed to enable Hive partitioning by default. [#73479](https://github.com/ClickHouse/ClickHouse/pull/73479) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Fix data race when creating vector similarity index. [#73517](https://github.com/ClickHouse/ClickHouse/pull/73517) ([Antonio Andelic](https://github.com/antonio2368)).
-* Fix a segfault when the source of a dictionary contains a function with wrong data. [#73535](https://github.com/ClickHouse/ClickHouse/pull/73535) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Fix retries on failed insert in storage S3(Azure)Queue. Closes [#70951](https://github.com/ClickHouse/ClickHouse/issues/70951). [#73546](https://github.com/ClickHouse/ClickHouse/pull/73546) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fixed error in function `tupleElement` which may appear in some cases for tuples with `LowCardinality` elements and enabled setting `optimize_functions_to_subcolumns`. [#73548](https://github.com/ClickHouse/ClickHouse/pull/73548) ([Anton Popov](https://github.com/CurtizJ)).
-* Fix parsing an enum glob followed by a range glob. Fixes [#73473](https://github.com/ClickHouse/ClickHouse/issues/73473). [#73569](https://github.com/ClickHouse/ClickHouse/pull/73569) ([Konstantin Bogdanov](https://github.com/thevar1able)).
-* Fixed `parallel_replicas_for_non_replicated_merge_tree` being ignored in subqueries for non-replicated tables. [#73584](https://github.com/ClickHouse/ClickHouse/pull/73584) ([Igor Nikonov](https://github.com/devcrafter)).
-* Fix a `std::logical_error` thrown when a task cannot be scheduled. Found in stress tests. [#73629](https://github.com/ClickHouse/ClickHouse/pull/73629) ([Alexander Gololobov](https://github.com/davenger)).
-* Do not interpret queries in `EXPLAIN SYNTAX` to avoid logical errors with incorrect processing stage for distributed queries. Fixes [#65205](https://github.com/ClickHouse/ClickHouse/issues/65205). [#73634](https://github.com/ClickHouse/ClickHouse/pull/73634) ([Dmitry Novik](https://github.com/novikd)).
-* Fix possible data inconsistency in Dynamic column. Fixes possible logical error `Nested columns sizes are inconsistent with local_discriminators column size`. [#73644](https://github.com/ClickHouse/ClickHouse/pull/73644) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fixed `NOT_FOUND_COLUMN_IN_BLOCK` in queries with `FINAL` and `SAMPLE`. Fixed an incorrect result in selects with `FINAL` from `CollapsingMergeTree` with enabled optimizations of `FINAL`. [#73682](https://github.com/ClickHouse/ClickHouse/pull/73682) ([Anton Popov](https://github.com/CurtizJ)).
-* Fix crash in LIMIT BY COLUMNS. [#73686](https://github.com/ClickHouse/ClickHouse/pull/73686) ([Raúl Marín](https://github.com/Algunenano)).
-* Fix a bug where, when a normal projection was forced and the query exactly matched the projection definition, the projection was not selected and an error was raised instead. [#73700](https://github.com/ClickHouse/ClickHouse/pull/73700) ([Shichao Jin](https://github.com/jsc0218)).
-* Fix deserialization of Dynamic/Object structure. It could lead to `CANNOT_READ_ALL_DATA` exceptions. [#73767](https://github.com/ClickHouse/ClickHouse/pull/73767) ([Pavel Kruglov](https://github.com/Avogar)).
-* Skip `metadata_version.txt` while restoring parts from a backup. [#73768](https://github.com/ClickHouse/ClickHouse/pull/73768) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Fix a segmentation fault when casting to `Enum` with `LIKE`. [#73775](https://github.com/ClickHouse/ClickHouse/pull/73775) ([zhanglistar](https://github.com/zhanglistar)).
-* Fix S3 Express buckets not working as a disk. [#73777](https://github.com/ClickHouse/ClickHouse/pull/73777) ([Sameer Tamsekar](https://github.com/stamsekar)).
-* Allow merging of rows with invalid sign column values in CollapsingMergeTree tables. [#73864](https://github.com/ClickHouse/ClickHouse/pull/73864) ([Christoph Wurm](https://github.com/cwurm)).
-* Fix getting an error when running a DDL query with an offline replica. [#73876](https://github.com/ClickHouse/ClickHouse/pull/73876) ([Tuan Pham Anh](https://github.com/tuanpach)).
-* Fix an occasional failure to compare `map()` types due to the possibility of creating a `Map` that lacks explicit naming (`keys`, `values`) of its nested tuple. [#73878](https://github.com/ClickHouse/ClickHouse/pull/73878) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Ignore window functions during GROUP BY ALL clause resolution. Fix [#73501](https://github.com/ClickHouse/ClickHouse/issues/73501). [#73916](https://github.com/ClickHouse/ClickHouse/pull/73916) ([Dmitry Novik](https://github.com/novikd)).
-* Fix implicit privileges (they worked as a wildcard before). [#73932](https://github.com/ClickHouse/ClickHouse/pull/73932) ([Azat Khuzhin](https://github.com/azat)).
-* Fix high memory usage during nested Maps creation. [#73982](https://github.com/ClickHouse/ClickHouse/pull/73982) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix parsing nested JSON with empty keys. [#73993](https://github.com/ClickHouse/ClickHouse/pull/73993) ([Pavel Kruglov](https://github.com/Avogar)).
-* Fix: an alias could fail to be added to the projection if it is referenced by another alias and selected in inverse order. [#74033](https://github.com/ClickHouse/ClickHouse/pull/74033) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Ignore object not found errors for Azure during plain_rewritable disk initialization. [#74059](https://github.com/ClickHouse/ClickHouse/pull/74059) ([Julia Kartseva](https://github.com/jkartseva)).
-* Fix behaviour of `any` and `anyLast` with enum types and empty table. [#74061](https://github.com/ClickHouse/ClickHouse/pull/74061) ([Joanna Hulboj](https://github.com/jh0x)).
-* Fix the case when the user specifies keyword arguments in the Kafka table engine. [#74064](https://github.com/ClickHouse/ClickHouse/pull/74064) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Fix altering Storage `S3Queue` settings from the `s3queue_`-prefixed form to the unprefixed form and vice versa. [#74075](https://github.com/ClickHouse/ClickHouse/pull/74075) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Add a setting `allow_push_predicate_ast_for_distributed_subqueries`. This adds AST-based predicate push-down for distributed queries with the analyzer. This is a temporary solution that we use until distributed queries with query plan serialization are supported. Closes [#66878](https://github.com/ClickHouse/ClickHouse/issues/66878) [#69472](https://github.com/ClickHouse/ClickHouse/issues/69472) [#65638](https://github.com/ClickHouse/ClickHouse/issues/65638) [#68030](https://github.com/ClickHouse/ClickHouse/issues/68030) [#73718](https://github.com/ClickHouse/ClickHouse/issues/73718). [#74085](https://github.com/ClickHouse/ClickHouse/pull/74085) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix an issue where, after [#73095](https://github.com/ClickHouse/ClickHouse/issues/73095), a port could be present in the `forwarded_for` field, which led to an inability to resolve the host name with the port included. [#74116](https://github.com/ClickHouse/ClickHouse/pull/74116) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
-* Fixed incorrect formatting of `ALTER TABLE (DROP STATISTICS ...) (DROP STATISTICS ...)`. [#74126](https://github.com/ClickHouse/ClickHouse/pull/74126) ([Han Fei](https://github.com/hanfei1991)).
-* Fix for issue [#66112](https://github.com/ClickHouse/ClickHouse/issues/66112). [#74128](https://github.com/ClickHouse/ClickHouse/pull/74128) ([Anton Ivashkin](https://github.com/ianton-ru)).
-* It is no longer possible to use `Loop` as a table engine in `CREATE TABLE`. This combination was previously causing segfaults. [#74137](https://github.com/ClickHouse/ClickHouse/pull/74137) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
-* Fix security issue to prevent SQL injection in postgresql and sqlite table functions. [#74144](https://github.com/ClickHouse/ClickHouse/pull/74144) ([Pablo Marcos](https://github.com/pamarcos)).
-* Fix crash when reading a subcolumn from the compressed Memory engine table. Fixes [#74009](https://github.com/ClickHouse/ClickHouse/issues/74009). [#74161](https://github.com/ClickHouse/ClickHouse/pull/74161) ([Nikita Taranov](https://github.com/nickitat)).
-* Fixed an infinite loop occurring with queries to `system.detached_tables`. [#74190](https://github.com/ClickHouse/ClickHouse/pull/74190) ([Konstantin Morozov](https://github.com/k-morozov)).
-* Fix a logical error in S3Queue when marking a file as failed. [#74216](https://github.com/ClickHouse/ClickHouse/pull/74216) ([Kseniia Sumarokova](https://github.com/kssenii)).
-* Fix native copy settings (`allow_s3_native_copy`/`allow_azure_native_copy`) for `RESTORE` from base backup. [#74286](https://github.com/ClickHouse/ClickHouse/pull/74286) ([Azat Khuzhin](https://github.com/azat)).
-* Fixed an issue when the number of detached tables in the database is a multiple of `max_block_size`. [#74289](https://github.com/ClickHouse/ClickHouse/pull/74289) ([Konstantin Morozov](https://github.com/k-morozov)).
-* Fix copying via ObjectStorage (i.e. S3) when the source and destination credentials differ. [#74331](https://github.com/ClickHouse/ClickHouse/pull/74331) ([Azat Khuzhin](https://github.com/azat)).
-* Fix detection of "use the Rewrite method in the JSON API" for native copy on GCS. [#74338](https://github.com/ClickHouse/ClickHouse/pull/74338) ([Azat Khuzhin](https://github.com/azat)).
-* Fix incorrect calculation of `BackgroundMergesAndMutationsPoolSize` (it was 2x the real value). [#74509](https://github.com/ClickHouse/ClickHouse/pull/74509) ([alesapin](https://github.com/alesapin)).
-* Fix a bug with leaking Keeper watches when Cluster Discovery is enabled. [#74521](https://github.com/ClickHouse/ClickHouse/pull/74521) ([RinChanNOW](https://github.com/RinChanNOWWW)).
-* Fix a memory alignment issue reported by UBSan [#74512](https://github.com/ClickHouse/ClickHouse/issues/74512). [#74534](https://github.com/ClickHouse/ClickHouse/pull/74534) ([Arthur Passos](https://github.com/arthurpassos)).
-* Fix KeeperMap concurrent cleanup during table creation. [#74568](https://github.com/ClickHouse/ClickHouse/pull/74568) ([Antonio Andelic](https://github.com/antonio2368)).
-* Do not remove unused projection columns in subqueries in the presence of `EXCEPT` or `INTERSECT` to preserve the correct query result. Fixes [#73930](https://github.com/ClickHouse/ClickHouse/issues/73930). Fixes [#66465](https://github.com/ClickHouse/ClickHouse/issues/66465). [#74577](https://github.com/ClickHouse/ClickHouse/pull/74577) ([Dmitry Novik](https://github.com/novikd)).
-* Fixed `INSERT SELECT` queries between tables with `Tuple` columns and enabled sparse serialization. [#74698](https://github.com/ClickHouse/ClickHouse/pull/74698) ([Anton Popov](https://github.com/CurtizJ)).
-* Fix function `right` working incorrectly for a constant negative offset. [#74701](https://github.com/ClickHouse/ClickHouse/pull/74701) ([Daniil Ivanik](https://github.com/divanik)).
-* Fix insertion of gzipped data sometimes failing due to flawed decompression on the client side. [#74707](https://github.com/ClickHouse/ClickHouse/pull/74707) ([siyuan](https://github.com/linkwk7)).
-* Partial revokes with wildcard grants could remove more privileges than expected. Closes [#74263](https://github.com/ClickHouse/ClickHouse/issues/74263). [#74751](https://github.com/ClickHouse/ClickHouse/pull/74751) ([pufit](https://github.com/pufit)).
-* Keeper fix: fix reading log entries from disk. [#74785](https://github.com/ClickHouse/ClickHouse/pull/74785) ([Antonio Andelic](https://github.com/antonio2368)).
-* Fixed checking grants for SYSTEM REFRESH/START/STOP VIEW, now it's not required to have this grant on `*.*` to execute a query for a specific view, only grant for this view are required. [#74789](https://github.com/ClickHouse/ClickHouse/pull/74789) ([Alexander Tokmakov](https://github.com/tavplubix)).
-* The `hasColumnInTable` function didn't account for alias columns; fix it to also work for alias columns (see the fourth sketch below). [#74841](https://github.com/ClickHouse/ClickHouse/pull/74841) ([Bharat Nallan](https://github.com/bharatnc)).
-* Fix FILE_DOESNT_EXIST error occurring during data parts merge for a table with an empty column in Azure Blob Storage. [#74892](https://github.com/ClickHouse/ClickHouse/pull/74892) ([Julia Kartseva](https://github.com/jkartseva)).
-* Fix projection column name when joining temporary tables, close [#68872](https://github.com/ClickHouse/ClickHouse/issues/68872). [#74897](https://github.com/ClickHouse/ClickHouse/pull/74897) ([Vladimir Cherkasov](https://github.com/vdimir)).
-
-#### Build/Testing/Packaging Improvement
-* The universal installation script will propose installation even on macOS. [#74339](https://github.com/ClickHouse/ClickHouse/pull/74339) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
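A minimal sketch of the query shape behind the `EXCEPT`/`INTERSECT` fix above (first sketch); the table and values are hypothetical. Although the outer query never reads `category`, the column still determines which rows `EXCEPT` treats as equal, so it must not be pruned from the subqueries:

```sql
CREATE TABLE events (id UInt64, category String)
ENGINE = MergeTree ORDER BY id;

INSERT INTO events VALUES (1, 'A'), (2, 'B');

-- (2, 'B') does not match (2, 'A'), so id 2 must survive the EXCEPT.
-- Pruning `category` from the subqueries would wrongly drop it.
SELECT id FROM
(
    SELECT id, category FROM events
    EXCEPT
    SELECT 2 AS id, 'A' AS category
);
```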
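The `right` fix above (second sketch) concerns calls with a constant negative offset. Per the documented semantics, a negative offset returns the string with that many leading characters removed:

```sql
-- 'ClickHouse' has 10 characters; offset -5 drops the first 5.
SELECT right('ClickHouse', -5) AS suffix; -- expected: 'House'
```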
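A hypothetical sequence of the kind affected by the partial-revoke fix above (third sketch); `demo_user` and the `sales` database are placeholders. The narrow revoke should remove `SELECT` only on the table it names, leaving the rest of the wildcard grant intact:

```sql
CREATE USER demo_user IDENTIFIED BY 'password';

-- Broad grant over every table in the database...
GRANT SELECT ON sales.* TO demo_user;

-- ...narrowed by a partial revoke on a single table. Before the fix this
-- could strip more privileges than just sales.orders.
REVOKE SELECT ON sales.orders FROM demo_user;

SHOW GRANTS FOR demo_user;
```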
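A sketch of the `hasColumnInTable` fix above (fourth sketch), using a hypothetical table with an `ALIAS` column; previously the function returned 0 for such columns:

```sql
CREATE TABLE products
(
    id UInt64,
    price Float64,
    price_with_tax Float64 ALIAS price * 1.2
)
ENGINE = MergeTree
ORDER BY id;

-- Returns 1 now that ALIAS columns are accounted for.
SELECT hasColumnInTable(currentDatabase(), 'products', 'price_with_tax');
```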
diff --git a/knowledgebase/marketplace.mdx b/knowledgebase/marketplace.mdx
deleted file mode 100644
index 169063817af..00000000000
--- a/knowledgebase/marketplace.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
----
-title: Marketplace
-description: "Learn how to use CSP marketplaces to subscribe to ClickHouse Cloud."
-date: 2022-12-06
-tags: ['Managing Cloud']
-keywords: ['CSP Marketplaces']
----
-
-import Content from '@site/docs/cloud/manage/billing/marketplace/index.md';
-
-{frontMatter.description}
-{/* truncate */}
-
-## Marketplace {#marketplace}
-
-Details on using CSP marketplaces to subscribe to ClickHouse Cloud.
-
-
diff --git a/docs/faq/troubleshooting.md b/knowledgebase/unable-to-access-cloud-service.mdx
similarity index 62%
rename from docs/faq/troubleshooting.md
rename to knowledgebase/unable-to-access-cloud-service.mdx
index 4b1221d7dea..46fc174e860 100644
--- a/docs/faq/troubleshooting.md
+++ b/knowledgebase/unable-to-access-cloud-service.mdx
@@ -1,12 +1,15 @@
---
-title: 'Troubleshooting'
-slug: /faq/troubleshooting
-description: 'How to troubleshoot common ClickHouse Cloud error messages.'
+title: Unable to access a ClickHouse Cloud service
+description: "How to troubleshoot being unable to access your ClickHouse Cloud service"
+date: 2024-07-02
+tags: ['Errors and Exceptions']
+keywords: ['accessing cloud service']
---
-## ClickHouse Cloud troubleshooting {#clickhouse-cloud-troubleshooting}
+{frontMatter.description}
+{/* truncate */}
-### Unable to access a ClickHouse Cloud service {#unable-to-access-a-clickhouse-cloud-service}
+## I am unable to access a ClickHouse Cloud service {#i-am-unable-to-access-a-clickhouse-cloud-service}
If you are seeing an error message like one of these, your IP Access List may be denying access:
diff --git a/docs/cloud/migrate/upload-a-csv-file.md b/knowledgebase/upload-a-file.mdx
similarity index 91%
rename from docs/cloud/migrate/upload-a-csv-file.md
rename to knowledgebase/upload-a-file.mdx
index 71347b0f55e..f968d125d1c 100644
--- a/docs/cloud/migrate/upload-a-csv-file.md
+++ b/knowledgebase/upload-a-file.mdx
@@ -1,7 +1,9 @@
---
-title: 'Uploading files'
-slug: /cloud/migrate/upload-a-csv-file
-description: 'Learn how to upload files to Cloud'
+title: How do I upload a file to ClickHouse Cloud?
+description: "Learn how to upload a file to ClickHouse Cloud to import your data"
+date: 2025-07-24
+tags: ['Data Ingestion']
+keywords: ['Upload file', 'ClickHouse Cloud']
---
import Image from '@theme/IdealImage';
@@ -16,6 +18,9 @@ import csv_08 from '@site/static/images/cloud/migrate/csv_08.png';
import csv_09 from '@site/static/images/cloud/migrate/csv_09.png';
import csv_10 from '@site/static/images/cloud/migrate/csv_10.png';
+{frontMatter.description}
+{/* truncate */}
+
# Upload files to Cloud
ClickHouse Cloud provides an easy way to import your files and supports the
@@ -49,14 +54,14 @@ Next select `Upload a file` on the right side of the data sources page:
-A file dialogue will pop up allowing you to select the file that you wish to
+A file dialogue will pop up, allowing you to select the file that you wish to
use to insert data into a table on your Cloud service.
## Configure table {#configure-table}
-Once the file has uploaded you will be able to configure the table where you want
+Once the file has uploaded, you will be able to configure the table where you want
to insert the data to. A preview of the table with the first three rows is shown.
@@ -67,7 +72,7 @@ You can now select a destination table. The options are:
- an existing table
-You can specify which database you want to upload the data to, and in the case of
+You can specify which database you want to upload the data to, and in the case of
a new table, the name of the table that will be created. You will also be able to select the sorting key:
@@ -103,8 +108,8 @@ the file which was uploaded to an S3 bucket using the `URL` table function.
-If the job fails you will see a `failed` status badge under the `Status` column of
-the `Data upload history` tab. You can click `View Details` for more information
+If the job fails, you will see a `failed` status badge under the `Status` column of
+the `Data upload history` tab. You can click `View Details` for more information
on why the upload failed. You may need to modify the table configuration or clean
the data based on the error message for the failed insert.
diff --git a/scripts/aspell-dict-file.txt b/scripts/aspell-dict-file.txt
index 22113ce5f4f..b57186e13be 100644
--- a/scripts/aspell-dict-file.txt
+++ b/scripts/aspell-dict-file.txt
@@ -300,7 +300,7 @@ dataview
--docs/integrations/data-ingestion/clickpipes/postgres/source/supabase.md--
pooler
supabase
---docs/cloud/security/saml-sso-setup.md--
+--docs/cloud/guides/security/cloud_access_management/saml-sso-setup.md--
IdP
IdPs
Entra
@@ -327,7 +327,7 @@ typings
Rockset's
Workspaces
workspaces
---docs/cloud/security/azure-privatelink.md--
+--docs/cloud/features/04_security/connectivity/private_networking/azure-privatelink.md--
privatelink
VNets
guid
@@ -353,13 +353,13 @@ UserIDs
--docs/cloud/security/shared-responsibility-model.md--
Entra
RelayState
---docs/cloud/manage/_snippets/_network_transfer_rates.md--
+--docs/cloud/reference/_snippets/_network_transfer_rates.md--
asia
eastus
europe
germanywestcentral
westus
---docs/cloud/manage/jan2025_faq/dimensions.md--
+--docs/cloud/reference/09_jan2025_faq/dimensions.md--
intra
clickpipesPricingFaq
--docs/cloud/manage/backups.md--
@@ -402,14 +402,14 @@ pageId
rdhnsx
sessionId
ybtm
---docs/cloud/reference/supported-regions.md--
+--docs/cloud/reference/05_supported-regions.md--
asia
australia
europe
Montréal
northamerica
JapanEast
---docs/cloud/reference/byoc.md--
+--docs/cloud/features/02_infrastructure_and_deploy/byoc.md--
Acceptor
Autoscaler
byoc
@@ -956,7 +956,7 @@ payg
bursty
--docs/cloud/security/gcp-private-service-connect.md--
privatelink
---docs/cloud/security/aws-privatelink.md--
+--docs/cloud/features/04_security/connectivity/private_networking/aws-privatelink.md--
dnsname
nsname
pecreate
@@ -1000,7 +1000,7 @@ Probabilistically
tunable
--docs/best-practices/use_materialized_views.md--
DAGs
---docs/migrations/postgres/appendix.md--
+--docs/cloud/onboard/02_migrate/01_migration_guides/02_postgres/appendix.md--
Citus
--docs/integrations/data-ingestion/azure-synapse/index.md--
microsoft
@@ -1093,3 +1093,11 @@ MCP's
daemonset
--docs/use-cases/observability/clickstack/ingesting-data/kubernetes.md--
daemonset
+--docs/cloud/onboard/01_discover/02_use_cases/04_machine_learning_and_genAI/03_agent_facing_analytics.md--
+AgentForce
+DeepSeek
+OpenAI's
+PDFs
+ReAct
+ServiceNow
+explorative
\ No newline at end of file
diff --git a/scripts/aspell-ignore/en/aspell-dict.txt b/scripts/aspell-ignore/en/aspell-dict.txt
index 9c99dc35314..4195360fd28 100644
--- a/scripts/aspell-ignore/en/aspell-dict.txt
+++ b/scripts/aspell-ignore/en/aspell-dict.txt
@@ -2375,6 +2375,7 @@ kurtsamp
lagInFrame
laion
lakehouse
+Lakehouses
lang
laravel
largestTriangleThreeBuckets
@@ -3609,3 +3610,22 @@ znode
znodes
zookeeperSessionUptime
zstd
+Coinhall
+Instacart
+SingleStore
+Fastly
+Fong
+MTTD
+MTTR
+O'Reilly
+transformative
+MLOps
+chatbots
+SageMaker
+GWLBs
+NLBs
+explorative
+pointwise
+summarization
+reusability
+lakehouses
\ No newline at end of file
diff --git a/scripts/autogenerate-table-of-contents.sh b/scripts/autogenerate-table-of-contents.sh
index 1d614c97c56..f57755a96d7 100644
--- a/scripts/autogenerate-table-of-contents.sh
+++ b/scripts/autogenerate-table-of-contents.sh
@@ -40,8 +40,8 @@ COMMANDS=(
'--single-toc --dir="docs/sql-reference/aggregate-functions/reference" --md="docs/sql-reference/aggregate-functions/reference/index.md"'
'--single-toc --dir="docs/sql-reference/table-functions" --md="docs/sql-reference/table-functions/index.md"'
'--single-toc --dir="docs/chdb/guides" --md="docs/chdb/guides/index.md" --ignore images'
- '--single-toc --dir="docs/cloud/manage/jan2025_faq" --md="docs/cloud/manage/jan2025_faq/index.md" --ignore images'
- '--single-toc --dir="docs/cloud/changelogs" --md="docs/cloud/reference/release-notes-index.md"'
+ '--single-toc --dir="docs/cloud/reference/09_jan2025_faq" --md="docs/cloud/reference/09_jan2025_faq/index.md" --ignore images'
+ '--single-toc --dir="docs/cloud/reference/01_changelog" --md="docs/cloud/reference/01_changelog/02_release_notes/index.md"'
'--single-toc --dir="docs/development" --md="docs/development/index.md" --ignore images'
'--single-toc --dir="docs/getting-started/example-datasets" --md="docs/getting-started/index.md" --ignore images'
'--single-toc --dir="docs/integrations/data-ingestion/clickpipes/kafka" --md="docs/integrations/data-ingestion/clickpipes/kafka/index.md" --ignore images'
diff --git a/scripts/check-doc-aspell b/scripts/check-doc-aspell
index cd7f412a9d2..bc79d14dc68 100755
--- a/scripts/check-doc-aspell
+++ b/scripts/check-doc-aspell
@@ -131,6 +131,7 @@ IGNORE_LIST=(
"${ROOT_PATH}/docs/whats-new/security-changelog.md"
"${ROOT_PATH}/docs/cloud/changelogs/*"
"${ROOT_PATH}/docs/whats-new/changelog/*" # ignore all changelogs
+ "${ROOT_PATH}/docs/cloud/reference/01_changelog/*"
"${ROOT_PATH}/docs/sql-reference/*" # we ignore all files from ClickHouse/ClickHouse
"${ROOT_PATH}/docs/engines/*"
"${ROOT_PATH}/docs/getting-started/*"
@@ -141,6 +142,7 @@ IGNORE_LIST=(
"${ROOT_PATH}/docs/zh/*"
"${ROOT_PATH}/docs/cloud/manage/api/*" # api files
"${ROOT_PATH}/docs/index.md"
+ "${ROOT_PATH}/docs/about-us/beta-and-experimental-features.md"
# Add more files to ignore here, e.g.:
# "${ROOT_PATH}/docs/some-other-file.md"
)
diff --git a/sidebars.js b/sidebars.js
index 19a62fdf5f8..ff0cbf444fb 100644
--- a/sidebars.js
+++ b/sidebars.js
@@ -186,61 +186,6 @@ const sidebars = {
},
]
},
- {
- type: "category",
- label: "Migration Guides",
- collapsed: false,
- collapsible: false,
- link: { type: "doc", id: "migrations/index" },
- items: [
- {
- type: "category",
- label: "BigQuery",
- link: { type: "doc", id: "migrations/bigquery/index" },
- items: [
- {
- type: "doc",
- id: "migrations/bigquery/equivalent-concepts",
- },
- {
- type: "doc",
- id: "migrations/bigquery/migrating-to-clickhouse-cloud",
- },
- {
- type: "doc",
- id: "migrations/bigquery/loading-data",
- },
- ]
- },
- "migrations/snowflake",
- {
- type: "category",
- label: "PostgreSQL",
- collapsed: true,
- collapsible: true,
- link: { type: "doc", id: "migrations/postgres/index" },
- items: [
- {
- type: "doc",
- id: "migrations/postgres/overview",
- label: "Overview",
- },
- "migrations/postgres/dataset",
- "migrations/postgres/rewriting-queries",
- "migrations/postgres/data-modeling-techniques",
- "migrations/postgres/appendix"
- ],
- },
- "integrations/data-ingestion/dbms/mysql/index",
- "integrations/data-ingestion/redshift/index",
- "integrations/data-ingestion/dbms/dynamodb/index",
- {
- label: "Elasticsearch",
- type: "doc",
- id: "use-cases/observability/clickstack/migration/elastic/index",
- },
- ],
- },
{
type: "category",
label: "Example Datasets",
@@ -260,244 +205,102 @@ const sidebars = {
cloud: [
{
type: "category",
- label: "Get Started",
- collapsed: false,
- collapsible: false,
- className: "top-nav-item",
- link: { type: "doc", id: "cloud/get-started/index" },
- items: [
- "cloud-index",
- "cloud/get-started/sql-console",
- "cloud/get-started/query-insights",
- "cloud/get-started/query-endpoints",
- "cloud/manage/dashboards",
- "cloud/manage/hyperdx",
- "cloud/support",
- ],
- },
- {
- type: "category",
- label: "Best Practices",
- collapsed: false,
- collapsible: false,
- className: "top-nav-item",
- link: { type: "doc", id: "cloud/bestpractices/index" },
- items: [
- "cloud/bestpractices/usagelimits",
- "cloud/bestpractices/multitenancy",
- ],
- },
- {
- type: "category",
- label: "Managing Cloud",
+ label: "Get started",
collapsed: false,
- collapsible: false,
- className: "top-nav-item",
- link: { type: "doc", id: "cloud/manage/index" },
+ collapsible: true,
+ link: { type: "doc", id: "cloud/onboard/index" },
items: [
- "cloud/manage/cloud-tiers",
- "cloud/manage/integrations",
{
type: "category",
- label: "Backups",
+ label: "Discover",
collapsed: true,
collapsible: true,
- link: { type: "doc", id: "cloud/manage/backups/index" },
items: [
{
type: "autogenerated",
- dirName: "cloud/manage/backups",
+ dirName: "cloud/onboard/01_discover"
}
]
},
{
type: "category",
- label: "Monitoring",
+ label: "Setup",
collapsed: true,
collapsible: true,
items: [
{
type: "autogenerated",
- dirName: "cloud/manage/monitoring"
- }
- ],
- },
- {
- type: "category",
- label: "Billing",
- link: { type: "doc", id: "cloud/manage/billing/index" },
- items: [
- "cloud/manage/billing",
- "cloud/manage/billing/payment-thresholds",
- "cloud/manage/troubleshooting-billing-issues",
- {
- type: "category",
- label: "Marketplace",
- link: { type: "doc", id: "cloud/manage/billing/marketplace/index" },
- items: [
- "cloud/manage/billing/marketplace/overview",
- "cloud/manage/billing/marketplace/aws-marketplace-payg",
- "cloud/manage/billing/marketplace/aws-marketplace-committed",
- "cloud/manage/billing/marketplace/gcp-marketplace-payg",
- "cloud/manage/billing/marketplace/gcp-marketplace-committed",
- "cloud/manage/billing/marketplace/azure-marketplace-payg",
- "cloud/manage/billing/marketplace/azure-marketplace-committed",
- ],
+ dirName: "cloud/onboard/02_migrate",
}
- ],
+ ]
},
- "cloud/manage/settings",
- "cloud/manage/replica-aware-routing",
- "cloud/manage/scaling",
- "cloud/manage/service-uptime",
- "cloud/manage/notifications",
- "cloud/manage/upgrades",
- "cloud/manage/account-close",
- "cloud/manage/postman",
- "faq/troubleshooting",
- "cloud/manage/network-data-transfer",
{
type: "category",
- label: "Jan 2025 Changes FAQ",
+ label: "Tune",
collapsed: true,
collapsible: true,
- link: { type: "doc", id: "cloud/manage/jan2025_faq/index" },
items: [
- "cloud/manage/jan2025_faq/summary",
- "cloud/manage/jan2025_faq/new_tiers",
- "cloud/manage/jan2025_faq/plan_migrations",
- "cloud/manage/jan2025_faq/dimensions",
- "cloud/manage/jan2025_faq/billing",
- "cloud/manage/jan2025_faq/scaling",
- "cloud/manage/jan2025_faq/backup",
- ],
+ {
+ type: "autogenerated",
+ dirName: "cloud/onboard/03_tune",
+ }
+ ]
}
- ],
+ ]
},
{
type: "category",
- label: "Cloud API",
- collapsed: false,
- collapsible: false,
+ label: "Features",
+ collapsed: true,
+ collapsible: true,
className: "top-nav-item",
- link: { type: "doc", id: "cloud/manage/api/index" },
items: [
- "cloud/manage/api/api-overview",
- "cloud/manage/openapi",
{
- type: 'link',
- label: "API Reference",
- href: "https://clickhouse.com/docs/cloud/manage/api/swagger",
+ type: "autogenerated",
+ dirName: "cloud/features",
}
],
},
{
type: "category",
- label: "Cloud Reference ",
- collapsed: false,
- collapsible: false,
+ label: "Guides",
+ collapsed: true,
+ collapsible: true,
className: "top-nav-item",
- link: { type: "doc", id: "cloud/reference/index" },
+ link: { type: "doc", id: "cloud/guides/index" },
items: [
- "cloud/reference/architecture",
- "cloud/reference/shared-merge-tree",
- "cloud/reference/shared-catalog",
- "cloud/reference/warehouses",
- "cloud/reference/byoc",
{
- type: "category",
- link: { type: "doc", id: "cloud/reference/changelogs-index" },
- label: "Changelogs",
- collapsed: true,
- items: [
- "cloud/reference/changelog",
- {
- type: "category",
- label: "Release Notes",
- collapsed: true,
- link: { type: "doc", id: "cloud/reference/release-notes-index" },
- items: [
- {
- type: "autogenerated",
- dirName: "cloud/changelogs"
- }
- ]
- }
- ],
- },
- "cloud/reference/cloud-compatibility",
- "cloud/reference/supported-regions"
+ type: "autogenerated",
+ dirName: "cloud/guides",
+ }
],
},
{
type: "category",
- label: "Security",
- collapsed: false,
- collapsible: false,
+ label: "Reference",
+ collapsed: true,
+ collapsible: true,
className: "top-nav-item",
- link: { type: "doc", id: "cloud/security/index" },
+ link: { type: "doc", id: "cloud/reference/index" },
items: [
- "cloud/security/shared-responsibility-model",
- {
- type: "category",
- label: "Cloud Access Management",
- link: { type: "doc", id: "cloud/security/cloud-access-management/index" },
- items: [
- "cloud/security/cloud-access-management/cloud-access-management",
- "cloud/security/cloud-access-management/cloud-authentication",
- "cloud/security/saml-sso-setup",
- "cloud/security/common-access-management-queries",
- "cloud/security/inviting-new-users",
- ],
- },
{
- type: "category",
- label: "Connectivity",
- link: { type: "doc", id: "cloud/security/connectivity-overview" },
- items: [
- "cloud/security/setting-ip-filters",
- {
- type: "category",
- label: "Private Networking",
- link: { type: "doc", id: "cloud/security/private-link-overview" },
- items: [
- "cloud/security/aws-privatelink",
- "cloud/security/gcp-private-service-connect",
- "cloud/security/azure-privatelink",
- ],
- },
- "cloud/security/accessing-s3-data-securely",
- "cloud/security/cloud-endpoints-api",
- ],
- },
- "cloud/security/cmek",
- "cloud/security/audit-logging",
- {
- type: "category",
- label: "Privacy and Compliance",
- collapsed: true,
- collapsible: true,
- link: { type: "doc", id: "cloud/security/privacy-compliance-overview" },
- items: [
- "cloud/security/compliance-overview",
- "cloud/security/personal-data-access",
- ],
- },
+ type: "autogenerated",
+ dirName: "cloud/reference",
+ }
],
},
{
type: "category",
- label: "Migrating to Cloud",
- collapsed: false,
- collapsible: false,
- link: { type: "doc", id: "integrations/migration/index" },
+ label: "Cloud API",
+ collapsed: true,
+ collapsible: true,
+ className: "top-nav-item",
+ link: { type: "doc", id: "cloud/api/index" },
items: [
- "integrations/migration/overview",
- "integrations/migration/clickhouse-to-cloud",
- "integrations/migration/clickhouse-local-etl",
- "integrations/migration/etl-tool-to-clickhouse",
- "integrations/migration/object-storage-to-clickhouse",
- "cloud/migrate/upload-a-csv-file",
+ {
+ type: "autogenerated",
+ dirName: "cloud/api",
+ }
],
},
],
@@ -732,11 +535,10 @@ const sidebars = {
"integrations/data-ingestion/clickpipes/postgres/deduplication",
"integrations/data-ingestion/clickpipes/postgres/ordering_keys",
"integrations/data-ingestion/clickpipes/postgres/toast",
- "integrations/data-ingestion/dbms/postgresql/connecting-to-postgresql",
- "integrations/data-ingestion/dbms/postgresql/inserting-data",
"integrations/data-ingestion/clickpipes/postgres/schema-changes",
"integrations/data-ingestion/clickpipes/postgres/faq",
- "integrations/data-ingestion/clickpipes/postgres/parallel_initial_load",
+ "integrations/data-ingestion/dbms/postgresql/connecting-to-postgresql",
+ "integrations/data-ingestion/dbms/postgresql/inserting-data",
{
type: "category",
label: "Operations",
@@ -745,9 +547,6 @@ const sidebars = {
"integrations/data-ingestion/clickpipes/postgres/pause_and_resume",
"integrations/data-ingestion/clickpipes/postgres/remove_table",
"integrations/data-ingestion/clickpipes/postgres/table_resync",
- "integrations/data-ingestion/clickpipes/postgres/resync",
- "integrations/data-ingestion/clickpipes/postgres/controlling_sync",
- "integrations/data-ingestion/clickpipes/postgres/scaling",
],
},
{
@@ -1824,39 +1623,27 @@ const sidebars = {
},
{
type: "link",
- label: "Best Practices",
- description: "How to get the most out of ClickHouse Cloud",
- href: "/cloud/bestpractices/"
+ label: "Features",
+ description: "Features offered by ClickHouse Cloud",
+ href: "/cloud/features/"
},
{
type: "link",
- label: "Managing Cloud",
- description: "Manage your ClickHouse Cloud services",
- href: "/cloud/bestpractices"
- },
- {
- type: "link",
- label: "Cloud API",
- description: "Automate your ClickHouse Cloud services",
- href: "/cloud/manage/cloud-api/"
+ label: "Guides",
+ description: "ClickHouse Cloud guides",
+ href: "/cloud/guides"
},
{
type: "link",
- label: "Cloud Reference",
- description: "Understanding how ClickHouse Cloud works",
+ label: "Reference",
+ description: "Reference docs for ClickHouse Cloud",
href: "/cloud/reference/"
},
{
type: "link",
- label: "Security",
- description: "Secure your ClickHouse Cloud services",
- href: "/cloud/security/"
- },
- {
- type: "link",
- label: "Migrating to Cloud",
- description: "Migrate your database to ClickHouse Cloud",
- href: "/integrations/migration"
+ label: "Cloud API",
+ description: "Automate your ClickHouse Cloud services",
+ href: "/cloud/manage/cloud-api/"
},
]
},
diff --git a/static/images/cloud/onboard/discover/use_cases/0_rta.png b/static/images/cloud/onboard/discover/use_cases/0_rta.png
new file mode 100644
index 00000000000..d294c84d49d
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/0_rta.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/1_rta.png b/static/images/cloud/onboard/discover/use_cases/1_rta.png
new file mode 100644
index 00000000000..524a35b5ae3
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/1_rta.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/2_rta.png b/static/images/cloud/onboard/discover/use_cases/2_rta.png
new file mode 100644
index 00000000000..6e5c7e2e1f9
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/2_rta.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/3_rta.png b/static/images/cloud/onboard/discover/use_cases/3_rta.png
new file mode 100644
index 00000000000..6c31e82a3cc
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/3_rta.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/4_rta.png b/static/images/cloud/onboard/discover/use_cases/4_rta.png
new file mode 100644
index 00000000000..78baae8cfb1
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/4_rta.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/cloud_architecture.png b/static/images/cloud/onboard/discover/use_cases/cloud_architecture.png
new file mode 100644
index 00000000000..394a3cc4c4b
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/cloud_architecture.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/datalakehouse_01.png b/static/images/cloud/onboard/discover/use_cases/datalakehouse_01.png
new file mode 100644
index 00000000000..120f30c54d2
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/datalakehouse_01.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/ml_ai_01.png b/static/images/cloud/onboard/discover/use_cases/ml_ai_01.png
new file mode 100644
index 00000000000..2193a93a6d1
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/ml_ai_01.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/ml_ai_02.png b/static/images/cloud/onboard/discover/use_cases/ml_ai_02.png
new file mode 100644
index 00000000000..9443ee122f9
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/ml_ai_02.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/ml_ai_03.png b/static/images/cloud/onboard/discover/use_cases/ml_ai_03.png
new file mode 100644
index 00000000000..88a8656c830
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/ml_ai_03.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/ml_ai_04.png b/static/images/cloud/onboard/discover/use_cases/ml_ai_04.png
new file mode 100644
index 00000000000..f6705484d74
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/ml_ai_04.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/ml_ai_05.png b/static/images/cloud/onboard/discover/use_cases/ml_ai_05.png
new file mode 100644
index 00000000000..035aebd1b5c
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/ml_ai_05.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/ml_ai_06.png b/static/images/cloud/onboard/discover/use_cases/ml_ai_06.png
new file mode 100644
index 00000000000..48a17c9e0b9
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/ml_ai_06.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/ml_ai_07.png b/static/images/cloud/onboard/discover/use_cases/ml_ai_07.png
new file mode 100644
index 00000000000..ff365134141
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/ml_ai_07.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/ml_ai_08.png b/static/images/cloud/onboard/discover/use_cases/ml_ai_08.png
new file mode 100644
index 00000000000..7c6488df3bc
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/ml_ai_08.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/ml_ai_09.png b/static/images/cloud/onboard/discover/use_cases/ml_ai_09.png
new file mode 100644
index 00000000000..ae54653aa67
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/ml_ai_09.png differ
diff --git a/static/images/cloud/onboard/discover/use_cases/snowflake_architecture.png b/static/images/cloud/onboard/discover/use_cases/snowflake_architecture.png
new file mode 100644
index 00000000000..05ae8e7c2a7
Binary files /dev/null and b/static/images/cloud/onboard/discover/use_cases/snowflake_architecture.png differ
diff --git a/vercel.json b/vercel.json
index 90949ea1974..71916810c3c 100644
--- a/vercel.json
+++ b/vercel.json
@@ -3367,14 +3367,29 @@
"permanent": true
},
{
- "source": "/docs/guides/developer/lightweight-update",
- "destination": "/docs/guides/developer/on-the-fly-mutations",
+ "source": "/guides/developer/lightweight-update",
+ "destination": "/guides/developer/on-the-fly-mutations",
"permanent": true
},
{
"source": "/docs/manage/troubleshooting-billing-issues",
"destination": "/docs/manage/clickhouse-cloud-billing-compliance",
"permanent": true
+ },
+ {
+ "source": "/docs/guides/developer/lightweight-update",
+ "destination": "/docs/guides/developer/on-the-fly-mutations",
+ "permanent": true
+ },
+ {
+ "source": "/docs/cloud/migrate/upload-a-csv-file",
+ "destination": "/docs/knowledge-base/upload-a-file",
+ "permanent": true
+ },
+ {
+ "source": "/docs/faq/troubleshooting",
+ "destination": "/docs/knowledge-base/unable-to-access-cloud-service",
+ "permanent": true
}
]
}