Skip to content

Commit d62eb38

Browse files
Merge branch 'apache:main' into issue-489-parition-key-more-types-apache
2 parents 126da6d + 9350bb2 commit d62eb38

File tree

264 files changed

+6969
-2259
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

264 files changed

+6969
-2259
lines changed

.github/workflows/docs-check.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,17 @@
1818

1919
# This workflow is meant for checking broken links in the documentation.
2020
name: Check Documentation
21+
permissions:
22+
contents: read
2123
on:
2224
pull_request:
2325
branches: [main, release-*, ci-*]
2426
paths:
2527
- 'website/**'
28+
push:
29+
branches: [main, release-*, ci-*]
30+
paths:
31+
- 'website/**'
2632

2733
jobs:
2834
test-deploy:

.github/workflows/docs-deploy.yaml

Lines changed: 13 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -16,39 +16,27 @@
1616
# limitations under the License.
1717
################################################################################
1818
name: Deploy Documentation
19+
permissions:
20+
contents: read
1921
on:
2022
push:
2123
branches: [main, release-*]
2224
paths:
2325
- 'website/**'
2426

27+
# We use `repository_dispatch` to trigger the deployment job on the apache/fluss-website repository.
28+
# https://github.com/apache/fluss-website/blob/main/.github/workflows/website-deploy.yaml
29+
# https://docs.github.com/en/actions/reference/events-that-trigger-workflows#repository_dispatch
2530
jobs:
2631
deploy:
2732
runs-on: ubuntu-latest
28-
defaults:
29-
run:
30-
working-directory: ./website
3133
steps:
32-
- uses: actions/checkout@v4
33-
with:
34-
ref: main
35-
fetch-depth: 0
36-
- name: Generate versioned docs
37-
run: ./build_versioned_docs.sh
38-
- uses: actions/setup-node@v4
39-
with:
40-
node-version: 18
41-
- name: Install dependencies
42-
run: npm install
43-
- name: Build website
44-
run: npm run build
45-
- uses: webfactory/ssh-agent@v0.5.0
46-
with:
47-
ssh-private-key: ${{ secrets.GH_PAGES_DEPLOY }}
48-
- name: Deploy website
49-
env:
50-
USE_SSH: true
34+
- name: Send Event to Trigger Deploy
5135
run: |
52-
git config --global user.email "actions@github.com"
53-
git config --global user.name "gh-actions"
54-
npm run deploy -- --skip-build
36+
curl -L \
37+
-X POST \
38+
-H "Accept: application/vnd.github+json" \
39+
-H "Authorization: Bearer ${{ secrets.GH_TOKEN }}" \
40+
-H "X-GitHub-Api-Version: 2022-11-28" \
41+
https://api.github.com/repos/apache/fluss-website/dispatches \
42+
-d '{"event_type":"website-deploy","client_payload":{"repository": "apache/fluss"}}'

.github/workflows/license-check.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,8 @@
1414
# limitations under the License.
1515

1616
name: Check License
17+
permissions:
18+
contents: read
1719

1820
on: [push, pull_request]
1921

@@ -37,6 +39,7 @@ jobs:
3739
with:
3840
java-version: 8
3941
distribution: 'temurin'
42+
4043
- name: Build
4144
run: |
4245
set -o pipefail

.github/workflows/stage.sh

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,9 @@ fluss-flink/fluss-flink-1.20,\
2727
fluss-flink/fluss-flink-1.19,\
2828
fluss-flink/fluss-flink-1.18,\
2929
fluss-lake,\
30-
fluss-lake/fluss-lake-paimon
30+
fluss-lake/fluss-lake-paimon,\
31+
fluss-lake/fluss-lake-iceberg,\
32+
fluss-lake/fluss-lake-lance
3133
"
3234

3335
function get_test_modules_for_stage() {

.idea/vcs.xml

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

README.md

Lines changed: 16 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -3,25 +3,26 @@
33
<source media="(prefers-color-scheme: dark)" srcset="website/static/img/logo/svg/white_color_logo.svg">
44
<source media="(prefers-color-scheme: light)" srcset="website/static/img/logo/svg/colored_logo.svg">
55
<!-- Fall back to version that works for dark and light mode -->
6-
<img alt="Fluss logo" src="website/static/img/logo/svg/white_filled.svg">
6+
<img alt="Apache Fluss logo" src="website/static/img/logo/svg/white_filled.svg">
77
</picture>
88
</p>
99

1010
<p align="center">
11-
<a href="https://alibaba.github.io/fluss-docs/docs/intro/">Documentation</a> | <a href="https://alibaba.github.io/fluss-docs/docs/quickstart/flink/">QuickStart</a> | <a href="https://alibaba.github.io/fluss-docs/community/dev/ide-setup/">Development</a>
11+
<a href="https://fluss.apache.org/docs/">Documentation</a> | <a href="https://fluss.apache.org/docs/quickstart/flink/">QuickStart</a> | <a href="https://fluss.apache.org/community/dev/ide-setup/">Development</a>
1212
</p>
1313

1414
<p align="center">
15-
<a href="https://github.com/alibaba/fluss/actions/workflows/ci.yaml"><img src="https://github.com/alibaba/fluss/actions/workflows/ci.yaml/badge.svg?branch=main" alt="CI"></a>
16-
<a href="https://github.com/alibaba/fluss/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-Apache%202-4EB1BA.svg" alt="License"></a>
17-
<a href="https://join.slack.com/t/fluss-hq/shared_invite/zt-33wlna581-QAooAiCmnYboJS8D_JUcYw"><img src="https://img.shields.io/badge/slack-join_chat-brightgreen.svg?logo=slack" alt="Slack"></a>
15+
<a href="https://github.com/apache/fluss/actions/workflows/ci.yaml"><img src="https://github.com/apache/fluss/actions/workflows/ci.yaml/badge.svg?branch=main" alt="CI"></a>
16+
<a href="https://github.com/apache/fluss/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-Apache%202-4EB1BA.svg" alt="License"></a>
17+
<a href="https://join.slack.com/t/apache-fluss/shared_invite/zt-33wlna581-QAooAiCmnYboJS8D_JUcYw"><img src="https://img.shields.io/badge/slack-join_chat-brightgreen.svg?logo=slack" alt="Slack"></a>
1818
</p>
1919

20-
## What is Fluss?
20+
## What is Apache Fluss (Incubating)?
2121

22-
Fluss is a streaming storage built for real-time analytics which can serve as the real-time data layer for Lakehouse architectures.
22+
Apache Fluss (Incubating) is a streaming storage built for real-time analytics which can serve as the real-time data layer for Lakehouse architectures.
2323

24-
It bridges the gap between **data streaming** and **data Lakehouse** by enabling low-latency, high-throughput data ingestion and processing while seamlessly integrating with popular compute engines like **Apache Flink**, while Apache Spark, and StarRocks are coming soon.
24+
It bridges the gap between **data streaming** and **data Lakehouse** by enabling low-latency, high-throughput data ingestion and processing while seamlessly integrating with popular compute engines like **Apache Flink**;
25+
support for Apache Spark and StarRocks is coming soon.
2526

2627
**Fluss (German: river, pronounced `/flus/`)** enables streaming data continuously converging, distributing and flowing into lakes, like a river 🌊
2728

@@ -36,27 +37,27 @@ It bridges the gap between **data streaming** and **data Lakehouse** by enabling
3637

3738
## Building
3839

39-
Prerequisites for building Fluss:
40+
Prerequisites for building Apache Fluss:
4041

4142
- Unix-like environment (we use Linux, Mac OS X, Cygwin, WSL)
4243
- Git
4344
- Maven (we require version >= 3.8.6)
4445
- Java 8 or 11
4546

4647
```bash
47-
git clone https://github.com/alibaba/fluss.git
48+
git clone https://github.com/apache/fluss.git
4849
cd fluss
4950
./mvnw clean package -DskipTests
5051
```
5152

52-
Fluss is now installed in `build-target`. The build command uses Maven Wrapper (`mvnw`) which ensures the correct Maven version is used.
53+
Apache Fluss is now installed in `build-target`. The build command uses Maven Wrapper (`mvnw`) which ensures the correct Maven version is used.
5354

5455
## Contributing
5556

56-
Fluss is open-source, and we’d love your help to keep it growing! Join the [discussions](https://github.com/alibaba/fluss/discussions),
57-
open [issues](https://github.com/alibaba/fluss/issues) if you find a bug or request features, contribute code and documentation,
58-
or help us improve the project in any way. All contributions are welcome!
57+
Apache Fluss (Incubating) is open-source, and we’d love your help to keep it growing! Join the [discussions](https://github.com/apache/fluss/discussions),
58+
open [issues](https://github.com/apache/fluss/issues) if you find a bug or request features, contribute code and documentation,
59+
or help us improve the project in any way. All contributions are welcome!
5960

6061
## License
6162

62-
Fluss project is licensed under the [Apache License 2.0](https://github.com/alibaba/fluss/blob/main/LICENSE).
63+
Apache Fluss (Incubating) project is licensed under the [Apache License 2.0](https://github.com/apache/fluss/blob/main/LICENSE).

fluss-client/src/main/java/com/alibaba/fluss/client/admin/FlussAdmin.java

Lines changed: 29 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@
3838
import com.alibaba.fluss.rpc.GatewayClientProxy;
3939
import com.alibaba.fluss.rpc.RpcClient;
4040
import com.alibaba.fluss.rpc.gateway.AdminGateway;
41+
import com.alibaba.fluss.rpc.gateway.AdminReadOnlyGateway;
4142
import com.alibaba.fluss.rpc.gateway.TabletServerGateway;
4243
import com.alibaba.fluss.rpc.messages.CreateAclsRequest;
4344
import com.alibaba.fluss.rpc.messages.CreateDatabaseRequest;
@@ -99,13 +100,17 @@
99100
public class FlussAdmin implements Admin {
100101

101102
private final AdminGateway gateway;
103+
private final AdminReadOnlyGateway readOnlyGateway;
102104
private final MetadataUpdater metadataUpdater;
103105
private final RpcClient client;
104106

105107
public FlussAdmin(RpcClient client, MetadataUpdater metadataUpdater) {
106108
this.gateway =
107109
GatewayClientProxy.createGatewayProxy(
108110
metadataUpdater::getCoordinatorServer, client, AdminGateway.class);
111+
this.readOnlyGateway =
112+
GatewayClientProxy.createGatewayProxy(
113+
metadataUpdater::getRandomTabletServer, client, AdminGateway.class);
109114
this.metadataUpdater = metadataUpdater;
110115
this.client = client;
111116
}
@@ -119,7 +124,7 @@ public CompletableFuture<List<ServerNode>> getServerNodes() {
119124
List<ServerNode> serverNodeList = new ArrayList<>();
120125
Cluster cluster =
121126
sendMetadataRequestAndRebuildCluster(
122-
gateway,
127+
readOnlyGateway,
123128
false,
124129
metadataUpdater.getCluster(),
125130
null,
@@ -142,7 +147,8 @@ public CompletableFuture<SchemaInfo> getTableSchema(TablePath tablePath) {
142147
request.setTablePath()
143148
.setDatabaseName(tablePath.getDatabaseName())
144149
.setTableName(tablePath.getTableName());
145-
return gateway.getTableSchema(request)
150+
return readOnlyGateway
151+
.getTableSchema(request)
146152
.thenApply(
147153
r ->
148154
new SchemaInfo(
@@ -157,7 +163,8 @@ public CompletableFuture<SchemaInfo> getTableSchema(TablePath tablePath, int sch
157163
.setTablePath()
158164
.setDatabaseName(tablePath.getDatabaseName())
159165
.setTableName(tablePath.getTableName());
160-
return gateway.getTableSchema(request)
166+
return readOnlyGateway
167+
.getTableSchema(request)
161168
.thenApply(
162169
r ->
163170
new SchemaInfo(
@@ -179,7 +186,8 @@ public CompletableFuture<Void> createDatabase(
179186
public CompletableFuture<DatabaseInfo> getDatabaseInfo(String databaseName) {
180187
GetDatabaseInfoRequest request = new GetDatabaseInfoRequest();
181188
request.setDatabaseName(databaseName);
182-
return gateway.getDatabaseInfo(request)
189+
return readOnlyGateway
190+
.getDatabaseInfo(request)
183191
.thenApply(
184192
r ->
185193
new DatabaseInfo(
@@ -204,13 +212,14 @@ public CompletableFuture<Void> dropDatabase(
204212
public CompletableFuture<Boolean> databaseExists(String databaseName) {
205213
DatabaseExistsRequest request = new DatabaseExistsRequest();
206214
request.setDatabaseName(databaseName);
207-
return gateway.databaseExists(request).thenApply(DatabaseExistsResponse::isExists);
215+
return readOnlyGateway.databaseExists(request).thenApply(DatabaseExistsResponse::isExists);
208216
}
209217

210218
@Override
211219
public CompletableFuture<List<String>> listDatabases() {
212220
ListDatabasesRequest request = new ListDatabasesRequest();
213-
return gateway.listDatabases(request)
221+
return readOnlyGateway
222+
.listDatabases(request)
214223
.thenApply(ListDatabasesResponse::getDatabaseNamesList);
215224
}
216225

@@ -233,7 +242,8 @@ public CompletableFuture<TableInfo> getTableInfo(TablePath tablePath) {
233242
request.setTablePath()
234243
.setDatabaseName(tablePath.getDatabaseName())
235244
.setTableName(tablePath.getTableName());
236-
return gateway.getTableInfo(request)
245+
return readOnlyGateway
246+
.getTableInfo(request)
237247
.thenApply(
238248
r ->
239249
TableInfo.of(
@@ -261,14 +271,14 @@ public CompletableFuture<Boolean> tableExists(TablePath tablePath) {
261271
request.setTablePath()
262272
.setDatabaseName(tablePath.getDatabaseName())
263273
.setTableName(tablePath.getTableName());
264-
return gateway.tableExists(request).thenApply(TableExistsResponse::isExists);
274+
return readOnlyGateway.tableExists(request).thenApply(TableExistsResponse::isExists);
265275
}
266276

267277
@Override
268278
public CompletableFuture<List<String>> listTables(String databaseName) {
269279
ListTablesRequest request = new ListTablesRequest();
270280
request.setDatabaseName(databaseName);
271-
return gateway.listTables(request).thenApply(ListTablesResponse::getTableNamesList);
281+
return readOnlyGateway.listTables(request).thenApply(ListTablesResponse::getTableNamesList);
272282
}
273283

274284
@Override
@@ -289,7 +299,8 @@ public CompletableFuture<List<PartitionInfo>> listPartitionInfos(
289299
PbPartitionSpec pbPartitionSpec = makePbPartitionSpec(partitionSpec);
290300
request.setPartialPartitionSpec(pbPartitionSpec);
291301
}
292-
return gateway.listPartitionInfos(request)
302+
return readOnlyGateway
303+
.listPartitionInfos(request)
293304
.thenApply(ClientRpcMessageUtils::toPartitionInfos);
294305
}
295306

@@ -315,7 +326,8 @@ public CompletableFuture<KvSnapshots> getLatestKvSnapshots(TablePath tablePath)
315326
request.setTablePath()
316327
.setDatabaseName(tablePath.getDatabaseName())
317328
.setTableName(tablePath.getTableName());
318-
return gateway.getLatestKvSnapshots(request)
329+
return readOnlyGateway
330+
.getLatestKvSnapshots(request)
319331
.thenApply(ClientRpcMessageUtils::toKvSnapshots);
320332
}
321333

@@ -328,7 +340,8 @@ public CompletableFuture<KvSnapshots> getLatestKvSnapshots(
328340
.setDatabaseName(tablePath.getDatabaseName())
329341
.setTableName(tablePath.getTableName());
330342
request.setPartitionName(partitionName);
331-
return gateway.getLatestKvSnapshots(request)
343+
return readOnlyGateway
344+
.getLatestKvSnapshots(request)
332345
.thenApply(ClientRpcMessageUtils::toKvSnapshots);
333346
}
334347

@@ -342,7 +355,8 @@ public CompletableFuture<KvSnapshotMetadata> getKvSnapshotMetadata(
342355
request.setTableId(bucket.getTableId())
343356
.setBucketId(bucket.getBucket())
344357
.setSnapshotId(snapshotId);
345-
return gateway.getKvSnapshotMetadata(request)
358+
return readOnlyGateway
359+
.getKvSnapshotMetadata(request)
346360
.thenApply(ClientRpcMessageUtils::toKvSnapshotMetadata);
347361
}
348362

@@ -353,7 +367,8 @@ public CompletableFuture<LakeSnapshot> getLatestLakeSnapshot(TablePath tablePath
353367
.setDatabaseName(tablePath.getDatabaseName())
354368
.setTableName(tablePath.getTableName());
355369

356-
return gateway.getLatestLakeSnapshot(request)
370+
return readOnlyGateway
371+
.getLatestLakeSnapshot(request)
357372
.thenApply(ClientRpcMessageUtils::toLakeTableSnapshotInfo);
358373
}
359374

fluss-client/src/main/java/com/alibaba/fluss/client/lookup/Lookuper.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,8 +34,8 @@ public interface Lookuper {
3434
* Lookups certain row from the given lookup key.
3535
*
3636
* <p>The lookup key must be a primary key if the lookuper is a Primary Key Lookuper (created by
37-
* {@code table.newLookuper().create()}), or be the prefix key if the lookuper is a Prefix Key
38-
* Lookuper (created by {@code table.newLookuper().withLookupColumns(prefixKeys).create()}).
37+
* {@code table.newLookup().createLookuper()}), or be the prefix key if the lookuper is a Prefix
38+
* Key Lookuper (created by {@code table.newLookup().lookupBy(prefixKeys).createLookuper()}).
3939
*
4040
* @param lookupKey the lookup key.
4141
* @return the result of lookup.

fluss-client/src/main/java/com/alibaba/fluss/client/table/scanner/log/LogFetchBuffer.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -175,7 +175,7 @@ CompletedFetch poll() {
175175
* <li>The thread was interrupted
176176
* </ol>
177177
*
178-
* @param deadlineNanos the deadline time to wait util
178+
* @param deadlineNanos the deadline time to wait until
179179
* @return false if the waiting time detectably elapsed before return from the method, else true
180180
*/
181181
boolean awaitNotEmpty(long deadlineNanos) throws InterruptedException {

0 commit comments

Comments (0)