
Commit 2fba0bb

[ES-1436915] Modify an e2e test to iterate over the rs in a separate thread (#807)
This helps identify potential thread-safety issues, such as problems with state propagation and context management. Customers may interact with first-class JDBC objects like Statement and ResultSet from one thread while the JDBC Connection object is managed in another. The test simulates this multithreaded environment to verify that the driver handles concurrent access to these objects, surfacing any synchronization problems or unintended side effects that arise when JDBC components are used from multiple threads.
1 parent bea0564 commit 2fba0bb
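For context, the access pattern the commit message describes looks roughly like the sketch below: the Connection lives on the owning thread while a worker thread drives the Statement and ResultSet, with failures handed back through an AtomicReference. This is a minimal illustration using only standard JDBC APIs, not code from the repo; the class name CrossThreadResultSetSketch, the JDBC URL, and the table name are placeholders.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.concurrent.atomic.AtomicReference;

public class CrossThreadResultSetSketch {
  public static void main(String[] args) throws Exception {
    // The Connection is created and managed on this (the owning) thread.
    try (Connection connection = DriverManager.getConnection("jdbc:databricks://<host>");
        Statement statement = connection.createStatement()) {

      // Exceptions thrown on the worker thread would otherwise vanish;
      // capture them so the owning thread can inspect and rethrow.
      AtomicReference<Throwable> failure = new AtomicReference<>();

      // The Statement and ResultSet are driven from a separate thread.
      Thread worker =
          new Thread(
              () -> {
                try (ResultSet rs = statement.executeQuery("SELECT * FROM some_table")) {
                  while (rs.next()) {
                    // consume rows
                  }
                } catch (Throwable t) {
                  failure.set(t);
                }
              });
      worker.start();
      worker.join(10_000); // bounded wait so a hung driver fails the run instead of blocking it

      if (failure.get() != null) {
        throw new RuntimeException("Worker thread failed", failure.get());
      }
    }
  }
}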

File tree: 1 file changed (+57, -33 lines)


src/test/java/com/databricks/jdbc/integration/fakeservice/tests/MultiChunkExecutionIntegrationTests.java

Lines changed: 57 additions & 33 deletions
@@ -4,8 +4,7 @@
 import static com.databricks.jdbc.integration.IntegrationTestUtil.getValidJDBCConnection;
 import static com.github.tomakehurst.wiremock.client.WireMock.getRequestedFor;
 import static com.github.tomakehurst.wiremock.client.WireMock.urlPathMatching;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;

 import com.databricks.jdbc.api.impl.DatabricksResultSet;
 import com.databricks.jdbc.api.impl.DatabricksResultSetMetaData;
@@ -15,13 +14,14 @@
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Properties;
+import java.util.concurrent.atomic.AtomicReference;
 import org.junit.jupiter.api.Test;

 /** Test SQL execution with results spanning multiple chunks. */
 public class MultiChunkExecutionIntegrationTests extends AbstractFakeServiceIntegrationTests {

   @Test
-  void testMultiChunkSelect() throws SQLException {
+  void testMultiChunkSelect() throws SQLException, InterruptedException {
     final String table = "samples.tpch.lineitem";

     // To save on the size of stub mappings, the test uses just enough rows to span multiple chunks.
@@ -36,38 +36,62 @@ void testMultiChunkSelect() throws SQLException {
     final Statement statement = connection.createStatement();
     statement.setMaxRows(maxRows);

-    try (ResultSet rs = statement.executeQuery(sql)) {
-      DatabricksResultSetMetaData metaData = (DatabricksResultSetMetaData) rs.getMetaData();
+    final AtomicReference<Throwable> threadException = new AtomicReference<>();

-      int rowCount = 0;
-      while (rs.next()) {
-        rowCount++;
-      }
+    // Iterate through the result set in a different thread to surface any 1st-level thread-safety
+    // issues
+    Thread thread =
+        new Thread(
+            () -> {
+              try (ResultSet rs = statement.executeQuery(sql)) {
+                DatabricksResultSetMetaData metaData =
+                    (DatabricksResultSetMetaData) rs.getMetaData();
+
+                int rowCount = 0;
+                while (rs.next()) {
+                  rowCount++;
+                }
+
+                // The result should have the same number of rows as the limit
+                assertEquals(maxRows, rowCount);
+                assertEquals(maxRows, metaData.getTotalRows());
+
+                // The result should be split into multiple chunks
+                assertTrue(metaData.getChunkCount() > 1, "Chunk count should be greater than 1");
+
+                // The number of cloud fetch calls should be equal to the number of chunks
+                final int cloudFetchCalls =
+                    getCloudFetchApiExtension()
+                        .countRequestsMatching(getRequestedFor(urlPathMatching(".*")).build())
+                        .getCount();
+                // cloud fetch calls can be retried
+                assertTrue(cloudFetchCalls >= metaData.getChunkCount());
+
+                if (isSqlExecSdkClient()) {
+                  // Number of requests to fetch external links should be one less than the total
+                  // number of chunks as first chunk link is already fetched
+                  final String statementId = ((DatabricksResultSet) rs).getStatementId();
+                  final String resultChunkPathRegex =
+                      String.format(RESULT_CHUNK_PATH, statementId, ".*");
+                  getDatabricksApiExtension()
+                      .verify(
+                          (int) (metaData.getChunkCount() - 1),
+                          getRequestedFor(urlPathMatching(resultChunkPathRegex)));
+                }
+              } catch (Throwable e) {
+                threadException.set(e);
+              }
+            });
+
+    thread.start();
+    thread.join(10_000);

-      // The result should have the same number of rows as the limit
-      assertEquals(maxRows, rowCount);
-      assertEquals(maxRows, metaData.getTotalRows());
-
-      // The result should be split into multiple chunks
-      assertTrue(metaData.getChunkCount() > 1, "Chunk count should be greater than 1");
-
-      // The number of cloud fetch calls should be equal to the number of chunks
-      final int cloudFetchCalls =
-          getCloudFetchApiExtension()
-              .countRequestsMatching(getRequestedFor(urlPathMatching(".*")).build())
-              .getCount();
-      // cloud fetch calls can be retried
-      assertTrue(cloudFetchCalls >= metaData.getChunkCount());
-
-      if (isSqlExecSdkClient()) {
-        // Number of requests to fetch external links should be one less than the total number of
-        // chunks as first chunk link is already fetched
-        final String statementId = ((DatabricksResultSet) rs).getStatementId();
-        final String resultChunkPathRegex = String.format(RESULT_CHUNK_PATH, statementId, ".*");
-        getDatabricksApiExtension()
-            .verify(
-                (int) (metaData.getChunkCount() - 1),
-                getRequestedFor(urlPathMatching(resultChunkPathRegex)));
+    // Check if the thread had an exception
+    if (threadException.get() != null) {
+      if (threadException.get() instanceof AssertionError) {
+        throw (AssertionError) threadException.get();
+      } else {
+        fail("Test thread failed with exception: " + threadException.get());
       }
     }
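
One detail worth noting in the new test: a JUnit assertion that fails inside the worker thread throws AssertionError on that thread, which the test runner would never see on its own. The AtomicReference<Throwable> handoff plus the rethrow after thread.join(10_000) is what propagates the failure to the main thread. The same idea can be factored into a reusable helper; the sketch below is hypothetical (ThreadAssertions and runInThreadAndRethrow are not part of the commit), shown only to make the pattern explicit.

import java.util.concurrent.atomic.AtomicReference;

final class ThreadAssertions {
  /** Hypothetical helper: run a task on a fresh thread and rethrow anything it threw. */
  static void runInThreadAndRethrow(Runnable task, long timeoutMillis)
      throws InterruptedException {
    AtomicReference<Throwable> thrown = new AtomicReference<>();
    Thread thread =
        new Thread(
            () -> {
              try {
                task.run();
              } catch (Throwable t) {
                thrown.set(t); // capture instead of letting the thread die silently
              }
            });
    thread.start();
    thread.join(timeoutMillis); // bounded wait, mirroring the test's join(10_000)

    Throwable t = thrown.get();
    if (t instanceof AssertionError) {
      throw (AssertionError) t; // preserve JUnit failure semantics
    } else if (t != null) {
      throw new AssertionError("Worker thread failed: " + t, t);
    }
  }
}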