|
| 1 | +# Use the PostgreSQL API for CosmosDB |
| 2 | + |
| 3 | +## Decision |
| 4 | + |
| 5 | +EDC will use the PostgreSQL API for CosmosDB instead of Cosmos' own client SDK ("SQL API"). |
| 6 | + |
| 7 | +## Rationale |
| 8 | + |
| 9 | +Now that Cosmos fully supports PostgreSQL (https://learn.microsoft.com/en-us/azure/cosmos-db/postgresql/introduction), there is no need to keep the CosmosDB client around. CosmosDB can now be targeted using the PostgreSQL JDBC driver.
| 10 | +This will greatly reduce the maintenance surface, and it may even reduce test runtime. |
| 11 | + |
| 12 | +These store implementations do not yet have a PostgreSQL variant: |
| 13 | +- `FederatedCacheNodeDirectory` |
| 14 | + |
| 15 | + |
| 16 | +## Approach |
| 17 | + |
| 18 | +- delete all specific CosmosDB implementations for persistence, such as `AssetIndex`, etc. |
| 19 | +- delete the `CosmosFederatedCacheNodeDirectory`, as it is not used currently |
| 20 | +- for our integration tests, we'll need to provision a "Cosmos Postgres cluster" and retire our existing CosmosDB instances
| 21 | +- for each store implementation, add tests in the `Technology-Azure` repo targeting a CosmosDB PostgreSQL cluster specifically, for example: |
| 22 | + ```java |
| 23 | + @ComponentTest |
| 24 | + @ExtendWith(EdcExtension.class) |
| 25 | + public class CosmosPostgresTest extends AssetIndexTestBase { |
| 26 | + private final BaseSqlDialectStatements sqlStatements = new PostgresDialectStatements(); |
| 27 | + private final QueryExecutor queryExecutor = new SqlQueryExecutor(); |
| 28 | + private SqlAssetIndex sqlAssetIndex; |
| 29 | + private NoopTransactionContext transactionContext; |
| 30 | + private DataSource dataSource; |
| 31 | + |
| 32 | + @BeforeEach |
| 33 | +    void setUp() throws IOException {
| 34 | + var typeManager = new TypeManager(); |
| 35 | + typeManager.registerTypes(PolicyRegistrationTypes.TYPES.toArray(Class<?>[]::new)); |
| 36 | + |
| 37 | + var dsName = "test-ds"; |
| 38 | + var reg = new DefaultDataSourceRegistry(); |
| 39 | + dataSource = createDataSource(); |
| 40 | + reg.register(dsName, dataSource); |
| 41 | + |
| 42 | + System.setProperty("edc.datasource.asset.name", dsName); |
| 43 | + |
| 44 | + transactionContext = new NoopTransactionContext(); |
| 45 | +        sqlAssetIndex = new SqlAssetIndex(reg, dsName, transactionContext, typeManager.getMapper(), sqlStatements, queryExecutor);
| 46 | + |
| 47 | + var schema = Files.readString(Paths.get("docs/schema.sql")); |
| 48 | + runQuery(schema); |
| 49 | + } |
| 50 | + |
| 51 | + @AfterEach |
| 52 | + void tearDown() { |
| 53 | + runQuery("DROP TABLE " + sqlStatements.getAssetTable() + " CASCADE"); |
| 54 | + runQuery("DROP TABLE " + sqlStatements.getDataAddressTable() + " CASCADE"); |
| 55 | + runQuery("DROP TABLE " + sqlStatements.getAssetPropertyTable() + " CASCADE"); |
| 56 | + } |
| 57 | + |
| 58 | + @Override |
| 59 | + protected SqlAssetIndex getAssetIndex() { |
| 60 | + return sqlAssetIndex; |
| 61 | + } |
| 62 | + |
| 63 | + private DataSource createDataSource() { |
| 64 | + var ds = new PGSimpleDataSource(); |
| 65 | + // this can be obtained after setting up the Cosmos-PG cluster |
| 66 | + // should be injected through environment variables |
| 67 | + ds.setServerNames(new String[]{ "c-edc-pg-test-cluster.pnrboctaun4gkt.postgres.cosmos.azure.com" }); |
| 68 | + ds.setPortNumbers(new int[]{ 5432 }); |
| 69 | + ds.setUser("<USER>"); |
| 70 | + ds.setPassword("<PASSWORD>"); |
| 71 | + ds.setDatabaseName("<DBNAME>"); |
| 72 | + ds.setSslmode("require"); |
| 73 | + return ds; |
| 74 | + } |
| 75 | + |
| 76 | +    private void runQuery(String sql) {
| 77 | +        try (var connection = dataSource.getConnection()) {
| 78 | +            transactionContext.execute(() -> queryExecutor.execute(connection, sql));
| 79 | + } catch (SQLException e) { |
| 80 | + throw new RuntimeException(e); |
| 81 | + } |
| 82 | + } |
| 83 | + } |
| 84 | + ``` |
0 commit comments