Skip to content

Commit a18f81f

Browse files
committed
Use environment variables for secrets
1 parent e37533a commit a18f81f

File tree

5 files changed

+99
-80
lines changed

5 files changed

+99
-80
lines changed

README.md

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -146,6 +146,21 @@ target:
146146
groupId: 100 # write as regular group, use -1 to use current user (default).
147147
```
148148
149+
Secrets can be provided as environment variables as well:
150+
151+
| Environment variable | Corresponding value |
152+
| --- | --- |
153+
| `SOURCE_S3_ACCESS_TOKEN` | `source.s3.accessToken` |
154+
| `SOURCE_S3_SECRET_KEY` | `source.s3.secretKey` |
155+
| `SOURCE_AZURE_USERNAME` | `source.azure.username` |
156+
| `SOURCE_AZURE_PASSWORD` | `source.azure.password` |
157+
| `SOURCE_AZURE_ACCOUNT_NAME` | `source.azure.accountName` |
158+
| `SOURCE_AZURE_ACCOUNT_KEY` | `source.azure.accountKey` |
159+
| `SOURCE_AZURE_SAS_TOKEN` | `source.azure.sasToken` |
160+
| `REDIS_URL` | `redis.url` |
161+
162+
Replace `SOURCE` with `TARGET` in the variables above to configure the target storage.
163+
149164
### Cleaner
150165

151166
Source files can be automatically removed by a cleaner process. This checks whether the file has already been extracted and is older than a configured age. This feature is not enabled by default. It can be configured in the `cleaner` configuration section:

gradle.properties

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1,2 @@
1-
kotlinVersion=1.4.20
1+
kotlinVersion=1.4.32
2+
dokkaVersion=1.4.30

restructure.yml

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -132,9 +132,10 @@ paths:
132132
# Output directory in target
133133
output: /output
134134
# Output path construction factory
135-
#factory: org.radarbase.output.path.MonthlyObservationKeyPathFactory
135+
factory: org.radarbase.output.path.FormattedPathFactory
136136
# Additional properties
137-
# properties: {}
137+
# properties:
138+
# format: ${projectId}/${userId}/${topic}/${time:mm}/${time:YYYYmmDD_HH'00'}${attempt}${extension}
138139

139140
# Individual topic configuration
140141
topics:

settings.gradle

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ pluginManagement {
55
useModule("org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlinVersion")
66
}
77
if (requested.id.id == "org.jetbrains.dokka") {
8-
useModule("org.jetbrains.dokka:dokka-gradle-plugin:$kotlinVersion")
8+
useModule("org.jetbrains.dokka:dokka-gradle-plugin:$dokkaVersion")
99
}
1010
}
1111
}

src/main/java/org/radarbase/output/config/RestructureConfig.kt

Lines changed: 78 additions & 76 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ import org.radarbase.output.config.RestructureConfig.Companion.copyEnv
1515
import org.radarbase.output.config.RestructureConfig.Companion.copyOnChange
1616
import org.radarbase.output.format.FormatFactory
1717
import org.radarbase.output.format.RecordConverterFactory
18-
import org.radarbase.output.path.ObservationKeyPathFactory
18+
import org.radarbase.output.path.FormattedPathFactory
1919
import org.radarbase.output.path.RecordPathFactory
2020
import org.slf4j.LoggerFactory
2121
import java.net.URI
@@ -119,58 +119,58 @@ data class RedisConfig(
119119
}
120120

121121
data class ServiceConfig(
122-
/** Whether to enable the service mode of this application. */
123-
val enable: Boolean,
124-
/** Polling interval in seconds. */
125-
val interval: Long = 300L,
126-
/** Age in days after an avro file can be removed. Ignored if not strictly positive. */
127-
val deleteAfterDays: Int = -1) {
128-
122+
/** Whether to enable the service mode of this application. */
123+
val enable: Boolean,
124+
/** Polling interval in seconds. */
125+
val interval: Long = 300L,
126+
/** Age in days after an avro file can be removed. Ignored if not strictly positive. */
127+
val deleteAfterDays: Int = -1,
128+
) {
129129
fun validate() {
130130
check(interval > 0) { "Cleaner interval must be strictly positive" }
131131
}
132132
}
133133

134134
data class CleanerConfig(
135-
/** Whether to enable the cleaner. */
136-
val enable: Boolean = false,
137-
/** How often to run the cleaner in seconds. */
138-
val interval: Long = 1260L,
139-
/** Age in days after an avro file can be removed. Must be strictly positive. */
140-
val age: Int = 7) {
141-
135+
/** Whether to enable the cleaner. */
136+
val enable: Boolean = false,
137+
/** How often to run the cleaner in seconds. */
138+
val interval: Long = 1260L,
139+
/** Age in days after an avro file can be removed. Must be strictly positive. */
140+
val age: Int = 7,
141+
) {
142142
fun validate() {
143143
check(age > 0) { "Cleaner file age must be strictly positive" }
144144
check(interval > 0) { "Cleaner interval must be strictly positive" }
145145
}
146146
}
147147

148148
data class WorkerConfig(
149-
/** Whether to enable restructuring */
150-
val enable: Boolean = true,
151-
/** Number of threads to use for processing files. */
152-
val numThreads: Int = 1,
153-
/**
154-
* Maximum number of files to process for a given topic. Limit this to ensure that a single
155-
* processing iteration including lock takes a limited amount of time.
156-
*/
157-
val maxFilesPerTopic: Int? = null,
158-
/**
159-
* Number of files to simultaneously keep in cache, including open writer. A higher size will
160-
* decrease overhead but increase memory usage and open file descriptors.
161-
*/
162-
val cacheSize: Int = CACHE_SIZE_DEFAULT,
163-
/**
164-
* Number of offsets to simultaneously keep in cache. A higher size will
165-
* decrease overhead but increase memory usage.
166-
*/
167-
val cacheOffsetsSize: Long = 500_000,
168-
/**
169-
* Minimum time since the file was last modified in seconds. Avoids
170-
* synchronization issues that may occur in a source file that is being
171-
* appended to.
172-
*/
173-
val minimumFileAge: Long = 60
149+
/** Whether to enable restructuring */
150+
val enable: Boolean = true,
151+
/** Number of threads to use for processing files. */
152+
val numThreads: Int = 1,
153+
/**
154+
* Maximum number of files to process for a given topic. Limit this to ensure that a single
155+
* processing iteration including lock takes a limited amount of time.
156+
*/
157+
val maxFilesPerTopic: Int? = null,
158+
/**
159+
* Number of files to simultaneously keep in cache, including open writer. A higher size will
160+
* decrease overhead but increase memory usage and open file descriptors.
161+
*/
162+
val cacheSize: Int = CACHE_SIZE_DEFAULT,
163+
/**
164+
* Number of offsets to simultaneously keep in cache. A higher size will
165+
* decrease overhead but increase memory usage.
166+
*/
167+
val cacheOffsetsSize: Long = 500_000,
168+
/**
169+
* Minimum time since the file was last modified in seconds. Avoids
170+
* synchronization issues that may occur in a source file that is being
171+
* appended to.
172+
*/
173+
val minimumFileAge: Long = 60,
174174
) {
175175
init {
176176
check(cacheSize >= 1) { "Maximum files per topic must be strictly positive" }
@@ -187,23 +187,23 @@ interface PluginConfig {
187187
}
188188

189189
data class PathConfig(
190-
override val factory: String = ObservationKeyPathFactory::class.qualifiedName!!,
191-
override val properties: Map<String, String> = emptyMap(),
192-
/** Input paths referencing the source resource. */
193-
val inputs: List<Path> = emptyList(),
194-
/** Temporary directory for processing output files before uploading. */
195-
val temp: Path = Files.createTempDirectory("radar-output-restructure"),
196-
/** Output path on the target resource. */
197-
val output: Path = Paths.get("output")
190+
override val factory: String = FormattedPathFactory::class.qualifiedName!!,
191+
override val properties: Map<String, String> = emptyMap(),
192+
/** Input paths referencing the source resource. */
193+
val inputs: List<Path> = emptyList(),
194+
/** Temporary directory for processing output files before uploading. */
195+
val temp: Path = Files.createTempDirectory("radar-output-restructure"),
196+
/** Output path on the target resource. */
197+
val output: Path = Paths.get("output"),
198198
) : PluginConfig {
199199
fun createFactory(): RecordPathFactory = factory.toPluginInstance(properties)
200200
}
201201

202202
data class CompressionConfig(
203-
override val factory: String = CompressionFactory::class.qualifiedName!!,
204-
override val properties: Map<String, String> = emptyMap(),
205-
/** Compression type. Currently one of gzip, zip or none. */
206-
val type: String = "none"
203+
override val factory: String = CompressionFactory::class.qualifiedName!!,
204+
override val properties: Map<String, String> = emptyMap(),
205+
/** Compression type. Currently one of gzip, zip or none. */
206+
val type: String = "none",
207207
) : PluginConfig {
208208
fun createFactory(): CompressionFactory = factory.toPluginInstance(properties)
209209
fun createCompression(): Compression = createFactory()[type]
@@ -231,15 +231,16 @@ private inline fun <reified T: Plugin> String.toPluginInstance(properties: Map<S
231231
}
232232

233233
data class TopicConfig(
234-
/** Topic-specific deduplication handling. */
235-
val deduplication: DeduplicationConfig = DeduplicationConfig(),
236-
/** Whether to exclude the topic from being processed. */
237-
val exclude: Boolean = false,
238-
/**
239-
* Whether to exclude the topic from being deleted, if this configuration has been set
240-
* in the service.
241-
*/
242-
val excludeFromDelete: Boolean = false) {
234+
/** Topic-specific deduplication handling. */
235+
val deduplication: DeduplicationConfig = DeduplicationConfig(),
236+
/** Whether to exclude the topic from being processed. */
237+
val exclude: Boolean = false,
238+
/**
239+
* Whether to exclude the topic from being deleted, if this configuration has been set
240+
* in the service.
241+
*/
242+
val excludeFromDelete: Boolean = false,
243+
) {
243244
fun deduplication(deduplicationDefault: DeduplicationConfig): DeduplicationConfig = deduplication
244245
.withDefaults(deduplicationDefault)
245246
}
@@ -264,10 +265,11 @@ data class DeduplicationConfig(
264265
}
265266

266267
data class HdfsConfig(
267-
/** HDFS name nodes to use. */
268-
val nameNodes: List<String> = emptyList(),
269-
/** Additional HDFS configuration parameters. */
270-
val properties: Map<String, String> = emptyMap()) {
268+
/** HDFS name nodes to use. */
269+
val nameNodes: List<String> = emptyList(),
270+
/** Additional HDFS configuration parameters. */
271+
val properties: Map<String, String> = emptyMap(),
272+
) {
271273

272274
val configuration: Configuration = Configuration()
273275

@@ -296,20 +298,20 @@ data class HdfsConfig(
296298
}
297299

298300
data class ResourceConfig(
299-
/** Resource type. One of s3, hdfs or local. */
300-
val type: String,
301-
val s3: S3Config? = null,
302-
val hdfs: HdfsConfig? = null,
303-
val local: LocalConfig? = null,
304-
val azure: AzureConfig? = null) {
305-
301+
/** Resource type. One of s3, hdfs or local. */
302+
val type: String,
303+
val s3: S3Config? = null,
304+
val hdfs: HdfsConfig? = null,
305+
val local: LocalConfig? = null,
306+
val azure: AzureConfig? = null,
307+
) {
306308
@JsonIgnore
307309
lateinit var sourceType: ResourceType
308310

309311
fun validate() {
310312
sourceType = type.toResourceType()
311313

312-
when(sourceType) {
314+
when (sourceType) {
313315
ResourceType.S3 -> checkNotNull(s3) { "No S3 configuration provided." }
314316
ResourceType.HDFS -> checkNotNull(hdfs) { "No HDFS configuration provided." }.also { it.validate() }
315317
ResourceType.LOCAL -> checkNotNull(local) { "No local configuration provided." }
@@ -357,9 +359,9 @@ data class S3Config(
357359
val endOffsetFromTags: Boolean = false,
358360
) {
359361
fun createS3Client(): MinioClient = MinioClient.Builder()
360-
.endpoint(endpoint)
361-
.credentials(accessToken, secretKey)
362-
.build()
362+
.endpoint(endpoint)
363+
.credentials(accessToken, secretKey)
364+
.build()
363365

364366
fun withEnv(prefix: String): S3Config = this
365367
.copyEnv("${prefix}S3_ACCESS_TOKEN") { copy(accessToken = it) }

0 commit comments

Comments
 (0)