@@ -15,7 +15,7 @@ import org.radarbase.output.config.RestructureConfig.Companion.copyEnv
 import org.radarbase.output.config.RestructureConfig.Companion.copyOnChange
 import org.radarbase.output.format.FormatFactory
 import org.radarbase.output.format.RecordConverterFactory
-import org.radarbase.output.path.ObservationKeyPathFactory
+import org.radarbase.output.path.FormattedPathFactory
 import org.radarbase.output.path.RecordPathFactory
 import org.slf4j.LoggerFactory
 import java.net.URI
@@ -119,58 +119,58 @@ data class RedisConfig(
 }

 data class ServiceConfig(
-        /** Whether to enable the service mode of this application. */
-        val enable: Boolean,
-        /** Polling interval in seconds. */
-        val interval: Long = 300L,
-        /** Age in days after an avro file can be removed. Ignored if not strictly positive. */
-        val deleteAfterDays: Int = -1) {
-
+    /** Whether to enable the service mode of this application. */
+    val enable: Boolean,
+    /** Polling interval in seconds. */
+    val interval: Long = 300L,
+    /** Age in days after an avro file can be removed. Ignored if not strictly positive. */
+    val deleteAfterDays: Int = -1,
+) {
     fun validate() {
         check(interval > 0) { "Cleaner interval must be strictly positive" }
     }
 }

 data class CleanerConfig(
-        /** Whether to enable the cleaner. */
-        val enable: Boolean = false,
-        /** How often to run the cleaner in seconds. */
-        val interval: Long = 1260L,
-        /** Age in days after an avro file can be removed. Must be strictly positive. */
-        val age: Int = 7) {
-
+    /** Whether to enable the cleaner. */
+    val enable: Boolean = false,
+    /** How often to run the cleaner in seconds. */
+    val interval: Long = 1260L,
+    /** Age in days after an avro file can be removed. Must be strictly positive. */
+    val age: Int = 7,
+) {
     fun validate() {
         check(age > 0) { "Cleaner file age must be strictly positive" }
         check(interval > 0) { "Cleaner interval must be strictly positive" }
     }
 }

 data class WorkerConfig(
-        /** Whether to enable restructuring */
-        val enable: Boolean = true,
-        /** Number of threads to use for processing files. */
-        val numThreads: Int = 1,
-        /**
-         * Maximum number of files to process for a given topic. Limit this to ensure that a single
-         * processing iteration including lock takes a limited amount of time.
-         */
-        val maxFilesPerTopic: Int? = null,
-        /**
-         * Number of files to simultaneously keep in cache, including open writer. A higher size will
-         * decrease overhead but increase memory usage and open file descriptors.
-         */
-        val cacheSize: Int = CACHE_SIZE_DEFAULT,
-        /**
-         * Number of offsets to simultaneously keep in cache. A higher size will
-         * decrease overhead but increase memory usage.
-         */
-        val cacheOffsetsSize: Long = 500_000,
-        /**
-         * Minimum time since the file was last modified in seconds. Avoids
-         * synchronization issues that may occur in a source file that is being
-         * appended to.
-         */
-        val minimumFileAge: Long = 60
+    /** Whether to enable restructuring */
+    val enable: Boolean = true,
+    /** Number of threads to use for processing files. */
+    val numThreads: Int = 1,
+    /**
+     * Maximum number of files to process for a given topic. Limit this to ensure that a single
+     * processing iteration including lock takes a limited amount of time.
+     */
+    val maxFilesPerTopic: Int? = null,
+    /**
+     * Number of files to simultaneously keep in cache, including open writer. A higher size will
+     * decrease overhead but increase memory usage and open file descriptors.
+     */
+    val cacheSize: Int = CACHE_SIZE_DEFAULT,
+    /**
+     * Number of offsets to simultaneously keep in cache. A higher size will
+     * decrease overhead but increase memory usage.
+     */
+    val cacheOffsetsSize: Long = 500_000,
+    /**
+     * Minimum time since the file was last modified in seconds. Avoids
+     * synchronization issues that may occur in a source file that is being
+     * appended to.
+     */
+    val minimumFileAge: Long = 60,
 ) {
     init {
         check(cacheSize >= 1) { "Maximum files per topic must be strictly positive" }
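
A minimal usage sketch of the reformatted service classes, assuming only the constructors and validate() calls visible in this hunk; the values are illustrative, not defaults introduced by this change:

    val service = ServiceConfig(
        enable = true,        // run in service mode
        interval = 300L,      // poll every 5 minutes
        deleteAfterDays = -1, // non-positive disables deletion
    )
    service.validate()        // fails if interval <= 0

    val worker = WorkerConfig(
        numThreads = 4,         // process files in parallel
        maxFilesPerTopic = 500, // bound a single processing iteration
    )
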
@@ -187,23 +187,23 @@ interface PluginConfig {
 }

 data class PathConfig(
-        override val factory: String = ObservationKeyPathFactory::class.qualifiedName!!,
-        override val properties: Map<String, String> = emptyMap(),
-        /** Input paths referencing the source resource. */
-        val inputs: List<Path> = emptyList(),
-        /** Temporary directory for processing output files before uploading. */
-        val temp: Path = Files.createTempDirectory("radar-output-restructure"),
-        /** Output path on the target resource. */
-        val output: Path = Paths.get("output")
+    override val factory: String = FormattedPathFactory::class.qualifiedName!!,
+    override val properties: Map<String, String> = emptyMap(),
+    /** Input paths referencing the source resource. */
+    val inputs: List<Path> = emptyList(),
+    /** Temporary directory for processing output files before uploading. */
+    val temp: Path = Files.createTempDirectory("radar-output-restructure"),
+    /** Output path on the target resource. */
+    val output: Path = Paths.get("output"),
 ) : PluginConfig {
     fun createFactory(): RecordPathFactory = factory.toPluginInstance(properties)
 }

 data class CompressionConfig(
-        override val factory: String = CompressionFactory::class.qualifiedName!!,
-        override val properties: Map<String, String> = emptyMap(),
-        /** Compression type. Currently one of gzip, zip or none. */
-        val type: String = "none"
+    override val factory: String = CompressionFactory::class.qualifiedName!!,
+    override val properties: Map<String, String> = emptyMap(),
+    /** Compression type. Currently one of gzip, zip or none. */
+    val type: String = "none",
 ) : PluginConfig {
     fun createFactory(): CompressionFactory = factory.toPluginInstance(properties)
     fun createCompression(): Compression = createFactory()[type]
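
Beyond the trailing-comma cleanup, this hunk carries the functional change of the PR: the default path factory moves from ObservationKeyPathFactory to FormattedPathFactory. A hedged sketch of configuring it through the generic properties map; the "format" key and its placeholder names are assumptions about FormattedPathFactory, not shown in this diff:

    val path = PathConfig(
        // "format" and the ${...} placeholders are assumed, for illustration only
        properties = mapOf("format" to "\${projectId}/\${userId}/\${topic}/\${filename}"),
    )
    val pathFactory: RecordPathFactory = path.createFactory()

    val compression = CompressionConfig(type = "gzip")
    val compressionImpl: Compression = compression.createCompression()
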
@@ -231,15 +231,16 @@ private inline fun <reified T: Plugin> String.toPluginInstance(properties: Map<S
 }

 data class TopicConfig(
-        /** Topic-specific deduplication handling. */
-        val deduplication: DeduplicationConfig = DeduplicationConfig(),
-        /** Whether to exclude the topic from being processed. */
-        val exclude: Boolean = false,
-        /**
-         * Whether to exclude the topic from being deleted, if this configuration has been set
-         * in the service.
-         */
-        val excludeFromDelete: Boolean = false) {
+    /** Topic-specific deduplication handling. */
+    val deduplication: DeduplicationConfig = DeduplicationConfig(),
+    /** Whether to exclude the topic from being processed. */
+    val exclude: Boolean = false,
+    /**
+     * Whether to exclude the topic from being deleted, if this configuration has been set
+     * in the service.
+     */
+    val excludeFromDelete: Boolean = false,
+) {
     fun deduplication(deduplicationDefault: DeduplicationConfig): DeduplicationConfig = deduplication
         .withDefaults(deduplicationDefault)
 }
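
For illustration, the per-topic merge visible in the context lines: a topic's own deduplication settings are overlaid on service-wide defaults via withDefaults. A sketch assuming only what this hunk shows:

    val topic = TopicConfig(excludeFromDelete = true)
    // service-wide defaults fill in whatever the topic leaves unset
    val effective: DeduplicationConfig = topic.deduplication(DeduplicationConfig())
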
@@ -264,10 +265,11 @@ data class DeduplicationConfig(
 }

 data class HdfsConfig(
-        /** HDFS name nodes to use. */
-        val nameNodes: List<String> = emptyList(),
-        /** Additional HDFS configuration parameters. */
-        val properties: Map<String, String> = emptyMap()) {
+    /** HDFS name nodes to use. */
+    val nameNodes: List<String> = emptyList(),
+    /** Additional HDFS configuration parameters. */
+    val properties: Map<String, String> = emptyMap(),
+) {

     val configuration: Configuration = Configuration()

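
A sketch of how HdfsConfig might be populated; the host name and Hadoop property are hypothetical examples, and how properties feeds the Configuration field is not shown in this hunk:

    val hdfs = HdfsConfig(
        nameNodes = listOf("hdfs-namenode-1"),        // hypothetical host
        properties = mapOf("dfs.replication" to "2"), // example Hadoop setting
    )
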
@@ -296,20 +298,20 @@ data class HdfsConfig(
 }

 data class ResourceConfig(
-        /** Resource type. One of s3, hdfs or local. */
-        val type: String,
-        val s3: S3Config? = null,
-        val hdfs: HdfsConfig? = null,
-        val local: LocalConfig? = null,
-        val azure: AzureConfig? = null) {
-
+    /** Resource type. One of s3, hdfs or local. */
+    val type: String,
+    val s3: S3Config? = null,
+    val hdfs: HdfsConfig? = null,
+    val local: LocalConfig? = null,
+    val azure: AzureConfig? = null,
+) {
     @JsonIgnore
     lateinit var sourceType: ResourceType

     fun validate() {
         sourceType = type.toResourceType()

-        when (sourceType) {
+        when (sourceType) {
             ResourceType.S3 -> checkNotNull(s3) { "No S3 configuration provided." }
             ResourceType.HDFS -> checkNotNull(hdfs) { "No HDFS configuration provided." }.also { it.validate() }
             ResourceType.LOCAL -> checkNotNull(local) { "No local configuration provided." }
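
A sketch of the validation flow shown above, assuming an HDFS-backed resource; HdfsConfig.validate is not shown here and may impose further checks (for instance, a non-empty name node list):

    val source = ResourceConfig(
        type = "hdfs",
        hdfs = HdfsConfig(nameNodes = listOf("hdfs-namenode")), // hypothetical host
    )
    source.validate() // resolves sourceType and checks the matching sub-config
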
@@ -357,9 +359,9 @@ data class S3Config(
     val endOffsetFromTags: Boolean = false,
 ) {
     fun createS3Client(): MinioClient = MinioClient.Builder()
-            .endpoint(endpoint)
-            .credentials(accessToken, secretKey)
-            .build()
+        .endpoint(endpoint)
+        .credentials(accessToken, secretKey)
+        .build()

     fun withEnv(prefix: String): S3Config = this
         .copyEnv("${prefix}S3_ACCESS_TOKEN") { copy(accessToken = it) }
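
Finally, a sketch of the environment-override pattern visible in withEnv: copyEnv reads ${prefix}S3_ACCESS_TOKEN and, when set, copies the value into the config. The prefix value and base config are assumed for illustration:

    // With RADAR_S3_ACCESS_TOKEN set in the environment, the token is overridden.
    val s3: S3Config = baseS3Config.withEnv("RADAR_") // baseS3Config is hypothetical
    val client: MinioClient = s3.createS3Client()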