@@ -125,7 +125,7 @@ services:
         condition: service_completed_successfully
       rabbitmq:
         condition: service_healthy
-    profiles: [worker, scheduler, s3, oracle, hdfs, hive, clickhouse, mysql, mssql, all]
+    profiles: [worker, scheduler, s3, oracle, hdfs, hive, clickhouse, mysql, mssql, sftp, all]

   test-postgres:
     image: postgres
@@ -139,7 +139,7 @@ services:
       interval: 30s
       timeout: 5s
       retries: 3
-    profiles: [s3, oracle, clickhouse, mysql, mssql, hdfs, hive, all]
+    profiles: [s3, oracle, clickhouse, mysql, mssql, hdfs, hive, sftp, all]

   test-s3:
     image: bitnami/minio:latest
@@ -266,6 +266,20 @@ services:
     # writing spark dataframe to s3 xml file fails without running hive metastore server
     profiles: [hive, hdfs, s3, all]

+  test-sftp:
+    image: ${SFTP_IMAGE:-linuxserver/openssh-server}
+    restart: unless-stopped
+    ports:
+      - 2222:2222
+    environment:
+      PUID: 1000
+      PGID: 1000
+      USER_NAME: syncmaster
+      PASSWORD_ACCESS: "true"
+      SUDO_ACCESS: "true"
+      USER_PASSWORD: AesujeifohgoaCu0Boosiet5aimeitho
+    profiles: [sftp, all]
+
 volumes:
   postgres_test_data:
   rabbitmq_test_data:
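The new `test-sftp` service publishes port 2222 and creates a `syncmaster` user with password authentication enabled, so it can be reached from the host once started with `docker compose --profile sftp up -d test-sftp`. Below is a minimal smoke-test sketch for connecting to that container; it assumes paramiko as the SFTP client (the project itself may use a different library) and reuses only the port and credentials defined in the compose file above.

```python
# Minimal sketch: connect to the test-sftp container defined in the compose file.
# Assumes paramiko is installed; the project may use a different SFTP client.
import paramiko


def sftp_smoke_test() -> None:
    client = paramiko.SSHClient()
    # The test container generates its own host key, so accept it blindly (tests only).
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(
        hostname="localhost",
        port=2222,  # published port from the compose file
        username="syncmaster",  # USER_NAME from the compose file
        password="AesujeifohgoaCu0Boosiet5aimeitho",  # USER_PASSWORD from the compose file
    )
    try:
        sftp = client.open_sftp()
        # Listing the home directory is enough to prove that auth and the SFTP subsystem work.
        print(sftp.listdir("."))
        sftp.close()
    finally:
        client.close()


if __name__ == "__main__":
    sftp_smoke_test()
```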