@@ -172,6 +172,9 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
172172 reserveUploadAdditionalInfo : ReserveAdditionalInformation ): Fox [Unit ] =
173173 for {
174174 _ <- dataSourceService.assertDataDirWritable(reserveUploadInfo.organization)
175+ newDataSourceId = DataSourceId (reserveUploadAdditionalInfo.directoryName, reserveUploadInfo.organization)
176+ _ = logger.info(
177+ f " Reserving ${uploadFullName(reserveUploadInfo.uploadId, reserveUploadAdditionalInfo.newDatasetId, newDataSourceId)}... " )
175178 _ <- Fox .fromBool(
176179 ! reserveUploadInfo.needsConversion.getOrElse(false ) || ! reserveUploadInfo.layersToLink
177180 .exists(_.nonEmpty)) ?~> " Cannot use linked layers if the dataset needs conversion"
@@ -186,7 +189,6 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
186189 .insertLong(redisKeyForCurrentUploadedTotalFileSizeInBytes(reserveUploadInfo.uploadId), 0L )
187190 ))
188191 }
189- newDataSourceId = DataSourceId (reserveUploadAdditionalInfo.directoryName, reserveUploadInfo.organization)
190192 _ <- runningUploadMetadataStore.insert(
191193 redisKeyForDataSourceId(reserveUploadInfo.uploadId),
192194 Json .stringify(Json .toJson(newDataSourceId))
@@ -205,8 +207,6 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
205207 redisKeyForLinkedLayerIdentifier(reserveUploadInfo.uploadId),
206208 Json .stringify(Json .toJson(LinkedLayerIdentifiers (reserveUploadInfo.layersToLink)))
207209 )
208- _ = logger.info(
209- f " Reserving ${uploadFullName(reserveUploadInfo.uploadId, reserveUploadAdditionalInfo.newDatasetId, newDataSourceId)}... " )
210210 } yield ()
211211
212212 def addUploadIdsToUnfinishedUploads (
@@ -278,6 +278,9 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
278278 .increaseBy(redisKeyForCurrentUploadedTotalFileSizeInBytes(uploadId), currentChunkSize)
279279 .flatMap(newTotalFileSizeInBytesOpt => {
280280 if (newTotalFileSizeInBytesOpt.getOrElse(0L ) > maxFileSize) {
281+ logger.warn(
282+ s " Received upload chunk for $datasetId that pushes total file size to ${newTotalFileSizeInBytesOpt
283+ .getOrElse(0L )}, which is more than reserved $maxFileSize. Aborting the upload. " )
281284 cleanUpDatasetExceedingSize(uploadDir, uploadId).flatMap(_ =>
282285 Fox .failure(" dataset.upload.moreBytesThanReserved" ))
283286 } else {
@@ -327,10 +330,9 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
327330 } yield
328331 if (knownUpload) {
329332 logger.info(f " Cancelling ${uploadFullName(uploadId, datasetId, dataSourceId)}... " )
330- for {
331- _ <- removeFromRedis(uploadId)
332- _ <- PathUtils .deleteDirectoryRecursively(uploadDirectoryFor(dataSourceId.organizationId, uploadId)).toFox
333- } yield ()
333+ cleanUpUploadedDataset(uploadDirectoryFor(dataSourceId.organizationId, uploadId),
334+ uploadId,
335+ reason = " Cancelled by user" )
334336 } else Fox .failure(s " Unknown upload " )
335337 }
336338
@@ -364,7 +366,7 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
364366 unpackToDir = unpackToDirFor(dataSourceId)
365367 _ <- ensureDirectoryBox(unpackToDir.getParent).toFox ?~> " dataset.import.fileAccessDenied"
366368 unpackResult <- unpackDataset(uploadDir, unpackToDir, datasetId).shiftBox
367- _ <- cleanUpUploadedDataset(uploadDir, uploadId)
369+ _ <- cleanUpUploadedDataset(uploadDir, uploadId, reason = " Upload complete, data unpacked. " )
368370 _ <- cleanUpOnFailure(unpackResult,
369371 datasetId,
370372 dataSourceId,
@@ -808,17 +810,19 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
808810 tryo(FileUtils .copyDirectory(uploadDir.toFile, backupDir.toFile))
809811 }
810812
// NOTE(review): diff hunk — old (removed, `-`) definition of cleanUpUploadedDataset.
// Old behavior: synchronously delete the upload directory FIRST (result discarded,
// not lifted into Fox), then remove the upload's redis bookkeeping keys.
811- private def cleanUpUploadedDataset (uploadDir : Path , uploadId : String ): Fox [Unit ] = {
812- this .synchronized {
813- PathUtils .deleteDirectoryRecursively(uploadDir)
814- }
815- removeFromRedis(uploadId)
816- }
// NOTE(review): new (added, `+`) definition. Changes vs. old:
//  * adds a `reason: String` parameter, logged before cleanup starts — all call
//    sites in this diff were updated to pass it (L48, L57, L84).
//  * redis keys are now removed BEFORE the directory is deleted, and the
//    deleteDirectoryRecursively result is lifted via .toFox into the
//    for-comprehension, so a directory-deletion failure now fails the Fox
//    (previously its Box result was silently discarded).
//  * consequence of the new ordering: if removeFromRedis fails, the directory
//    deletion is skipped entirely — presumably intentional (redis state is the
//    source of truth for "upload in progress"); TODO confirm with author.
//  * `this.synchronized` now wraps only the construction of the Fox from the
//    deletion call — whether the deletion itself runs inside the lock depends
//    on Fox's evaluation strategy (eager vs. deferred), which is not visible
//    here; NOTE(review): verify this still provides the intended exclusion.
813+ private def cleanUpUploadedDataset (uploadDir : Path , uploadId : String , reason : String ): Fox [Unit ] =
814+ for {
815+ _ <- Fox .successful(logger.info(s " Cleaning up uploaded dataset. Reason: $reason" ))
816+ _ <- removeFromRedis(uploadId)
817+ _ <- this .synchronized {
818+ PathUtils .deleteDirectoryRecursively(uploadDir).toFox
819+ }
820+ } yield ()
817821
// NOTE(review): diff hunk for cleanUpDatasetExceedingSize — invoked when an
// upload's accumulated chunk sizes exceed the reserved total (see the warn log
// added around L32-L34). Looks up the datasetId from redis, cleans up local
// state, then asks the remote webknossos instance to delete the dataset record.
818822 private def cleanUpDatasetExceedingSize (uploadDir : Path , uploadId : String ): Fox [Unit ] =
819823 for {
820824 datasetId <- getDatasetIdByUploadId(uploadId)
// Only change in this hunk: pass the new mandatory `reason` argument through to
// the updated cleanUpUploadedDataset (see L65-L78). Behavior otherwise identical.
821- _ <- cleanUpUploadedDataset(uploadDir, uploadId)
825+ _ <- cleanUpUploadedDataset(uploadDir, uploadId, reason = " Exceeded reserved fileSize " )
// NOTE(review): getDatasetIdByUploadId runs before cleanup removes the redis
// keys it reads — the new remove-redis-first ordering inside
// cleanUpUploadedDataset makes this sequencing load-bearing; keep it.
822826 _ <- remoteWebknossosClient.deleteDataset(datasetId)
823827 } yield ()
824828
@@ -841,7 +845,6 @@ class UploadService @Inject()(dataSourceService: DataSourceService,
841845 _ <- runningUploadMetadataStore.remove(redisKeyForLinkedLayerIdentifier(uploadId))
842846 _ <- runningUploadMetadataStore.remove(redisKeyForUploadId(dataSourceId))
843847 _ <- runningUploadMetadataStore.remove(redisKeyForFilePaths(uploadId))
844-
845848 } yield ()
846849
847850 private def cleanUpOrphanUploads (): Fox [Unit ] =
0 commit comments