Skip to content

Commit db2a6ab

Browse files
authored
Merge pull request #16 from clowder-framework/release/1.10.0
v1.10.0 release
2 parents 3b84e70 + 0792f2c commit db2a6ab

File tree

15 files changed

+463
-89
lines changed

15 files changed

+463
-89
lines changed

CHANGELOG.md

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,21 @@ All notable changes to this project will be documented in this file.
44
The format is based on [Keep a Changelog](http://keepachangelog.com/)
55
and this project adheres to [Semantic Versioning](http://semver.org/).
66

7+
## 1.10.0 - 2020-06-30
8+
9+
### Added
10+
- Ability to mark multiple files in a dataset and perform bulk operations (download, tag, delete) on them at once.
11+
12+
### Fixed
13+
- Return thumbnail as part of the file information.
14+
[#8](https://github.com/clowder-framework/clowder/issues/8)
15+
- Datasets layout on space page would sometimes have overlapping tiles.
16+
17+
### Changed
18+
- mongo-init script with users would return with exit code -1 if user exists, now returns exit code 0.
19+
720
## 1.9.0 - 2020-06-01
21+
822
**_Warning:_ This update modifies information stored in Elasticsearch used for text based searching. To take advantage
923
of these changes a reindex of Elasticsearch is required. A reindex can be started by an admin from the Admin menu.**
1024

app/api/Datasets.scala

Lines changed: 78 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2065,10 +2065,16 @@ class Datasets @Inject()(
20652065
* @param dataset dataset from which to get the files
20662066
* @param chunkSize chunk size in memory in which to buffer the stream
20672067
* @param compression java built in compression value. Use 0 for no compression.
2068+
* @param bagit whether or not to include bagit structures in zip
2069+
* @param user an optional user to include in metadata
2070+
* @param fileIDs a list of UUIDs of files in the dataset to include (i.e. marked file downloads)
2071+
* @param folderId a folder UUID in the dataset to include (i.e. folder download)
20682072
* @return Enumerator to produce array of bytes from a zipped stream containing the bytes of each file
20692073
* in the dataset
20702074
*/
2071-
def enumeratorFromDataset(dataset: Dataset, chunkSize: Int = 1024 * 8, compression: Int = Deflater.DEFAULT_COMPRESSION, bagit: Boolean, user : Option[User])
2075+
def enumeratorFromDataset(dataset: Dataset, chunkSize: Int = 1024 * 8,
2076+
compression: Int = Deflater.DEFAULT_COMPRESSION, bagit: Boolean,
2077+
user : Option[User], fileIDs: Option[List[UUID]], folderId: Option[UUID])
20722078
(implicit ec: ExecutionContext): Enumerator[Array[Byte]] = {
20732079
implicit val pec = ec.prepare()
20742080
val dataFolder = if (bagit) "data/" else ""
@@ -2077,7 +2083,19 @@ class Datasets @Inject()(
20772083

20782084
// compute list of all files and folder in dataset. This will also make sure
20792085
// that all files and folder names are unique.
2080-
listFilesInFolder(dataset.files, dataset.folders, dataFolder, filenameMap, inputFiles)
2086+
fileIDs match {
2087+
case Some(fids) => {
2088+
Logger.info("Downloading only some files")
2089+
Logger.info(fids.toString)
2090+
listFilesInFolder(fids, List.empty, dataFolder, filenameMap, inputFiles)
2091+
}
2092+
case None => {
2093+
folderId match {
2094+
case Some(fid) => listFilesInFolder(List.empty, List(fid), dataFolder, filenameMap, inputFiles)
2095+
case None => listFilesInFolder(dataset.files, dataset.folders, dataFolder, filenameMap, inputFiles)
2096+
}
2097+
}
2098+
}
20812099

20822100
val md5Files = scala.collection.mutable.HashMap.empty[String, MessageDigest] //for the files
20832101
val md5Bag = scala.collection.mutable.HashMap.empty[String, MessageDigest] //for the bag files
@@ -2121,14 +2139,13 @@ class Datasets @Inject()(
21212139
* the enumerator is finished
21222140
*/
21232141

2124-
var is: Option[InputStream] = addDatasetInfoToZip(dataFolder,dataset,zip)
2142+
var is: Option[InputStream] = addDatasetInfoToZip(dataFolder, dataset, zip)
21252143
//digest input stream
21262144
val md5 = MessageDigest.getInstance("MD5")
21272145
md5Files.put(dataFolder+"_info.json",md5)
21282146
is = Some(new DigestInputStream(is.get,md5))
21292147
file_type = 1 //next is metadata
21302148

2131-
21322149
Enumerator.generateM({
21332150
is match {
21342151
case Some(inputStream) => {
@@ -2415,7 +2432,7 @@ class Datasets @Inject()(
24152432

24162433
// Use custom enumerator to create the zip file on the fly
24172434
// Use a 1MB in memory byte array
2418-
Ok.chunked(enumeratorFromDataset(dataset,1024*1024, compression,bagit,user)).withHeaders(
2435+
Ok.chunked(enumeratorFromDataset(dataset,1024*1024, compression, bagit, user, None, None)).withHeaders(
24192436
CONTENT_TYPE -> "application/zip",
24202437
CONTENT_DISPOSITION -> (FileUtils.encodeAttachment(dataset.name+ ".zip", request.headers.get("user-agent").getOrElse("")))
24212438
)
@@ -2427,6 +2444,62 @@ class Datasets @Inject()(
24272444
}
24282445
}
24292446

2447+
/**
 * Streams a caller-selected subset of a dataset's files as a single zip archive.
 *
 * @param id the dataset whose files are being downloaded
 * @param fileList comma-separated string of file UUIDs belonging to the dataset
 *                 (i.e. the files the user marked for a bulk download)
 * @return a chunked HTTP response producing the zip on the fly, or NotFound if
 *         the dataset id does not resolve
 */
def downloadPartial(id: UUID, fileList: String) = PermissionAction(Permission.DownloadFiles, Some(ResourceRef(ResourceRef.dataset, id))) { implicit request =>
  implicit val user = request.user
  datasets.get(id) match {
    // Dataset id did not resolve — nothing to stream
    case None => NotFound
    case Some(dataset) => {
      // NOTE(review): assumes every entry in fileList is a well-formed UUID — malformed
      // input will surface as an exception here; confirm upstream validation.
      val selectedIds: List[UUID] = fileList.split(',').toList.map(raw => new UUID(raw))
      val useBagit = play.api.Play.configuration.getBoolean("downloadDatasetBagit").getOrElse(true)

      // Record a download event for every requested file
      for (fid <- selectedIds) files.incrementDownloads(fid, user)

      // Build the zip lazily with the custom enumerator: 1MB in-memory buffer,
      // compression level -1 (java.util.zip default)
      val zipStream = enumeratorFromDataset(dataset, 1024 * 1024, -1, useBagit, user, Some(selectedIds), None)
      Ok.chunked(zipStream).withHeaders(
        CONTENT_TYPE -> "application/zip",
        CONTENT_DISPOSITION -> FileUtils.encodeAttachment(dataset.name + " (Partial).zip", request.headers.get("user-agent").getOrElse(""))
      )
    }
  }
}
2471+
2472+
/**
 * Streams one folder of a dataset (including its sub-folders) as a zip archive.
 *
 * @param id the dataset containing the folder
 * @param folderId the folder within the dataset to download
 * @return a chunked HTTP response producing the zip on the fly; NotFound when
 *         either the dataset or the folder id does not resolve
 */
def downloadFolder(id: UUID, folderId: UUID) = PermissionAction(Permission.DownloadFiles, Some(ResourceRef(ResourceRef.dataset, id))) { implicit request =>
  implicit val user = request.user
  datasets.get(id) match {
    // Dataset id did not resolve
    case None => NotFound
    case Some(dataset) => {
      val useBagit = play.api.Play.configuration.getBoolean("downloadDatasetBagit").getOrElse(true)

      // Folder lookup is only attempted once the dataset is known to exist
      folders.get(folderId) match {
        case None => NotFound
        case Some(folder) => {
          // Record a download event for each file directly inside the folder
          folder.files.foreach(fid => files.incrementDownloads(fid, user))

          // Build the zip lazily with the custom enumerator: 1MB in-memory buffer,
          // compression level -1 (java.util.zip default)
          val zipStream = enumeratorFromDataset(dataset, 1024 * 1024, -1, useBagit, user, None, Some(folderId))
          Ok.chunked(zipStream).withHeaders(
            CONTENT_TYPE -> "application/zip",
            CONTENT_DISPOSITION -> FileUtils.encodeAttachment(dataset.name + " (" + folder.name + " Folder).zip", request.headers.get("user-agent").getOrElse(""))
          )
        }
      }
    }
  }
}
2502+
24302503
def updateAccess(id:UUID, access:String) = PermissionAction(Permission.PublicDataset, Some(ResourceRef(ResourceRef.dataset, id))) { implicit request =>
24312504
implicit val user = request.user
24322505
user match {

app/api/Files.scala

Lines changed: 7 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -728,42 +728,29 @@ class Files @Inject()(
728728
"content-type" -> file.contentType,
729729
"date-created" -> file.uploadDate.toString(),
730730
"size" -> file.length.toString,
731+
"thumbnail" -> file.thumbnail_id.orNull,
731732
"authorId" -> file.author.id.stringify,
732733
"status" -> file.status)
733734

734735
// Only include filepath if using DiskByte storage and user is serverAdmin
735736
val jsonMap = file.loader match {
736737
case "services.filesystem.DiskByteStorageService" => {
737738
if (serverAdmin)
738-
Map(
739-
"id" -> file.id.toString,
740-
"filename" -> file.filename,
741-
"filepath" -> file.loader_id,
742-
"filedescription" -> file.description,
743-
"content-type" -> file.contentType,
744-
"date-created" -> file.uploadDate.toString(),
745-
"size" -> file.length.toString,
746-
"authorId" -> file.author.id.stringify,
747-
"status" -> file.status)
739+
defaultMap ++ Map(
740+
"filepath" -> file.loader_id
741+
)
748742
else
749743
defaultMap
750744
}
751745
case "services.s3.S3ByteStorageService" => {
752746
if (serverAdmin) {
753747
val bucketName = configuration.getString(S3ByteStorageService.BucketName).getOrElse("")
754748
val serviceEndpoint = configuration.getString(S3ByteStorageService.ServiceEndpoint).getOrElse("")
755-
Map(
756-
"id" -> file.id.toString,
757-
"filename" -> file.filename,
749+
defaultMap ++ Map(
758750
"service-endpoint" -> serviceEndpoint,
759751
"bucket-name" -> bucketName,
760-
"object-key" -> file.loader_id,
761-
"filedescription" -> file.description,
762-
"content-type" -> file.contentType,
763-
"date-created" -> file.uploadDate.toString(),
764-
"size" -> file.length.toString,
765-
"authorId" -> file.author.id.stringify,
766-
"status" -> file.status)
752+
"object-key" -> file.loader_id
753+
)
767754
} else
768755
defaultMap
769756
}

app/controllers/Application.scala

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -333,6 +333,8 @@ class Application @Inject() (files: FileService, collections: CollectionService,
333333
api.routes.javascript.Datasets.unfollow,
334334
api.routes.javascript.Datasets.detachFile,
335335
api.routes.javascript.Datasets.download,
336+
api.routes.javascript.Datasets.downloadPartial,
337+
api.routes.javascript.Datasets.downloadFolder,
336338
api.routes.javascript.Datasets.getPreviews,
337339
api.routes.javascript.Datasets.updateAccess,
338340
api.routes.javascript.Datasets.addFileEvent,

0 commit comments

Comments
 (0)