4 files changed (+11, −6), all under core/src/main/scala/org/apache/spark.

@@ -27,7 +27,7 @@ import scala.reflect.ClassTag
 import org.apache.spark.internal.Logging
 import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer, NioManagedBuffer}
 import org.apache.spark.network.shuffle.{BlockFetchingListener, DownloadFileManager, ShuffleClient}
-import org.apache.spark.storage.{BlockId, StorageLevel}
+import org.apache.spark.storage.{BlockId, EncryptedManagedBuffer, StorageLevel}
 import org.apache.spark.util.ThreadUtils

 private[spark]
@@ -104,6 +104,8 @@ abstract class BlockTransferService extends ShuffleClient with Closeable with Logging
       data match {
         case f: FileSegmentManagedBuffer =>
           result.success(f)
+        case e: EncryptedManagedBuffer =>
+          result.success(e)
         case _ =>
           val ret = ByteBuffer.allocate(data.size.toInt)
           ret.put(data.nioByteBuffer())
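
The new `case e: EncryptedManagedBuffer` mirrors the existing `FileSegmentManagedBuffer` case: both buffers are backed by a file, so `fetchBlockSync` can hand them to the caller as-is instead of letting them fall into `case _`, which copies the entire block into a single NIO buffer. Since `ByteBuffer.allocate` takes an `Int`, that fallback also cannot represent blocks of 2 GB or more. A minimal, self-contained sketch of the same dispatch shape, using made-up stand-in types rather than Spark's real `ManagedBuffer` hierarchy:

```scala
import java.nio.ByteBuffer

// Hypothetical stand-ins for Spark's ManagedBuffer hierarchy.
sealed trait Buf { def size: Long }
final case class FileBacked(path: String, size: Long) extends Buf
final case class EncryptedFileBacked(path: String, size: Long) extends Buf
final case class InMemory(bytes: Array[Byte]) extends Buf {
  def size: Long = bytes.length.toLong
}

// File-backed buffers (plain or encrypted) pass through untouched; only the
// remaining case is copied into a single in-memory ByteBuffer.
def normalize(data: Buf): Buf = data match {
  case f: FileBacked          => f
  case e: EncryptedFileBacked => e
  case other: InMemory =>
    // ByteBuffer.allocate takes an Int, so this branch cannot hold a block
    // of 2 GB or more; that is why file-backed buffers must not land here.
    val ret = ByteBuffer.allocate(other.size.toInt)
    ret.put(other.bytes)
    ret.flip()
    InMemory(ret.array())
}
```

The point of the pattern is that only the last case pays the memory cost; everything file-backed stays on disk until a consumer actually reads it.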
@@ -721,7 +721,7 @@ private[spark] class BlockManager(
    * Get block from remote block managers as serialized bytes.
    */
   def getRemoteBytes(blockId: BlockId): Option[ChunkedByteBuffer] = {
-    // TODO if we change this method to return the ManagedBuffer, then getRemoteValues
+    // TODO SPARK-25905 if we change this method to return the ManagedBuffer, then getRemoteValues
     // could just use the inputStream on the temp file, rather than reading the file into memory.
     // Until then, replication can cause the process to use too much memory and get killed
     // even though we've read the data to disk.
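
The TODO now carries its tracking ticket, SPARK-25905. The idea it records: the fetched bytes may already sit in a temp file on disk, so if `getRemoteBytes` returned a `ManagedBuffer`, `getRemoteValues` could deserialize straight from a stream over that file instead of first materializing a `ChunkedByteBuffer`. A hedged sketch of the two shapes, with `deserialize` as a stand-in for Spark's serializer machinery (both helpers here are hypothetical):

```scala
import java.io.{BufferedInputStream, ByteArrayInputStream, InputStream}

// Today's shape: the whole serialized block is pulled into memory first,
// even when the fetch already wrote it to a temp file on disk.
def valuesFromBytes[T](bytes: Array[Byte],
                       deserialize: InputStream => Iterator[T]): Iterator[T] =
  deserialize(new ByteArrayInputStream(bytes))

// The shape the TODO asks for: hand the serializer a stream over the temp
// file, so memory stays bounded no matter how large the block is.
def valuesFromStream[T](open: () => InputStream,
                        deserialize: InputStream => Iterator[T]): Iterator[T] =
  deserialize(new BufferedInputStream(open()))
```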
@@ -201,7 +201,7 @@ private class DiskBlockData(
   private def open() = new FileInputStream(file).getChannel
 }

-private class EncryptedBlockData(
+private[spark] class EncryptedBlockData(
     file: File,
     blockSize: Long,
     conf: SparkConf,
@@ -263,7 +263,8 @@ private class EncryptedBlockData(
   }
 }

-private class EncryptedManagedBuffer(val blockData: EncryptedBlockData) extends ManagedBuffer {
+private[spark] class EncryptedManagedBuffer(
+    val blockData: EncryptedBlockData) extends ManagedBuffer {

   // This is the size of the decrypted data
   override def size(): Long = blockData.size
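
Widening `EncryptedBlockData` and `EncryptedManagedBuffer` from `private` to `private[spark]` is what allows the new match arms in `BlockTransferService` and `ChunkedByteBuffer` to name these types at all: in Scala, a top-level `private` class is visible only inside its own package (`org.apache.spark.storage` here), while `private[spark]` opens it to every package nested under `org.apache.spark`. The constructor of `EncryptedManagedBuffer` is split across two lines, presumably just to keep the declaration within Spark's line-length style after the modifier grew. A tiny illustration of the visibility rule (the class `Sketchy` is invented; the package names follow Spark's layout):

```scala
// One file, two package blocks, to show the visibility difference.
package org.apache.spark.storage {
  // A plain `private` here would make Sketchy invisible outside
  // org.apache.spark.storage; `private[spark]` widens access to every
  // package nested under org.apache.spark.
  private[spark] class Sketchy
}

package org.apache.spark.network {
  object Elsewhere {
    // Compiles only because Sketchy is private[spark], not plain private.
    val s = new org.apache.spark.storage.Sketchy
  }
}
```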
@@ -29,7 +29,7 @@ import org.apache.spark.SparkEnv
 import org.apache.spark.internal.config
 import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
 import org.apache.spark.network.util.{ByteArrayWritableChannel, LimitedInputStream}
-import org.apache.spark.storage.StorageUtils
+import org.apache.spark.storage.{EncryptedManagedBuffer, StorageUtils}
 import org.apache.spark.unsafe.array.ByteArrayMethods
 import org.apache.spark.util.Utils

@@ -173,11 +173,13 @@ private[spark] class ChunkedByteBuffer(var chunks: Array[ByteBuffer]) {
 private[spark] object ChunkedByteBuffer {


-  // TODO eliminate this method if we switch BlockManager to getting InputStreams
+  // TODO SPARK-25905 eliminate this method if we switch BlockManager to getting InputStreams
   def fromManagedBuffer(data: ManagedBuffer): ChunkedByteBuffer = {
     data match {
       case f: FileSegmentManagedBuffer =>
         fromFile(f.getFile, f.getOffset, f.getLength)
+      case e: EncryptedManagedBuffer =>
+        e.blockData.toChunkedByteBuffer(ByteBuffer.allocate _)
       case other =>
         new ChunkedByteBuffer(other.nioByteBuffer())
     }
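
The detail worth noting is the argument `ByteBuffer.allocate _`: `toChunkedByteBuffer` takes an allocator function, so the caller chooses how each chunk is allocated and the decrypted data is materialized chunk by chunk rather than as one oversized buffer. A hedged sketch of that allocator-passing pattern; `readChunked` is a made-up helper, not Spark's actual implementation:

```scala
import java.nio.ByteBuffer
import java.nio.channels.ReadableByteChannel

// Split `totalSize` bytes from `in` into chunks no larger than `chunkSize`,
// allocating each chunk with the caller-supplied `allocator`. No single
// ByteBuffer ever has to hold the whole (possibly multi-GB) payload.
def readChunked(
    in: ReadableByteChannel,
    totalSize: Long,
    chunkSize: Int,
    allocator: Int => ByteBuffer): Seq[ByteBuffer] = {
  var remaining = totalSize
  val chunks = Seq.newBuilder[ByteBuffer]
  while (remaining > 0) {
    val thisChunk = math.min(remaining, chunkSize.toLong).toInt
    val buf = allocator(thisChunk)
    while (buf.hasRemaining && in.read(buf) >= 0) {} // fill this chunk
    buf.flip()
    chunks += buf
    remaining -= thisChunk
  }
  chunks.result()
}
```

Passing `ByteBuffer.allocate _` yields heap chunks; swapping in `ByteBuffer.allocateDirect _` would switch to off-heap buffers without touching the read loop.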