 * Operations include:
 * - writing data obtained from an InputStream
 * - getting an OutputStream to stream the data out
- *
+ *
 * @author Eliot Horowitz and Guy K. Kloss
 */
public class GridFSInputFile extends GridFSFile {
-
+
    /**
     * Constructor setting the GridFS file name and providing an input
     * stream containing data to be written to the file.
-     *
+     *
     * @param fs
     *            The GridFS connection handle.
     * @param in
     *            Stream to read the file data from.
     * @param filename
     *            Name of the file to be created.
-     * @param closeStreamOnPersist
+     * @param closeStreamOnPersist
     *            Whether the passed-in input stream should be closed once the data chunks have been persisted.
     */
    GridFSInputFile( GridFS fs , InputStream in , String filename , boolean closeStreamOnPersist ) {
        _fs = fs;
        _in = in;
        _filename = filename;
        _closeStreamOnPersist = closeStreamOnPersist;
-
+
        _id = new ObjectId();
        _chunkSize = GridFS.DEFAULT_CHUNKSIZE;
        _uploadDate = new Date();
        _messageDigester = _md5Pool.get();
        _messageDigester.reset();
        _buffer = new byte[(int) _chunkSize];
    }
-
+
    /**
     * Constructor setting the GridFS file name and providing an input
     * stream containing data to be written to the file.
-     *
+     *
     * @param fs
     *            The GridFS connection handle.
     * @param in
@@ -83,13 +83,13 @@ public class GridFSInputFile extends GridFSFile {
    GridFSInputFile( GridFS fs , InputStream in , String filename ) {
        this( fs , in , filename , false );
    }
-
+
    /**
     * Constructor that only provides a file name, but does not rely on the
     * presence of an {@link java.io.InputStream}. An
     * {@link java.io.OutputStream} can later be obtained for writing using the
     * {@link #getOutputStream()} method.
-     *
+     *
     * @param fs
     *            The GridFS connection handle.
     * @param filename
@@ -98,32 +98,32 @@ public class GridFSInputFile extends GridFSFile {
    GridFSInputFile( GridFS fs , String filename ) {
        this( fs , null , filename );
    }
-
+
    /**
     * Minimal constructor that does not rely on the presence of an
     * {@link java.io.InputStream}. An {@link java.io.OutputStream} can later be
     * obtained for writing using the {@link #getOutputStream()} method.
-     *
+     *
     * @param fs
     *            The GridFS connection handle.
     */
    GridFSInputFile( GridFS fs ) {
        this( fs , null , null );
    }
-
+
    /**
     * Sets the file name on the GridFS entry.
-     *
+     *
     * @param fn
     *            File name.
     */
    public void setFilename( String fn ) {
        _filename = fn;
    }
-
+
    /**
     * Sets the content type (MIME type) on the GridFS entry.
-     *
+     *
     * @param ct
     *            Content type.
     */
@@ -148,11 +148,11 @@ public void setChunkSize(long _chunkSize) {
    public void save() {
        save( _chunkSize );
    }
-
+
    /**
     * This method first calls saveChunks(long) if the file data has not been saved yet.
     * Then it persists the file entry to GridFS.
-     *
+     *
     * @param chunkSize
     *            Size of chunks for file in bytes.
     */
@@ -169,13 +169,13 @@ public void save( long chunkSize ) {
                throw new MongoException( "couldn't save chunks" , ioe );
            }
        }
-
+
        super.save();
    }
-
+
    /**
     * @see com.mongodb.gridfs.GridFSInputFile#saveChunks(long)
-     *
+     *
     * @return Number of the next chunk.
     * @throws IOException
     *             on problems reading the new entry's
@@ -184,12 +184,12 @@ public void save( long chunkSize ) {
    public int saveChunks() throws IOException {
        return saveChunks( _chunkSize );
    }
-
+
    /**
     * Saves all data from the configured {@link java.io.InputStream} into chunks
     * in GridFS. A non-default chunk size can be specified.
     * This method does NOT save the file object itself; call save() to do so.
-     *
+     *
     * @param chunkSize
     *            Size of chunks for file in bytes.
     * @return Number of the next chunk.
@@ -207,11 +207,11 @@ public int saveChunks( long chunkSize ) throws IOException {
            _chunkSize = chunkSize;
            _buffer = new byte[(int) _chunkSize];
        }
-
+
        if ( chunkSize > 3.5 * 1000 * 1000 ) {
            throw new MongoException( "chunkSize must be less than 3.5MiB!" );
        }
-
+
        int bytesRead = 0;
        while ( bytesRead >= 0 ) {
            _currentBufferPosition = 0;
@@ -223,14 +223,14 @@ public int saveChunks( long chunkSize ) throws IOException {
        _finishData();
        return _currentChunkNumber;
    }
-
+
    /**
     * After retrieving this {@link java.io.OutputStream}, this object can accept
     * data written successively to the output stream.
     * To completely persist this GridFS object, you must finally call the {@link java.io.OutputStream#close()}
     * method on the output stream. Note that calling the save() and saveChunks()
     * methods will throw exceptions once you have obtained the OutputStream.
-     *
+     *
     * @return Writable stream object.
     */
    public OutputStream getOutputStream() {
@@ -239,11 +239,11 @@ public OutputStream getOutputStream() {
        }
        return _outputStream;
    }
-
+
    /**
     * Dumps a new chunk into the chunks collection. Depending on the flag,
     * partial buffers (at the end of the data) are also written immediately.
-     *
+     *
     * @param data
     *            Data for chunk.
     * @param writePartial
@@ -264,7 +264,7 @@ private void _dumpBuffer( boolean writePartial ) {
            writeBuffer = new byte[_currentBufferPosition];
            System.arraycopy( _buffer , 0 , writeBuffer , 0 , _currentBufferPosition );
        }
-
+
        DBObject chunk = BasicDBObjectBuilder.start()
                .add( "files_id" , _id )
                .add( "n" , _currentChunkNumber )
@@ -275,10 +275,10 @@ private void _dumpBuffer( boolean writePartial ) {
        _messageDigester.update( writeBuffer );
        _currentBufferPosition = 0;
    }
-
+
    /**
     * Reads a full buffer from the {@link java.io.InputStream}.
-     *
+     *
     * @return Number of bytes read from stream.
     * @throws IOException
     *             if the reading from the stream fails.
@@ -296,7 +296,7 @@ private int _readStream2Buffer() throws IOException {
        }
        return bytesRead;
    }
-
+
    /**
     * Marks the data as fully written. This needs to be called before super.save().
     */
@@ -316,7 +316,7 @@ private void _finishData() {
            }
        }
    }
-
+
    private final InputStream _in;
    private boolean _closeStreamOnPersist;
    private boolean _savedChunks = false;
@@ -326,15 +326,15 @@ private void _finishData() {
    private long _totalBytes = 0;
    private MessageDigest _messageDigester = null;
    private OutputStream _outputStream = null;
-
+
    /**
     * A pool of {@link java.security.MessageDigest} objects.
     */
    static SimplePool<MessageDigest> _md5Pool
            = new SimplePool<MessageDigest>( "md5" , 10 , -1 , false , false ) {
        /**
         * {@inheritDoc}
-         *
+         *
         * @see com.mongodb.util.SimplePool#createNew()
         */
        protected MessageDigest createNew() {
@@ -345,18 +345,18 @@ protected MessageDigest createNew() {
            }
        }
    };
-
+
    /**
     * An output stream implementation that can be used to successively write to
     * a GridFS file.
-     *
+     *
     * @author Guy K. Kloss
     */
    class MyOutputStream extends OutputStream {
-
+
        /**
         * {@inheritDoc}
-         *
+         *
         * @see java.io.OutputStream#write(int)
         */
        @Override
@@ -365,10 +365,10 @@ public void write( int b ) throws IOException {
            byteArray[0] = (byte) (b & 0xff);
            write( byteArray , 0 , 1 );
        }
-
+
        /**
         * {@inheritDoc}
-         *
+         *
         * @see java.io.OutputStream#write(byte[], int, int)
         */
        @Override
@@ -390,7 +390,7 @@ public void write( byte[] b , int off , int len ) throws IOException {
                }
            }
        }
-
+
        /**
         * Processes/saves all data from {@link java.io.InputStream} and closes
         * the potentially present {@link java.io.OutputStream}. The GridFS file
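For orientation, here is a minimal usage sketch of the streaming write path documented in the getOutputStream() Javadoc above. It assumes the legacy 2.x Java driver API; the connection setup, database name ("test"), and file name ("hello.txt") are illustrative, and GridFS.createFile(String) is used to obtain a GridFSInputFile with no attached InputStream so that the data is supplied through the OutputStream instead.

import java.io.OutputStream;

import com.mongodb.DB;
import com.mongodb.Mongo;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSInputFile;

public class GridFSOutputStreamSketch {
    public static void main( String[] args ) throws Exception {
        // Illustrative connection and database name.
        Mongo mongo = new Mongo( "localhost" );
        DB db = mongo.getDB( "test" );
        GridFS gridFs = new GridFS( db );

        // createFile(String) yields a GridFSInputFile without an InputStream,
        // so the file content is written through getOutputStream().
        GridFSInputFile file = gridFs.createFile( "hello.txt" );
        file.setContentType( "text/plain" );

        OutputStream out = file.getOutputStream();
        try {
            out.write( "hello gridfs".getBytes( "UTF-8" ) );
        } finally {
            // Per the Javadoc above, closing the stream flushes the last chunk and
            // persists the file entry; save()/saveChunks() must not be called after
            // the OutputStream has been obtained.
            out.close();
        }

        mongo.close();
    }
}

Alternatively, when the data already lives in an InputStream, GridFS.createFile(InputStream, String) followed by save() takes the constructor-based path shown in the diff, with the stream optionally closed on persist via the closeStreamOnPersist flag.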