@@ -28,6 +28,7 @@
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.List;
+import java.util.Random;
 
 
 public class GridFSTest extends TestCase {
@@ -146,7 +147,7 @@ public void testMetadata()
 
     @Test(groups = {"basic"})
     public void testBadChunkSize() throws Exception {
-        int fileSize = (int)(2 * GridFS.MAX_CHUNKSIZE);
+        int fileSize = 2 * _db.getMongo().getMaxBsonObjectSize();
         if (fileSize > 1024 * 1024 * 1024)
             //If this is the case, GridFS is probably obsolete...
             fileSize = 10 * 1024 * 1024;
@@ -165,16 +166,8 @@ public void testBadChunkSize() throws Exception {
             assertTrue(mongoExc.toString().contains("chunkSize must be greater than zero"));
         }
 
-        try {
-            inputFile.save(GridFS.MAX_CHUNKSIZE + 10);
-            fail("should have received an exception about a chunk size being too big");
-        } catch (MongoException mongoExc) {
-            //also expecting it to complain about the chunkSize
-            assertTrue(mongoExc.toString().contains("and less than or equal to GridFS.MAX_CHUNKSIZE"));
-        }
-
         //For good measure let's save and restore the bytes
-        inputFile.save(GridFS.MAX_CHUNKSIZE / 2);
+        inputFile.save(_db.getMongo().getMaxBsonObjectSize() - 500 * 1000);
         GridFSDBFile savedFile = _fs.findOne(new BasicDBObject("_id", inputFile.getId()));
         ByteArrayOutputStream savedFileByteStream = new ByteArrayOutputStream();
         savedFile.writeTo(savedFileByteStream);
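For context, here is a minimal standalone sketch of what the updated test now exercises: sizing GridFS chunks against the server-reported max BSON object size rather than the hard GridFS.MAX_CHUNKSIZE cap the patch removes. It is a sketch, not part of the patch: it assumes the 2.x driver API this test uses and a mongod reachable on the default localhost port; the class name, database name, and filename are made up for illustration.

// Minimal sketch (not part of the patch), assuming the 2.x driver API
// and a local mongod; names below are hypothetical.
import java.io.ByteArrayOutputStream;
import java.util.Random;

import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.Mongo;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
import com.mongodb.gridfs.GridFSInputFile;

public class ChunkSizeSketch {
    public static void main(String[] args) throws Exception {
        Mongo mongo = new Mongo();                // localhost:27017 (assumption)
        DB db = mongo.getDB("gridfs_sketch");     // hypothetical database name
        GridFS fs = new GridFS(db);

        // Random payload, in line with the test's new java.util.Random import
        byte[] data = new byte[10 * 1024 * 1024];
        new Random().nextBytes(data);

        GridFSInputFile inputFile = fs.createFile(data);
        inputFile.setFilename("sketch.bin");

        // Chunk size derived from the server-reported max BSON object size,
        // leaving headroom for per-chunk overhead (mirrors the patch's - 500 * 1000)
        inputFile.save(mongo.getMaxBsonObjectSize() - 500 * 1000);

        // Round-trip the bytes to confirm the chunk size was accepted
        GridFSDBFile saved = fs.findOne(new BasicDBObject("_id", inputFile.getId()));
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        saved.writeTo(out);
        System.out.println("round-trip ok: " + (out.size() == data.length));
        mongo.close();
    }
}

The headroom subtraction matters because each stored chunk is itself a BSON document with _id, files_id, and n fields alongside the binary data, so a chunk sized exactly at the max BSON object size would not fit once that metadata is added.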