|
22 | 22 |
|
23 | 23 | caffe_db_folder = "/data/imagenet/train-lmdb-256x256" |
24 | 24 |
|
def check_batch(batch1, batch2, batch_size, eps = 0.0000001):
    """Assert that two batches are element-wise equal within tolerance.

    Args:
        batch1, batch2: batch objects exposing ``at(i)`` that returns the
            i-th sample as a numpy-compatible array (e.g. a TensorList
            moved to the CPU with ``as_cpu()``).
        batch_size: number of samples to compare.
        eps: maximum allowed mean absolute difference per sample.
    """
    for i in range(batch_size):
        # Cast to float before subtracting: decoded images are typically
        # uint8, and unsigned subtraction wraps around (0 - 1 -> 255),
        # grossly inflating the reported error.
        lhs = np.asarray(batch1.at(i), dtype=np.float64)
        rhs = np.asarray(batch2.at(i), dtype=np.float64)
        err = np.mean(np.abs(lhs - rhs))
        assert err < eps, "sample {}: mean abs error {} >= {}".format(i, err, eps)
| 29 | + |
25 | 30 | def test_tensor_multiple_uses(): |
26 | 31 | batch_size = 128 |
27 | 32 | class HybridPipe(Pipeline): |
@@ -772,3 +777,38 @@ def iter_setup(self): |
772 | 777 | assert out2.shape == out4.shape |
773 | 778 | np.testing.assert_array_equal( expected_last, out2 ) |
774 | 779 | np.testing.assert_array_equal( expected_last, out4 ) |
| 780 | + |
def test_nvjpegdecoder_cached_vs_non_cached():
    """
    Checking that cached nvJPEGDecoder produces the same output as non cached version
    """
    batch_size = 26

    class CachedVsPlainPipe(Pipeline):
        # Runs both decoder variants on the same JPEG stream so their
        # outputs can be compared batch by batch.
        def __init__(self, batch_size=batch_size, num_threads=1, device_id=0, num_gpus=10000):
            super(CachedVsPlainPipe, self).__init__(batch_size, num_threads, device_id,
                                                    prefetch_queue_depth=1)
            # stick_to_shard keeps the reader on a single shard, so the
            # cache sees repeated images across iterations.
            self.input = ops.CaffeReader(path=caffe_db_folder,
                                         shard_id=device_id,
                                         num_shards=num_gpus,
                                         stick_to_shard=True)
            self.decode_non_cached = ops.nvJPEGDecoder(device="mixed",
                                                       output_type=types.RGB)
            self.decode_cached = ops.nvJPEGDecoder(device="mixed",
                                                   output_type=types.RGB,
                                                   cache_size=8000,
                                                   cache_threshold=0,
                                                   cache_type='threshold',
                                                   cache_debug=False)

        def define_graph(self):
            self.jpegs, self.labels = self.input()
            return (self.decode_non_cached(self.jpegs),
                    self.decode_cached(self.jpegs))

        def iter_setup(self):
            pass

    pipe = CachedVsPlainPipe()
    pipe.build()
    N_iterations = 100
    for _ in range(N_iterations):
        plain_out, cached_out = pipe.run()
        check_batch(plain_out.as_cpu(), cached_out.as_cpu(), batch_size)
0 commit comments