/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>
#include <string>
#include <vector>

#include "LayerGradUtil.h"
#include "paddle/testing/TestUtil.h"

using namespace paddle;  // NOLINT
using namespace std;     // NOLINT

// Do one forward pass of the expand layer and check whether its output
// matches the given result. (Only the CPU path is tested currently.)
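// In "non-seq" mode each row of input1 is repeated to match the length of the
// corresponding sequence of input2; in "seq" mode input1 is expanded along
// input2's sub-sequences (see the test cases below).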
void doOneExpandTest(string trans_type,
                     bool hasSubseq,
                     bool useGpu,
                     Argument& input1,
                     Argument& input2,
                     Argument& result) {
  FLAGS_use_gpu = false;
  // Set up the expand layer configuration.
  TestConfig config;
  config.layerConfig.set_type("expand");

  auto inputType1 =
      trans_type == "non-seq" ? INPUT_DENSE_DIM_DATA : INPUT_SEQUENCE_DATA;
  config.inputDefs.push_back({inputType1, "layer0", 1, 0});
  auto inputType2 =
      hasSubseq ? INPUT_HASSUB_SEQUENCE_DATA : INPUT_SEQUENCE_DATA;
  config.inputDefs.push_back({inputType2, "layer1", 1, 0});
  config.layerConfig.add_inputs();
  config.layerConfig.add_inputs();
  config.layerConfig.set_trans_type(trans_type);

  // Initialize the data layers and feed them the given inputs.
  std::vector<DataLayerPtr> dataLayers;
  LayerMap layerMap;
  vector<Argument> datas;
  initDataLayer(
      config, &dataLayers, &datas, &layerMap, "expand", 1, false, useGpu);
  dataLayers[0]->getOutput() = input1;
  dataLayers[1]->getOutput() = input2;

  // Initialize the layer under test, run one forward pass and check the
  // output against the expected result.
  std::vector<ParameterPtr> parameters;
  LayerPtr expandLayer;
  initTestLayer(config, &layerMap, &parameters, &expandLayer);
  expandLayer->forward(PASS_GC);
  checkMatrixEqual(expandLayer->getOutputValue(), result.value);
}

TEST(Layer, ExpandLayerFwd) {
  bool useGpu = false;

  // Assume batch_size = 3 in all cases.

  // CPU case 1. non-seq expand to seq
  // input1 = 1,2,3
  // input2 = [4,5],[6],[7,8,9]
  // result = [1,1],[2],[3,3,3]
  Argument input1, input2, result;
  input1.value = Matrix::create(3, 1, false, useGpu);
  real input1Data[] = {1, 2, 3};
  input1.value->setData(input1Data);

  input2.value = Matrix::create(6, 1, false, useGpu);
  real input2Data[] = {4, 5, 6, 7, 8, 9};
  input2.value->setData(input2Data);
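  // Start offsets {0, 2, 3, 6} split the six elements of input2 into the
  // three sequences [4,5], [6], [7,8,9].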
  input2.sequenceStartPositions = ICpuGpuVector::create(4, useGpu);
  int input2Seq[] = {0, 2, 3, 6};
  input2.sequenceStartPositions->copyFrom(input2Seq, 4, useGpu);

  result.value = Matrix::create(6, 1, false, useGpu);
  real resultData[] = {1, 1, 2, 3, 3, 3};
  result.value->setData(resultData);

  doOneExpandTest("non-seq", false, useGpu, input1, input2, result);

  // CPU case 2. non-seq expand to sub-seq
  // NOTE: input1's batch size equals the number of sequences in input2 here,
  // i.e., each row of input1 is expanded by one top-level sequence of input2.
  // input1 = 1,2,3
  // input2 = [[4,5]],[[6]],[[7],[8,9]]
  // result = [[1,1]],[[2]],[[3],[3,3]]
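  // Sub-sequence start offsets {0, 2, 3, 4, 6}, combined with the sequence
  // offsets above, give the nested layout [[4,5]], [[6]], [[7],[8,9]].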
  input2.subSequenceStartPositions = ICpuGpuVector::create(5, useGpu);
  int input2SubSeq[] = {0, 2, 3, 4, 6};
  input2.subSequenceStartPositions->copyFrom(input2SubSeq, 5, useGpu);

  doOneExpandTest("non-seq", true, useGpu, input1, input2, result);

  // CPU case 3. seq expand to sub-seq
  // input1 = [1,2],[3],[4]
  // input2 = [[4,5]],[[6]],[[7],[8,9]]
  // result = [[1,1]],[[2]],[[3],[4,4]]
  Matrix::resizeOrCreate(input1.value, 4, 1, false, useGpu);
  real input1Data_case3[] = {1, 2, 3, 4};
  input1.value->setData(input1Data_case3);

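  // Start offsets {0, 2, 3, 4} split the four rows of input1 into the
  // sequences [1,2], [3], [4].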
  input1.sequenceStartPositions = ICpuGpuVector::create(4, useGpu);
  int input1Seq[] = {0, 2, 3, 4};
  input1.sequenceStartPositions->copyFrom(input1Seq, 4, useGpu);

  real resultData_case3[] = {1, 1, 2, 3, 4, 4};
  result.value->setData(resultData_case3);

  doOneExpandTest("seq", true, useGpu, input1, input2, result);
}

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  initMain(argc, argv);
  return RUN_ALL_TESTS();
}