Skip to content

Commit 270a87f

Browse files
committed
add load op fp16 mode test
1 parent eb95417 commit 270a87f

File tree

2 files changed

+72
-1
lines changed

2 files changed

+72
-1
lines changed

paddle/fluid/operators/load_op.cc

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212
See the License for the specific language governing permissions and
1313
limitations under the License. */
1414
#include <fstream>
15+
#include <iostream>
1516

17+
#include "paddle/fluid/framework/data_type_transform.h"
1618
#include "paddle/fluid/framework/op_registry.h"
1719
#include "paddle/fluid/platform/device_context.h"
1820
#include "paddle/fluid/platform/profiler.h"
@@ -51,14 +53,30 @@ class LoadOp : public framework::OperatorBase {
5153
auto in_dtype = framework::ToDataType(tensor->type());
5254
auto out_dtype = load_as_fp16 ? framework::proto::VarType::FP16 : in_dtype;
5355

56+
std::cout << "In load op: " << std::endl;
57+
std::cout << "before conversion block" << std::endl;
58+
5459
if (in_dtype != out_dtype) {
5560
// convert to float16 tensor
5661
auto in_kernel_type = framework::OpKernelType(in_dtype, place);
5762
auto out_kernel_type = framework::OpKernelType(out_dtype, place);
5863
framework::LoDTensor fp16_tensor;
64+
// copy LoD info to the new tensor
65+
fp16_tensor.set_lod(tensor->lod());
66+
std::cout << "before TransDataType" << std::endl;
5967
framework::TransDataType(in_kernel_type, out_kernel_type, *tensor,
6068
&fp16_tensor);
69+
std::cout << "after TransDataType" << std::endl;
70+
// reset output tensor
71+
out_var->Clear();
72+
tensor = out_var->GetMutable<framework::LoDTensor>();
73+
tensor->set_lod(fp16_tensor.lod());
74+
std::cout << "before TransDataType" << std::endl;
75+
tensor->ShareDataWith(fp16_tensor);
76+
std::cout << "after TransDataType" << std::endl;
6177
}
78+
79+
std::cout << "Out of load op: " << std::endl;
6280
}
6381
};
6482

paddle/fluid/operators/save_load_op_test.cc

Lines changed: 54 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ TEST(SaveLoadOp, CPU) {
6363
}
6464
}
6565

66-
TEST(SaveLoadFP16Op, CPU) {
66+
TEST(SaveFP16Op, CPU) {
6767
paddle::framework::Scope scope;
6868
paddle::platform::CPUPlace place;
6969

@@ -94,3 +94,56 @@ TEST(SaveLoadFP16Op, CPU) {
9494
EXPECT_EQ(expect[i], static_cast<float>(actual[i]));
9595
}
9696
}
97+
98+
// Round-trips a float32 tensor through the save op and then the load op with
// load_as_fp16=true, verifying that both the converted fp16 values and the
// LoD structure survive the conversion.
// (Cleanup: removed leftover LOG(INFO) debug-trace lines from the committed
// test body.)
TEST(LoadFP16Op, CPU) {
  paddle::framework::Scope scope;
  paddle::platform::CPUPlace place;

  auto var = scope.Var("test_var");
  auto tensor = var->GetMutable<paddle::framework::LoDTensor>();
  tensor->Resize({3, 10});

  paddle::framework::LoD expect_lod;
  expect_lod.resize(1);
  expect_lod[0].push_back(0);
  expect_lod[0].push_back(1);
  expect_lod[0].push_back(2);
  expect_lod[0].push_back(3);

  tensor->set_lod(expect_lod);
  float* expect = tensor->mutable_data<float>(place);
  // Seed with values already rounded through float16 so the equality checks
  // below are exact rather than approximate.
  for (int64_t i = 0; i < tensor->numel(); ++i) {
    expect[i] = static_cast<float>(paddle::platform::float16(i));
  }

  paddle::framework::AttributeMap attrs;
  attrs.insert({"file_path", std::string("tensor.save")});
  // NOTE(review): this attr map is shared with the save op below; presumably
  // "load_as_fp16" is ignored by the save op so the file stays fp32 — confirm.
  attrs.insert({"load_as_fp16", true});

  auto save_op = paddle::framework::OpRegistry::CreateOp(
      "save", {{"X", {"test_var"}}}, {}, attrs);
  save_op->Run(scope, place);

  auto load_var = scope.Var("out_var");
  auto target = load_var->GetMutable<paddle::framework::LoDTensor>();
  auto load_op = paddle::framework::OpRegistry::CreateOp(
      "load", {}, {{"Out", {"out_var"}}}, attrs);
  load_op->Run(scope, place);

  // The loaded tensor must come back as float16 data...
  paddle::platform::float16* actual = target->data<paddle::platform::float16>();
  for (int64_t i = 0; i < tensor->numel(); ++i) {
    EXPECT_EQ(expect[i], static_cast<float>(actual[i]));
  }

  // ...and carry the same LoD as the saved tensor.
  auto& actual_lod = target->lod();
  EXPECT_EQ(expect_lod.size(), actual_lod.size());
  for (size_t i = 0; i < expect_lod.size(); ++i) {
    for (size_t j = 0; j < expect_lod[i].size(); ++j) {
      EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]);
    }
  }
}

0 commit comments

Comments
 (0)