@@ -63,7 +63,7 @@ TEST(SaveLoadOp, CPU) {
   }
 }
 
-TEST(SaveLoadFP16Op, CPU) {
+TEST(SaveFP16Op, CPU) {
   paddle::framework::Scope scope;
   paddle::platform::CPUPlace place;
 
@@ -94,3 +94,52 @@ TEST(SaveLoadFP16Op, CPU) {
     EXPECT_EQ(expect[i], static_cast<float>(actual[i]));
   }
 }
+
+TEST(LoadFP16Op, CPU) {
+  paddle::framework::Scope scope;
+  paddle::platform::CPUPlace place;
+
+  auto var = scope.Var("test_var");
+  auto tensor = var->GetMutable<paddle::framework::LoDTensor>();
+  tensor->Resize({3, 10});
+
+  paddle::framework::LoD expect_lod;
+  expect_lod.resize(1);
+  expect_lod[0].push_back(0);
+  expect_lod[0].push_back(1);
+  expect_lod[0].push_back(2);
+  expect_lod[0].push_back(3);
+
+  tensor->set_lod(expect_lod);
+  float* expect = tensor->mutable_data<float>(place);
+  for (int64_t i = 0; i < tensor->numel(); ++i) {
+    expect[i] = static_cast<float>(paddle::platform::float16(i));
+  }
+
+  paddle::framework::AttributeMap attrs;
+  attrs.insert({"file_path", std::string("tensor.save")});
+  attrs.insert({"load_as_fp16", true});
+
+  auto save_op = paddle::framework::OpRegistry::CreateOp(
+      "save", {{"X", {"test_var"}}}, {}, attrs);
+  save_op->Run(scope, place);
+
+  auto load_var = scope.Var("out_var");
+  auto load_op = paddle::framework::OpRegistry::CreateOp(
+      "load", {}, {{"Out", {"out_var"}}}, attrs);
+  load_op->Run(scope, place);
+
+  auto target = load_var->Get<paddle::framework::LoDTensor>();
+  paddle::platform::float16* actual = target.data<paddle::platform::float16>();
+  for (int64_t i = 0; i < tensor->numel(); ++i) {
+    EXPECT_EQ(expect[i], static_cast<float>(actual[i]));
+  }
+
+  auto& actual_lod = target.lod();
+  EXPECT_EQ(expect_lod.size(), actual_lod.size());
+  for (size_t i = 0; i < expect_lod.size(); ++i) {
+    for (size_t j = 0; j < expect_lod[i].size(); ++j) {
+      EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]);
+    }
+  }
+}