Skip to content

Commit 3afb9dc

Browse files
committed
use double in unittest.
1 parent bce4f7d commit 3afb9dc

File tree

2 files changed

+10
-16
lines changed

2 files changed

+10
-16
lines changed

paddle/operators/linear_chain_crf_op.cc

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -195,8 +195,6 @@ class LinearChainCRFOp : public framework::OperatorWithKernel {
195195
// is the sequence number in a mini-batch. The dimension set here should be
196196
// resized to its correct size in the function Compute.
197197
ctx->SetOutputDim("LogLikelihood", {emission_dims[0], 1});
198-
199-
ctx->ShareLoD("Emission", /*->*/ "EmissionExps");
200198
}
201199

202200
protected:
@@ -402,7 +400,7 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel {
402400
// operator is determined by its input "EmissionExps".
403401
framework::DataType IndicateDataType(
404402
const framework::ExecutionContext& ctx) const override {
405-
return framework::ToDataType(ctx.Input<LoDTensor>("EmissionExps")->type());
403+
return framework::ToDataType(ctx.Input<LoDTensor>("LogLikelihood")->type());
406404
}
407405
};
408406

@@ -562,7 +560,9 @@ REGISTER_OP(linear_chain_crf, ops::LinearChainCRFOp, ops::LinearChainCRFOpMaker,
562560
linear_chain_crf_grad, ops::LinearChainCRFGradOp);
563561
REGISTER_OP_CPU_KERNEL(
564562
linear_chain_crf,
565-
ops::LinearChainCRFOpKernel<paddle::platform::CPUPlace, float>);
563+
ops::LinearChainCRFOpKernel<paddle::platform::CPUPlace, float>,
564+
ops::LinearChainCRFOpKernel<paddle::platform::CPUPlace, double>);
566565
REGISTER_OP_CPU_KERNEL(
567566
linear_chain_crf_grad,
568-
ops::LinearChainCRFGradOpKernel<paddle::platform::CPUPlace, float>);
567+
ops::LinearChainCRFGradOpKernel<paddle::platform::CPUPlace, float>,
568+
ops::LinearChainCRFGradOpKernel<paddle::platform::CPUPlace, double>);

python/paddle/v2/framework/tests/test_linear_chain_crf_op.py

Lines changed: 5 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ def __init__(self, seq_start_positions, emission_weights, emission_row_max,
3232
# alpha is a memo table in dynamic programming to calculate
3333
# normalization factor.
3434
self.alpha = np.zeros(
35-
(seq_start_positions[-1], self.tag_num), dtype="float32")
35+
(seq_start_positions[-1], self.tag_num), dtype="float64")
3636
self.log_likelihood = np.zeros((self.seq_num, 1))
3737

3838
def _l1_norm(self, x):
@@ -92,12 +92,12 @@ def set_test_data(self):
9292
for i in range(SEQ_NUM):
9393
lod[-1].append(lod[-1][-1] + random.randint(1, MAX_SEQ_LEN))
9494
emission = np.random.uniform(-1, 1,
95-
[lod[-1][-1], TAG_NUM]).astype("float32")
95+
[lod[-1][-1], TAG_NUM]).astype("float64")
9696
emission_row_max = np.amax(emission, axis=1, keepdims=True)
9797
emission_exps = np.exp(emission - emission_row_max)
9898

9999
transition = np.random.uniform(-0.5, 0.5,
100-
[TAG_NUM + 2, TAG_NUM]).astype("float32")
100+
[TAG_NUM + 2, TAG_NUM]).astype("float64")
101101
transition_exps = np.exp(transition)
102102

103103
labels = np.random.randint(
@@ -128,17 +128,11 @@ def test_check_output(self):
128128
self.check_output()
129129

130130
def test_check_grad(self):
131-
self.check_grad(
132-
["Emission", "Transition"],
133-
"LogLikelihood",
134-
max_relative_error=0.05)
131+
self.check_grad(["Emission", "Transition"], "LogLikelihood")
135132

136133
def test_check_grad_ignore_transition(self):
137134
self.check_grad(
138-
["Emission"],
139-
"LogLikelihood",
140-
max_relative_error=0.05,
141-
no_grad_set=set("Transition"))
135+
["Emission"], "LogLikelihood", no_grad_set=set("Transition"))
142136

143137

144138
if __name__ == "__main__":

0 commit comments

Comments
 (0)