@@ -58,14 +58,15 @@ class MKLDNNLayer : public Layer {
58
58
std::vector<mkldnn::primitive> pipelineFwd_;
59
59
std::vector<mkldnn::primitive> pipelineBwd_;
60
60
61
- // / value and grad are seperated as internal and external buffers.
62
- // / each MKLDNNLayer must init or reset internal buffer at least,
63
- // / and the external buffer format is always nchw of nc(when h==w==1),
64
- // / which is the same format as paddle.
65
- // / The output_.value and output_.grad always save the external data,
66
- // / when mixed with cpu device.
67
- // / When all layers are mkldnn layers, they could save internal data.
68
- // / below MKLDNNMatrix buffers are all internal buffers
61
+ /* Value and grad are separated as internal and external buffers.
62
+ * Each MKLDNNLayer must init or reset internal buffer at least,
63
+ * and the external buffer format is always nchw or nc (when h==w==1),
64
+ * which is the same format as paddle.
65
+ * The output_.value and output_.grad always save the external data,
66
+ * when mixed with cpu device.
67
+ * When all layers are mkldnn layers, they could save internal data.
68
+ */
69
+ // below MKLDNNMatrix buffers are all internal buffers
69
70
MKLDNNMatrixPtr inVal_;
70
71
MKLDNNMatrixPtr inGrad_;
71
72
MKLDNNMatrixPtr outVal_;
@@ -120,8 +121,8 @@ class MKLDNNLayer : public Layer {
120
121
~MKLDNNLayer () {}
121
122
122
123
virtual bool init (const LayerMap& layerMap, const ParameterMap& parameterMap);
123
- void forward (PassType passType) override ;
124
- void backward (const UpdateCallback& callback) override ;
124
+ virtual void forward (PassType passType);
125
+ virtual void backward (const UpdateCallback& callback);
125
126
126
127
/* *
127
128
* reshape the input image sizes
@@ -217,15 +218,15 @@ class MKLDNNLayer : public Layer {
217
218
* reset output grad from internal primitive desc.
218
219
* merge grad if necessary.
219
220
* reset both internal and external buffer and create reorder if necessary.
220
- * note: about merge grad, when this layer has serval outputs,
221
+ * note: about merge grad, when this layer has several outputs,
221
222
* it could not be mixed with cpu device,
222
223
* since it can not get memory desc from cpu device.
223
224
*/
224
225
void resetOutGrad (MKLDNNMatrixPtr& out, mkldnn::memory::primitive_desc intPD);
225
226
226
227
/* *
227
228
* reset the merge grad primitive if necessary.
228
- * note: do not support the grads are mixed with cpu device,
229
+ * note: does not support grads mixed with cpu device,
229
230
* since it can not get memory desc from cpu device.
230
231
*/
231
232
void resetMergeGrad (MKLDNNMatrixPtr& out);
@@ -313,17 +314,17 @@ class MKLDNNLayer : public Layer {
313
314
* print the mkldnn memory format of grad
314
315
*/
315
316
virtual void printGradFormat () {
316
- if (extInGrad_) {
317
- VLOG (MKLDNN_FMTS) << extInGrad_->getFormat () << " <<< " ;
318
- }
319
- if (inGrad_) {
320
- VLOG (MKLDNN_FMTS) << inGrad_->getFormat () << " <<<" ;
317
+ if (extOutGrad_) {
318
+ VLOG (MKLDNN_FMTS) << extOutGrad_->getFormat ();
321
319
}
322
320
if (outGrad_) {
323
321
VLOG (MKLDNN_FMTS) << outGrad_->getFormat () << " <<< " ;
324
322
}
325
- if (extOutGrad_) {
326
- VLOG (MKLDNN_FMTS) << extOutGrad_->getFormat ();
323
+ if (inGrad_) {
324
+ VLOG (MKLDNN_FMTS) << inGrad_->getFormat () << " <<<" ;
325
+ }
326
+ if (extInGrad_) {
327
+ VLOG (MKLDNN_FMTS) << extInGrad_->getFormat () << " <<< " ;
327
328
}
328
329
if (wgtGrad_) {
329
330
VLOG (MKLDNN_FMTS) << " Weight grad format: " << wgtGrad_->getFormat ();
0 commit comments