Skip to content

Commit 8a705ae

Browse files
committed
Modifications to TF scripts
1 parent 62d29e4 commit 8a705ae

File tree

2 files changed

+3
-5
lines changed

2 files changed

+3
-5
lines changed

scripts/cifar.sc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@
2929
tf.learn.ReLU("Layer_0/ReLU", 0.1f) >>
3030
tf.learn.MaxPool("Layer_0/MaxPool", Seq(1, 2, 2, 1), 1, 1, SamePadding) >>
3131
tf.learn.Conv2D("Layer_1/Conv2D", Shape(2, 2, 16, 32), 1, 1, SamePadding) >>
32-
tf.learn.AddBias(variableScope = "Bias_1") >>
32+
tf.learn.AddBias("Bias_1") >>
3333
tf.learn.ReLU("Layer_1/ReLU", 0.1f) >>
3434
tf.learn.MaxPool("Layer_1/MaxPool", Seq(1, 2, 2, 1), 1, 1, SamePadding) >>
3535
tf.learn.Flatten("Layer_2/Flatten") >>

scripts/rnnPTB.sc

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,14 +23,12 @@
2323
object RNNOutputLayer extends tf.learn.Layer[LSTMTuple, Output]("RNNOutputLayer") {
2424
override val layerType: String = "RNNOutputLayer"
2525

26-
override protected def forward(
27-
input: LSTMTuple, mode: tf.learn.Mode): tf.learn.LayerInstance[LSTMTuple, Output] = {
26+
override protected def _forward(input: LSTMTuple, mode: tf.learn.Mode): Output = {
2827
val weights = tf.variable("OutputWeights", dataType, Shape(numHidden, vocabularySize))
2928
val bias = tf.variable("OutputBias", dataType, Shape(vocabularySize))
3029
val output = tf.linear(tf.reshape(input.output, Shape(-1, numHidden)), weights.value, bias.value)
3130
// We reshape the output logits to feed into the sequence loss layer
32-
val reshapedOutput = tf.reshape(output, Shape(batchSize, numSteps, vocabularySize))
33-
tf.learn.LayerInstance(input, reshapedOutput, trainableVariables = Set(weights, bias))
31+
tf.reshape(output, Shape(batchSize, numSteps, vocabularySize))
3432
}
3533
}
3634

0 commit comments

Comments (0)