from __future__ import unicode_literals

import os
- import unittest
- from collections import namedtuple
-
- import graphviz as gv
- from onnx import TensorProto
- from onnx import helper

+ import numpy as np
import tensorflow as tf
from tensorflow.python.framework.graph_util import convert_variables_to_constants
- import numpy as np
-
- import os
-

# pylint: disable=missing-docstring

training_epochs = 100

# Training Data
- train_X = np.array(
+ _train_x = np.array(
    [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
- train_Y = np.array(
+ _train_y = np.array(
    [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
- test_X = np.array([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
- test_Y = np.array([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])
+ _test_x = np.array([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])
+ _test_y = np.array([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])
+

def freeze_session(sess, keep_var_names=None, output_names=None, clear_devices=True):
    """Freezes the state of a session into a pruned computation graph."""
@@ -52,20 +44,21 @@ def freeze_session(sess, keep_var_names=None, output_names=None, clear_devices=T
                                                  output_names, freeze_var_names)
    return frozen_graph

+
def train(model_path):
-     n_samples = train_X.shape[0]
+     n_samples = _train_x.shape[0]

    # tf Graph Input
-     X = tf.placeholder(tf.float32, name="X")
-     Y = tf.placeholder(tf.float32, name="Y")
+     x = tf.placeholder(tf.float32, name="X")
+     y = tf.placeholder(tf.float32, name="Y")

    # Set model weights
-     W = tf.Variable(np.random.randn(), name="W")
+     w = tf.Variable(np.random.randn(), name="W")
    b = tf.Variable(np.random.randn(), name="b")

-     pred = tf.add(tf.multiply(X, W), b)
+     pred = tf.add(tf.multiply(x, w), b)
    pred = tf.identity(pred, name="pred")
-     cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)
+     cost = tf.reduce_sum(tf.pow(pred - y, 2)) / (2 * n_samples)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    saver = tf.train.Saver()
@@ -75,11 +68,11 @@ def train(model_path):
        sess.run(tf.global_variables_initializer())

        # Fit all training data
-         for epoch in range(training_epochs):
-             for (x, y) in zip(train_X, train_Y):
-                 sess.run(optimizer, feed_dict={X: x, Y: y})
-         training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
-         testing_cost = sess.run(cost, feed_dict={X: test_X, Y: test_Y})
+         for _ in range(training_epochs):
+             for (ix, iy) in zip(_train_x, _train_y):
+                 sess.run(optimizer, feed_dict={x: ix, y: iy})
+         training_cost = sess.run(cost, feed_dict={x: _train_x, y: _train_y})
+         testing_cost = sess.run(cost, feed_dict={x: _test_x, y: _test_y})

        print("train_cost={}, test_cost={}, diff={}"
              .format(training_cost, testing_cost, abs(training_cost - testing_cost)))

@@ -92,8 +85,7 @@ def train(model_path):
        tf.train.write_graph(frozen_graph, p, "frozen.pb", as_text=False)

        p = os.path.abspath(os.path.join(model_path, "saved_model"))
-         tf.saved_model.simple_save(sess, p, inputs={"X": X}, outputs={"pred": pred})
+         tf.saved_model.simple_save(sess, p, inputs={"X": x}, outputs={"pred": pred})


train("models/regression")
-
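
As a side note (not part of this change): a minimal sketch of how the frozen graph exported by train() could be sanity-checked, assuming TensorFlow 1.x. The path in FROZEN_PB is hypothetical (the real directory is whatever `p` resolves to inside train()); the tensor names "X:0" and "pred:0" come from the placeholder and identity names in the script above.

import numpy as np
import tensorflow as tf

# Hypothetical location of the frozen GraphDef written by train().
FROZEN_PB = "models/regression/frozen.pb"

# Read the serialized GraphDef and import it into a fresh graph.
with tf.gfile.GFile(FROZEN_PB, "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(graph_def, name="")

# Feed the "X" placeholder and fetch the "pred" output by tensor name.
with tf.Session(graph=graph) as sess:
    preds = sess.run("pred:0", feed_dict={"X:0": np.array([6.83, 4.668, 8.9])})
    print(preds)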