
Commit 2ed1542

Merge pull request #2456 from nedo99/faster_ci
ci: Enable faster execution on CI
2 parents 424aa45 + b412a9b commit 2ed1542


3 files changed · +27 −5 lines changed

3 files changed

+27
-5
lines changed
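The speed-up relies on one small, repeated pattern: each model keeps its original default epoch count, but the count can be overridden through an environment variable so CI can train for only a few epochs. Below is a minimal standalone sketch of that pattern; the variable name ITEX_NUM_EPOCHS and the default of 200 come from this commit, while the print line is only illustrative.

import os

# Default number of training epochs, used when nothing is set in the environment.
num_epochs = 200

# CI (or a local run) can override the default, e.g. ITEX_NUM_EPOCHS=5.
if "ITEX_NUM_EPOCHS" in os.environ:
    num_epochs = int(os.environ.get('ITEX_NUM_EPOCHS'))

print(f"Training for {num_epochs} epochs")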

AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/TextGenerationModelTraining.ipynb

Lines changed: 13 additions & 2 deletions
@@ -47,6 +47,7 @@
 "source": [
 "import string\n",
 "import requests\n",
+"import os\n",
 "\n",
 "response = requests.get('https://www.gutenberg.org/cache/epub/1497/pg1497.txt')\n",
 "data = response.text.split('\\n')\n",
@@ -253,6 +254,11 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"num_epochs = 200\n",
+"# For custom epochs numbers from the environment\n",
+"if \"ITEX_NUM_EPOCHS\" in os.environ:\n",
+"    num_epochs = int(os.environ.get('ITEX_NUM_EPOCHS'))\n",
+"\n",
 "neuron_coef = 4\n",
 "itex_lstm_model = Sequential()\n",
 "itex_lstm_model.add(Embedding(input_dim=vocab_size, output_dim=seq_length, input_length=seq_length))\n",
@@ -262,7 +268,7 @@
 "itex_lstm_model.add(Dense(units=vocab_size, activation='softmax'))\n",
 "itex_lstm_model.summary()\n",
 "itex_lstm_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
-"itex_lstm_model.fit(x,y, batch_size=256, epochs=200)"
+"itex_lstm_model.fit(x,y, batch_size=256, epochs=num_epochs)"
 ]
 },
 {
@@ -296,6 +302,11 @@
 "seq_length = x.shape[1]\n",
 "vocab_size = y.shape[1]\n",
 "\n",
+"num_epochs = 20\n",
+"# For custom epochs numbers\n",
+"if \"KERAS_NUM_EPOCHS\" in os.environ:\n",
+"    num_epochs = int(os.environ.get('KERAS_NUM_EPOCHS'))\n",
+"\n",
 "neuron_coef = 1\n",
 "keras_lstm_model = Sequential()\n",
 "keras_lstm_model.add(Embedding(input_dim=vocab_size, output_dim=seq_length, input_length=seq_length))\n",
@@ -305,7 +316,7 @@
 "keras_lstm_model.add(Dense(units=vocab_size, activation='softmax'))\n",
 "keras_lstm_model.summary()\n",
 "keras_lstm_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n",
-"keras_lstm_model.fit(x,y, batch_size=256, epochs=20)"
+"keras_lstm_model.fit(x,y, batch_size=256, epochs=num_epochs)"
 ]
 },
 {

AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/TextGenerationModelTraining.py

Lines changed: 13 additions & 2 deletions
@@ -28,6 +28,7 @@
 
 import string
 import requests
+import os
 
 response = requests.get('https://www.gutenberg.org/cache/epub/1497/pg1497.txt')
 data = response.text.split('\n')
@@ -168,6 +169,11 @@ def tokenize_prepare_dataset(lines):
 # In[ ]:
 
 
+num_epochs = 200
+# For custom epochs numbers from the environment
+if "ITEX_NUM_EPOCHS" in os.environ:
+    num_epochs = int(os.environ.get('ITEX_NUM_EPOCHS'))
+
 neuron_coef = 4
 itex_lstm_model = Sequential()
 itex_lstm_model.add(Embedding(input_dim=vocab_size, output_dim=seq_length, input_length=seq_length))
@@ -177,7 +183,7 @@ def tokenize_prepare_dataset(lines):
 itex_lstm_model.add(Dense(units=vocab_size, activation='softmax'))
 itex_lstm_model.summary()
 itex_lstm_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
-itex_lstm_model.fit(x,y, batch_size=256, epochs=200)
+itex_lstm_model.fit(x,y, batch_size=256, epochs=num_epochs)
 
 
 # ## Compared to LSTM from Keras
@@ -201,6 +207,11 @@ def tokenize_prepare_dataset(lines):
 seq_length = x.shape[1]
 vocab_size = y.shape[1]
 
+num_epochs = 20
+# For custom epochs numbers
+if "KERAS_NUM_EPOCHS" in os.environ:
+    num_epochs = int(os.environ.get('KERAS_NUM_EPOCHS'))
+
 neuron_coef = 1
 keras_lstm_model = Sequential()
 keras_lstm_model.add(Embedding(input_dim=vocab_size, output_dim=seq_length, input_length=seq_length))
@@ -210,7 +221,7 @@ def tokenize_prepare_dataset(lines):
 keras_lstm_model.add(Dense(units=vocab_size, activation='softmax'))
 keras_lstm_model.summary()
 keras_lstm_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
-keras_lstm_model.fit(x,y, batch_size=256, epochs=20)
+keras_lstm_model.fit(x,y, batch_size=256, epochs=num_epochs)
 
 
 # ## Generating text based on the input

AI-and-Analytics/Features-and-Functionality/IntelTensorFlow_TextGeneration_with_LSTM/sample.json

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@
 "id": "inc_text_generation_lstm_py",
 "steps": [
 "export ITEX_ENABLE_NEXTPLUGGABLE_DEVICE=0",
-"python TextGenerationModelTraining.py"
+"ITEX_NUM_EPOCHS=5 KERAS_NUM_EPOCHS=5 python TextGenerationModelTraining.py"
 ]
 }
]
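With the updated sample.json step, the CI run trains each model for 5 epochs instead of the defaults of 200 and 20. As a hedged sketch only, the same run could be reproduced locally from Python roughly like this; the script name and environment variables come from this commit, and running it directly from a shell, as the JSON step does, works just as well (this assumes the command is run from the script's own directory).

import os
import subprocess

# Mirror the CI step: reduced epoch counts plus the ITEX plugin setting
# exported in the preceding step of sample.json.
env = dict(os.environ,
           ITEX_ENABLE_NEXTPLUGGABLE_DEVICE="0",
           ITEX_NUM_EPOCHS="5",
           KERAS_NUM_EPOCHS="5")
subprocess.run(["python", "TextGenerationModelTraining.py"], env=env, check=True)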
