
Commit 8c7a74f

MarkDaoust authored and copybara-github committed
Simplify the TensorFlow basics section.
1. Split "TensorFlow basics" from "TensorFlow in depth".
2. Compress `eager.ipynb` to a quick overview of "TF Basics".
3. "Effective TF2" is written for TF1 users.
4. Move everything else to a new section.

PiperOrigin-RevId: 414830812
1 parent 8f11555 commit 8c7a74f

File tree

5 files changed

+932 -1194 lines changed


site/en/guide/_toc.yaml

Lines changed: 18 additions & 14 deletions
@@ -3,44 +3,48 @@ toc:
   path: /guide/
 
 - heading: "TensorFlow basics"
-- title: "Eager execution"
-  path: /guide/eager
-- title: "Tensor"
+- title: "Overview"
+  path: /guide/basics
+- title: "Tensors"
   path: /guide/tensor
-- title: "Variable"
+- title: "Variables"
   path: /guide/variable
 - title: "Automatic differentiation"
   path: /guide/autodiff
-- title: "Intro to graphs and functions"
+- title: "Graphs and functions"
   path: /guide/intro_to_graphs
-- title: "Intro to modules, layers, and models"
+- title: "Modules, layers, and models"
   path: /guide/intro_to_modules
 - title: "Training loops"
   path: /guide/basic_training_loops
+
+- heading: "Keras"
+- include: /guide/keras/_toc.yaml
+
+- heading: "TensorFlow in depth"
+- title: "Tensor slicing"
+  path: /guide/tensor_slicing
 - title: "Advanced autodiff"
   path: /guide/advanced_autodiff
 - title: "Ragged tensor"
   path: /guide/ragged_tensor
 - title: "Sparse tensor"
   path: /guide/sparse_tensor
+
+- title: "Random number generation"
+  path: /guide/random_numbers
 - title: "NumPy API"
+  status: experimental
   path: /guide/tf_numpy
-- title: "Tensor slicing"
-  path: /guide/tensor_slicing
 - title: "Thinking in TensorFlow 2"
   path: /guide/effective_tf2
 
-- heading: "Keras"
-- include: /guide/keras/_toc.yaml
-
 - heading: "Customization"
 - title: "Create an op"
   path: /guide/create_op
-- title: "Extension type"
+- title: "Extension types"
   path: /guide/extension_type
   status: experimental
-- title: "Random number generation"
-  path: /guide/random_numbers
 
 - heading: "Data input pipelines"
 - title: "tf.data"

site/en/guide/basic_training_loops.ipynb

Lines changed: 81 additions & 30 deletions
@@ -90,7 +90,11 @@
 },
 "outputs": [],
 "source": [
-"import tensorflow as tf"
+"import tensorflow as tf\n",
+"\n",
+"import matplotlib.pyplot as plt\n",
+"\n",
+"colors = plt.rcParams['axes.prop_cycle'].by_key()['color']"
 ]
 },
 {
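The new setup cell also grabs Matplotlib's default color cycle so later plotting cells can reuse consistent colors for paired lines (a weight and its true value, for example). A minimal standalone sketch of what that last line yields, assuming Matplotlib's default style:

import matplotlib.pyplot as plt

# rcParams['axes.prop_cycle'] is a Cycler object; by_key() flattens it
# into a dict of lists, and 'color' holds the default hex color strings.
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
print(colors[0], colors[1])  # '#1f77b4' '#ff7f0e' under the default style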
@@ -142,16 +146,20 @@
 "TRUE_W = 3.0\n",
 "TRUE_B = 2.0\n",
 "\n",
-"NUM_EXAMPLES = 1000\n",
+"NUM_EXAMPLES = 201\n",
 "\n",
 "# A vector of random x values\n",
-"x = tf.random.normal(shape=[NUM_EXAMPLES])\n",
+"x = tf.linspace(-2,2, NUM_EXAMPLES)\n",
+"x = tf.cast(x, tf.float32)\n",
+"\n",
+"def f(x):\n",
+"  return x * TRUE_W + TRUE_B\n",
 "\n",
 "# Generate some noise\n",
 "noise = tf.random.normal(shape=[NUM_EXAMPLES])\n",
 "\n",
 "# Calculate y\n",
-"y = x * TRUE_W + TRUE_B + noise"
+"y = f(x) + noise"
 ]
 },
 {
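This change swaps the random-normal x values for an evenly spaced grid, which plots as a clean line rather than a cloud, and factors the ground truth into a reusable f(x). A runnable sketch of the data-generation cell as it reads after the change:

import tensorflow as tf

TRUE_W = 3.0
TRUE_B = 2.0
NUM_EXAMPLES = 201

# Evenly spaced inputs on [-2, 2]; the cast keeps x in float32 to match
# the float32 noise generated below.
x = tf.cast(tf.linspace(-2, 2, NUM_EXAMPLES), tf.float32)

def f(x):
  # The noise-free ground-truth line y = 3x + 2.
  return x * TRUE_W + TRUE_B

noise = tf.random.normal(shape=[NUM_EXAMPLES])
y = f(x) + noise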
@@ -163,9 +171,7 @@
 "outputs": [],
 "source": [
 "# Plot all the data\n",
-"import matplotlib.pyplot as plt\n",
-"\n",
-"plt.scatter(x, y, c=\"b\")\n",
+"plt.plot(x, y, '.')\n",
 "plt.show()"
 ]
 },
@@ -271,8 +277,10 @@
 },
 "outputs": [],
 "source": [
-"plt.scatter(x, y, c=\"b\")\n",
-"plt.scatter(x, model(x), c=\"r\")\n",
+"plt.plot(x, y, '.', label=\"Data\")\n",
+"plt.plot(x, f(x), label=\"Ground truth\")\n",
+"plt.plot(x, model(x), label=\"Predictions\")\n",
+"plt.legend()\n",
 "plt.show()\n",
 "\n",
 "print(\"Current loss: %1.6f\" % loss(y, model(x)).numpy())"
@@ -341,23 +349,37 @@
 "model = MyModel()\n",
 "\n",
 "# Collect the history of W-values and b-values to plot later\n",
-"Ws, bs = [], []\n",
+"weights = []\n",
+"biases = []\n",
 "epochs = range(10)\n",
 "\n",
 "# Define a training loop\n",
+"def report(model, loss):\n",
+"  return f\"W = {model.w.numpy():1.2f}, b = {model.b.numpy():1.2f}, loss={current_loss:2.5f}\"\n",
+"\n",
+"\n",
 "def training_loop(model, x, y):\n",
 "\n",
 "  for epoch in epochs:\n",
 "    # Update the model with the single giant batch\n",
 "    train(model, x, y, learning_rate=0.1)\n",
 "\n",
 "    # Track this before I update\n",
-"    Ws.append(model.w.numpy())\n",
-"    bs.append(model.b.numpy())\n",
+"    weights.append(model.w.numpy())\n",
+"    biases.append(model.b.numpy())\n",
 "    current_loss = loss(y, model(x))\n",
 "\n",
-"    print(\"Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f\" %\n",
-"          (epoch, Ws[-1], bs[-1], current_loss))\n"
+"    print(f\"Epoch {epoch:2d}:\")\n",
+"    print(\"    \", report(model, current_loss))"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {
+"id": "8dKKLU4KkQEq"
+},
+"source": [
+"Do the training"
 ]
 },
 {
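The loop now tracks weights and biases in separately named lists and moves status formatting into a report helper. As committed, report accepts a loss argument but formats the enclosing current_loss instead; the sketch below is a variant that reads its argument, and it assumes the notebook's earlier MyModel, loss, and train definitions:

weights = []
biases = []
epochs = range(10)

def report(model, loss_value):
  # Variant of the committed helper: formats the value passed in.
  return f"W = {model.w.numpy():1.2f}, b = {model.b.numpy():1.2f}, loss={loss_value:2.5f}"

def training_loop(model, x, y):
  for epoch in epochs:
    # One gradient-descent step on the entire batch.
    train(model, x, y, learning_rate=0.1)

    # Record the parameters after this step.
    weights.append(model.w.numpy())
    biases.append(model.b.numpy())
    current_loss = loss(y, model(x))

    print(f"Epoch {epoch:2d}:")
    print("    ", report(model, current_loss))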
@@ -368,21 +390,50 @@
 },
 "outputs": [],
 "source": [
-"print(\"Starting: W=%1.2f b=%1.2f, loss=%2.5f\" %\n",
-"      (model.w, model.b, loss(y, model(x))))\n",
+"current_loss = loss(y, model(x))\n",
 "\n",
-"# Do the training\n",
-"training_loop(model, x, y)\n",
+"print(f\"Starting:\")\n",
+"print(\"    \", report(model, current_loss))\n",
 "\n",
-"# Plot it\n",
-"plt.plot(epochs, Ws, \"r\",\n",
-"         epochs, bs, \"b\")\n",
+"training_loop(model, x, y)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {
+"id": "JPJgimg8kSA4"
+},
+"source": [
+"Plot the evolution of the weights over time:"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {
+"id": "ND1fQw8sbTNr"
+},
+"outputs": [],
+"source": [
+"plt.plot(epochs, weights, label='Weights', color=colors[0])\n",
+"plt.plot(epochs, [TRUE_W] * len(epochs), '--',\n",
+"         label = \"True weight\", color=colors[0])\n",
 "\n",
-"plt.plot([TRUE_W] * len(epochs), \"r--\",\n",
-"         [TRUE_B] * len(epochs), \"b--\")\n",
+"plt.plot(epochs, biases, label='bias', color=colors[1])\n",
+"plt.plot(epochs, [TRUE_B] * len(epochs), \"--\",\n",
+"         label=\"True bias\", color=colors[1])\n",
 "\n",
-"plt.legend([\"W\", \"b\", \"True W\", \"True b\"])\n",
-"plt.show()\n"
+"plt.legend()\n",
+"plt.show()"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {
+"id": "zhlwj1ojkcUP"
+},
+"source": [
+"Visualize how the trained model performs"
 ]
 },
 {
@@ -393,9 +444,10 @@
 },
 "outputs": [],
 "source": [
-"# Visualize how the trained model performs\n",
-"plt.scatter(x, y, c=\"b\")\n",
-"plt.scatter(x, model(x), c=\"r\")\n",
+"plt.plot(x, y, '.', label=\"Data\")\n",
+"plt.plot(x, f(x), label=\"Ground truth\")\n",
+"plt.plot(x, model(x), label=\"Predictions\")\n",
+"plt.legend()\n",
 "plt.show()\n",
 "\n",
 "print(\"Current loss: %1.6f\" % loss(model(x), y).numpy())"
@@ -531,8 +583,7 @@
 "colab": {
 "collapsed_sections": [
 "5rmpybwysXGV",
-"iKD__8kFCKNt",
-"vPnIVuaSJwWz"
+"iKD__8kFCKNt"
 ],
 "name": "basic_training_loops.ipynb",
 "toc_visible": true
