Commit 22b8aff

knit guides and examples
1 parent 442bad2 · commit 22b8aff

39 files changed: +1281, -1068 lines

man/layer_tfsm.Rd

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default.

vignettes/custom_train_step_in_tensorflow.Rmd

Lines changed: 17 additions & 20 deletions
@@ -42,7 +42,7 @@ Let's see how that works.
 ## Setup
 
 
-```r
+``` r
 library(reticulate)
 library(tensorflow, exclude = c("set_random_seed", "shape"))
 library(keras3)
@@ -74,11 +74,10 @@ to update the state of the metrics that were passed in `compile()`,
 and we query results from `self$metrics` at the end to retrieve their current value.
 
 
-```r
+``` r
 CustomModel <- new_model_class(
   "CustomModel",
   train_step = function(data) {
-    # unpack data into x, y, and sample_weight
     c(x, y = NULL, sample_weight = NULL) %<-% data
 
     with(tf$GradientTape() %as% tape, {
@@ -113,7 +112,7 @@ CustomModel <- new_model_class(
 Let's try this out:
 
 
-```r
+``` r
 # Construct and compile an instance of CustomModel
 inputs <- keras_input(shape = 32)
 outputs <- layer_dense(inputs, 1)
@@ -128,7 +127,7 @@ model |> fit(x, y, epochs = 3)
 
 ```
 ## Epoch 1/3
-## 32/32 - 1s - 23ms/step - loss: 2.9118 - mae: 1.3597
+## 32/32 - 1s - 19ms/step - loss: 2.9118 - mae: 1.3597
 ## Epoch 2/3
 ## 32/32 - 0s - 1ms/step - loss: 2.6026 - mae: 1.2856
 ## Epoch 3/3
@@ -154,7 +153,7 @@ on any object listed here at the beginning of each `fit()` epoch or at the begin
 `evaluate()`.
 
 
-```r
+``` r
 CustomModel <- new_model_class(
   "CustomModel",
   initialize = function(...) {
@@ -164,7 +163,6 @@ CustomModel <- new_model_class(
     self$loss_fn <- loss_mean_squared_error()
   },
   train_step = function(data) {
-    # unpack data into x, y, and sample_weight
     c(x, y = NULL, sample_weight = NULL) %<-% data
 
     with(tf$GradientTape() %as% tape, {
@@ -214,11 +212,11 @@ model |> fit(x, y, epochs = 3)
 
 ```
 ## Epoch 1/3
-## 32/32 - 1s - 19ms/step - loss: 2.6540 - mae: 1.2901
+## 32/32 - 1s - 16ms/step - loss: 2.6540 - mae: 1.2901
 ## Epoch 2/3
-## 32/32 - 0s - 2ms/step - loss: 2.4139 - mae: 1.2303
+## 32/32 - 0s - 1ms/step - loss: 2.4139 - mae: 1.2303
 ## Epoch 3/3
-## 32/32 - 0s - 2ms/step - loss: 2.2080 - mae: 1.1761
+## 32/32 - 0s - 1ms/step - loss: 2.2080 - mae: 1.1761
 ```
 
 ## Supporting `sample_weight` & `class_weight`
@@ -233,11 +231,10 @@ it manually if you don't rely on `compile()` for losses & metrics)
 - That's it.
 
 
-```r
+``` r
 CustomModel <- new_model_class(
   "CustomModel",
   train_step = function(data) {
-    # unpack data into x, y, and sample_weight
     c(x, y = NULL, sample_weight = NULL) %<-% data
 
     with(tf$GradientTape() %as% tape, {
@@ -285,11 +282,11 @@ model |> fit(x, y, sample_weight = sw, epochs = 3)
 
 ```
 ## Epoch 1/3
-## 32/32 - 1s - 21ms/step - loss: 0.1607 - mae: 1.3018
+## 32/32 - 1s - 18ms/step - loss: 0.1607 - mae: 1.3018
 ## Epoch 2/3
 ## 32/32 - 0s - 1ms/step - loss: 0.1452 - mae: 1.2999
 ## Epoch 3/3
-## 32/32 - 0s - 2ms/step - loss: 0.1335 - mae: 1.2986
+## 32/32 - 0s - 1ms/step - loss: 0.1335 - mae: 1.2986
 ```
 
 ## Providing your own evaluation step
@@ -298,7 +295,7 @@ What if you want to do the same for calls to `model.evaluate()`? Then you would
 override `test_step` in exactly the same way. Here's what it looks like:
 
 
-```r
+``` r
 CustomModel <- new_model_class(
   "CustomModel",
   test_step = function(data) {
@@ -335,7 +332,7 @@ model |> evaluate(x, y)
 ```
 
 ```
-## 32/32 - 0s - 9ms/step - loss: 0.0000e+00 - mae: 1.3947
+## 32/32 - 0s - 8ms/step - loss: 0.0000e+00 - mae: 1.3947
 ```
 
 ```
@@ -359,7 +356,7 @@ Let's consider:
 - A loss function to train the discriminator.
 
 
-```r
+``` r
 # Create the discriminator
 discriminator <-
   keras_model_sequential(name = "discriminator", input_shape = c(28, 28, 1)) |>
@@ -402,7 +399,7 @@ Here's a feature-complete GAN class, overriding `compile()` to use its own signa
 and implementing the entire GAN algorithm in 17 lines in `train_step`:
 
 
-```r
+``` r
 GAN <- Model(
   classname = "GAN",
 
@@ -480,7 +477,7 @@ GAN <- Model(
 Let's test-drive it:
 
 
-```r
+``` r
 batch_size <- 64
 c(c(x_train, .), c(x_test, .)) %<-% dataset_mnist()
 all_digits <- op_concatenate(list(x_train, x_test))
@@ -510,7 +507,7 @@ gan |> fit(
 ```
 
 ```
-## 100/100 - 5s - 53ms/step - d_loss: 0.0000e+00 - g_loss: 0.0000e+00
+## 100/100 - 5s - 47ms/step - d_loss: 0.0000e+00 - g_loss: 0.0000e+00
 ```
 
 The ideas behind deep learning are simple, so why should their implementation be painful?
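For context, the hunks in this file all revolve around the vignette's custom `train_step()` pattern. The block below is a minimal sketch of that pattern assembled from the fragments visible here; it is not the file's exact code, and the synthetic data shapes and one-epoch fit are illustrative assumptions.

``` r
library(tensorflow, exclude = c("set_random_seed", "shape"))
library(keras3)

CustomModel <- new_model_class(
  "CustomModel",
  train_step = function(data) {
    # Unpack the batch; y and sample_weight may be absent.
    c(x, y = NULL, sample_weight = NULL) %<-% data

    # Record the forward pass so gradients can be taken afterwards.
    with(tf$GradientTape() %as% tape, {
      y_pred <- self(x, training = TRUE)
      loss <- self$compute_loss(y = y, y_pred = y_pred,
                                sample_weight = sample_weight)
    })

    # Compute gradients and update the weights.
    trainable_vars <- self$trainable_variables
    gradients <- tape$gradient(loss, trainable_vars)
    self$optimizer$apply(gradients, trainable_vars)

    # Update the metrics configured in compile() and return their values.
    for (metric in self$metrics) {
      if (metric$name == "loss")
        metric$update_state(loss)
      else
        metric$update_state(y, y_pred, sample_weight = sample_weight)
    }
    results <- list()
    for (metric in self$metrics)
      results[[metric$name]] <- metric$result()
    results
  }
)

# Illustrative usage with random data (shapes assumed, not taken from the diff).
inputs <- keras_input(shape = 32)
outputs <- layer_dense(inputs, 1)
model <- CustomModel(inputs = inputs, outputs = outputs)
model |> compile(optimizer = "adam", loss = "mse", metrics = "mae")
x <- array(runif(1000 * 32), dim = c(1000, 32))
y <- array(runif(1000), dim = c(1000, 1))
model |> fit(x, y, epochs = 1)
```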

vignettes/distributed_training_with_tensorflow.Rmd

Lines changed: 9 additions & 9 deletions
@@ -40,7 +40,7 @@ industry workflows.
 
 
 
-```r
+``` r
 library(keras3)
 library(tensorflow, exclude = c("shape", "set_random_seed"))
 library(tfdatasets, exclude = "shape")
@@ -90,7 +90,7 @@ in a multi-device or distributed workflow.
 Schematically, it looks like this:
 
 
-```r
+``` r
 # Create a MirroredStrategy.
 strategy <- tf$distribute$MirroredStrategy()
 cat(sprintf('Number of devices: %d\n', strategy$num_replicas_in_sync))
@@ -113,7 +113,7 @@ with(startegy$scope(), {
 Here's a simple end-to-end runnable example:
 
 
-```r
+``` r
 get_compiled_model <- function() {
   inputs <- keras_input(shape = 784)
   outputs <- inputs |>
@@ -173,7 +173,7 @@ cat(sprintf('Number of devices: %d\n', strategy$num_replicas_in_sync))
 ## Number of devices: 2
 ```
 
-```r
+``` r
 # Open a strategy scope.
 with(strategy$scope(), {
   # Everything that creates variables should be under the strategy scope.
@@ -193,10 +193,10 @@ with(strategy$scope(), {
 
 ```
 ## Epoch 1/2
-## 782/782 - 6s - 7ms/step - loss: 3.0622 - sparse_categorical_accuracy: 0.8615 - val_loss: 1.1367 - val_sparse_categorical_accuracy: 0.9006
+## 782/782 - 5s - 7ms/step - loss: 3.0622 - sparse_categorical_accuracy: 0.8615 - val_loss: 1.1367 - val_sparse_categorical_accuracy: 0.9006
 ## Epoch 2/2
 ## 782/782 - 3s - 4ms/step - loss: 0.5774 - sparse_categorical_accuracy: 0.9259 - val_loss: 0.6612 - val_sparse_categorical_accuracy: 0.9210
-## 157/157 - 0s - 3ms/step - loss: 0.6729 - sparse_categorical_accuracy: 0.9150
+## 157/157 - 0s - 2ms/step - loss: 0.6729 - sparse_categorical_accuracy: 0.9150
 ```
 
 ```
@@ -218,7 +218,7 @@ training from your saved model.
 Here's a simple example:
 
 
-```r
+``` r
 # Prepare a directory to store all the checkpoints.
 checkpoint_dir <- "./ckpt"
 if (!dir.exists(checkpoint_dir)) {
@@ -274,10 +274,10 @@ run_training(epochs = 1)
 ```
 
 ```
-## 782/782 - 6s - 7ms/step - loss: 2.9519 - sparse_categorical_accuracy: 0.8655 - val_loss: 1.3110 - val_sparse_categorical_accuracy: 0.8836
+## 782/782 - 4s - 6ms/step - loss: 2.9519 - sparse_categorical_accuracy: 0.8655 - val_loss: 1.3110 - val_sparse_categorical_accuracy: 0.8836
 ```
 
-```r
+``` r
 # Calling the same function again will resume from where we left off
 run_training(epochs = 1)
 ```
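Pulling the fragments of this guide together, the single-host, multi-GPU pattern these hunks touch looks roughly like the sketch below; the layer widths and the commented-out `fit()` call are illustrative assumptions rather than the vignette's exact code.

``` r
library(keras3)
library(tensorflow, exclude = c("shape", "set_random_seed"))

# Create a MirroredStrategy and report how many replicas it will use.
strategy <- tf$distribute$MirroredStrategy()
cat(sprintf("Number of devices: %d\n", strategy$num_replicas_in_sync))

# Everything that creates variables (model construction and compile())
# must run inside the strategy scope.
with(strategy$scope(), {
  inputs <- keras_input(shape = 784)
  outputs <- inputs |>
    layer_dense(units = 256, activation = "relu") |>
    layer_dense(units = 10)
  model <- keras_model(inputs, outputs)
  model |> compile(
    optimizer = optimizer_adam(),
    loss = loss_sparse_categorical_crossentropy(from_logits = TRUE),
    metrics = list(metric_sparse_categorical_accuracy())
  )
})

# Training, evaluation, and checkpointing then proceed as usual, e.g.:
# model |> fit(train_dataset, epochs = 2, validation_data = val_dataset)
```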

vignettes/distribution.Rmd

Lines changed: 15 additions & 15 deletions
@@ -42,7 +42,7 @@ clients, while preserving its global semantics.
 ## Setup
 
 
-```r
+``` r
 # This guide assumes there are 8 GPUs available for testing. If you don't have
 # 8 gpus available locally, you can set the following envvar to
 # make xla initialize the CPU as 8 devices, to enable local testing
@@ -52,7 +52,7 @@ Sys.setenv("XLA_FLAGS" = "--xla_force_host_platform_device_count=8")
 
 
 
-```r
+``` r
 library(keras3)
 
 # The distribution API is only implemented for the JAX backend for now.
@@ -79,7 +79,7 @@ You can find more detailed concept explainers in the
 [TensorFlow DTensor guide](https://www.tensorflow.org/guide/dtensor_overview#dtensors_model_of_distributed_tensors).
 
 
-```r
+``` r
 # Retrieve the local available gpu devices.
 devices <- jax$devices() # "gpu"
 str(devices)
@@ -97,7 +97,7 @@ str(devices)
 ## $ :TFRT_CPU_7
 ```
 
-```r
+``` r
 # Define a 2x4 device mesh with data and model parallel axes
 mesh <- keras$distribution$DeviceMesh(
   shape = shape(2, 4),
@@ -140,7 +140,7 @@ portion of the input data.
 
 Here is a sample usage of this class.
 
-```r
+``` r
 # Create DataParallel with list of devices.
 # As a shortcut, the devices can be skipped,
 # and Keras will detect all local available devices.
@@ -184,14 +184,14 @@ model |> fit(dataset, epochs = 3)
 
 ```
 ## Epoch 1/3
-## 8/8 - 0s - 47ms/step - loss: 1.0629
+## 8/8 - 0s - 37ms/step - loss: 1.0629
 ## Epoch 2/3
-## 8/8 - 0s - 7ms/step - loss: 0.9712
+## 8/8 - 0s - 5ms/step - loss: 0.9712
 ## Epoch 3/3
-## 8/8 - 0s - 7ms/step - loss: 0.9322
+## 8/8 - 0s - 5ms/step - loss: 0.9322
 ```
 
-```r
+``` r
 model |> evaluate(dataset)
 ```
 
@@ -235,7 +235,7 @@ multiple matches, a `ValueError` is raised. If no matches are found, `NULL` is
 returned.
 
 
-```r
+``` r
 mesh_2d <- keras$distribution$DeviceMesh(
   shape = shape(2, 4),
   axis_names = c("data", "model"),
@@ -280,17 +280,17 @@ model |> fit(dataset, epochs = 3)
 ## Epoch 1/3
 ## 8/8 - 0s - 29ms/step - loss: 1.0714
 ## Epoch 2/3
-## 8/8 - 0s - 4ms/step - loss: 0.9744
+## 8/8 - 0s - 3ms/step - loss: 0.9744
 ## Epoch 3/3
-## 8/8 - 0s - 5ms/step - loss: 0.9280
+## 8/8 - 0s - 4ms/step - loss: 0.9280
 ```
 
-```r
+``` r
 model |> evaluate(dataset)
 ```
 
 ```
-## 8/8 - 0s - 9ms/step - loss: 0.8802
+## 8/8 - 0s - 7ms/step - loss: 0.8802
 ```
 
 ```
@@ -304,7 +304,7 @@ more data parallel or model parallel. You can do this by adjusting the shape of
 the mesh. And no changes are needed for any other code.
 
 
-```r
+``` r
 full_data_parallel_mesh <- keras$distribution$DeviceMesh(
   shape = shape(8, 1),
   axis_names = list("data", "model"),
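As a companion to the hunks above, here is a compact sketch of the Keras distribution API usage these changes re-render. The explicit `reticulate::import()` calls are an assumption made to keep the sketch self-contained (the vignette may attach its own module handles), and the assembled script is an illustration, not the vignette verbatim.

``` r
# Pretend the local CPU is 8 devices so the example runs anywhere.
Sys.setenv("XLA_FLAGS" = "--xla_force_host_platform_device_count=8")

library(keras3)
use_backend("jax")  # the distribution API currently targets the JAX backend

# Explicit module handles (assumed here for self-containment).
jax   <- reticulate::import("jax")
keras <- reticulate::import("keras")

# Lay the 8 devices out as a 2x4 mesh: a "data" axis and a "model" axis.
devices <- jax$devices()
mesh <- keras$distribution$DeviceMesh(
  shape = shape(2, 4),
  axis_names = c("data", "model"),
  devices = devices
)

# Pure data parallelism: every device holds a full copy of the weights
# and receives a slice of each batch.
data_parallel <- keras$distribution$DataParallel(devices = devices)
keras$distribution$set_distribution(data_parallel)

# Models built and fit() after this point follow the active distribution.
```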

vignettes/examples/index.Rmd

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 ---
 title: Keras examples
 output: rmarkdown::html_vignette
-date: 'Last Modified: 2023-11-30; Last Rendered: 2024-05-16'
+date: 'Last Modified: 2023-11-30; Last Rendered: 2024-05-21'
 vignette: >
   %\VignetteIndexEntry{Keras examples}
   %\VignetteEngine{knitr::rmarkdown}

vignettes/examples/nlp/neural_machine_translation_with_transformer.Rmd

Lines changed: 1 addition & 1 deletion
@@ -583,7 +583,7 @@ transformer |> fit(train_ds, epochs = epochs,
 ```
 
 ```
-## 1297/1297 - 56s - 43ms/step - accuracy: 0.7708 - loss: 1.5755 - val_accuracy: 0.7784 - val_loss: 1.3806
+## 1297/1297 - 58s - 44ms/step - accuracy: 0.7709 - loss: 1.5752 - val_accuracy: 0.7731 - val_loss: 1.4209
 ```
 
 