@@ -610,21 +610,21 @@ resolve_main_thread_generators <- function(x, callback_type = "on_train_batch_be
 #' multiple inputs). If all inputs in the model are named, you can also pass a
 #' list mapping input names to data. `x` can be `NULL` (default) if feeding
 #' from framework-native tensors (e.g. TensorFlow data tensors). You can also
-#' pass a `tfdataset` or a generator returning a list with `(inputs, targets)` or
-#' `(inputs, targets, sample_weights)`.
-#' @param y Vector, matrix, or array of target (label) data (or list if the model has
-#' multiple outputs). If all outputs in the model are named, you can also pass
-#' a list mapping output names to data. `y` can be `NULL` (default) if feeding
-#' from framework-native tensors (e.g. TensorFlow data tensors).
+#' pass a `tfdataset` or a generator returning a list with `(inputs, targets)`
+#' or `(inputs, targets, sample_weights)`.
+#' @param y Vector, matrix, or array of target (label) data (or list if the
+#' model has multiple outputs). If all outputs in the model are named, you can
+#' also pass a list mapping output names to data. `y` can be `NULL` (default)
+#' if feeding from framework-native tensors (e.g. TensorFlow data tensors).
 #' @param batch_size Integer or `NULL`. Number of samples per gradient update.
 #' If unspecified, `batch_size` will default to 32.
-#' @param epochs Number of epochs to train the model.
-#' Note that in conjunction with `initial_epoch`,
-#' `epochs` is to be understood as "final epoch". The model is
-#' not trained for a number of iterations given by `epochs`, but
+#' @param epochs Number of epochs to train the model. Note that in conjunction
+#' with `initial_epoch`, `epochs` is to be understood as "final epoch". The
+#' model is not trained for a number of iterations given by `epochs`, but
 #' merely until the epoch of index `epochs` is reached.
-#' @param verbose Verbosity mode (0 = silent, 1 = progress bar, 2 = one line per
-#' epoch). Defaults to
+#' @param verbose Verbosity mode (0 = silent, 1 = progress bar, 2 = one line
+#' per epoch). Defaults to 1 in most contexts, 2 if in knitr render or running
+#' on a distributed training server.
 #' @param view_metrics View realtime plot of training metrics (by epoch). The
 #' default (`"auto"`) will display the plot when running within RStudio,
 #' `metrics` were specified during model [compile()], `epochs > 1` and
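
The hunk above only reflows the roxygen text for `x`, `y`, `batch_size`, `epochs`, and `verbose` (and fills in the `verbose` default). As a rough sketch of how those arguments come together in a `fit()` call — not part of this commit; the toy model, data shapes, and layer choices below are assumptions made purely for illustration:

```r
library(keras)

# A tiny stand-in binary classifier and random data so the call below runs on
# its own; sizes and layer choices are illustrative assumptions.
x_train <- matrix(runif(1000 * 16), nrow = 1000, ncol = 16)
y_train <- matrix(sample(0:1, 1000, replace = TRUE), ncol = 1)

model <- keras_model_sequential() %>%
  layer_dense(units = 8, activation = "relu", input_shape = 16) %>%
  layer_dense(units = 1, activation = "sigmoid")
model %>% compile(optimizer = "rmsprop", loss = "binary_crossentropy",
                  metrics = "accuracy")

history <- model %>% fit(
  x = x_train,          # feature matrix (or a named list for multi-input models)
  y = y_train,          # targets; may be NULL when x is a tfdataset or generator
  batch_size = 32,      # defaults to 32 when left unspecified
  epochs = 10,          # training runs until the epoch of index `epochs` is reached
  verbose = 1           # 0 = silent, 1 = progress bar, 2 = one line per epoch
)
```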
@@ -634,21 +634,21 @@ resolve_main_thread_generators <- function(x, callback_type = "on_train_batch_be
 #' @param validation_split Float between 0 and 1. Fraction of the training data
 #' to be used as validation data. The model will set apart this fraction of
 #' the training data, will not train on it, and will evaluate the loss and any
-#' model metrics on this data at the end of each epoch. The validation data
-#' is selected from the last samples in the `x` and `y` data provided,
-#' before shuffling.
+#' model metrics on this data at the end of each epoch. The validation data is
+#' selected from the last samples in the `x` and `y` data provided, before
+#' shuffling.
 #' @param validation_data Data on which to evaluate the loss and any model
 #' metrics at the end of each epoch. The model will not be trained on this
 #' data. This could be a list (x_val, y_val) or a list (x_val, y_val,
 #' val_sample_weights). `validation_data` will override `validation_split`.
-#' @param shuffle shuffle: Logical (whether to shuffle the training data
-#' before each epoch) or string (for "batch"). "batch" is a special option
-#' for dealing with the limitations of HDF5 data; it shuffles in batch-sized
-#' chunks. Has no effect when `steps_per_epoch` is not `NULL`.
+#' @param shuffle shuffle: Logical (whether to shuffle the training data before
+#' each epoch) or string (for "batch"). "batch" is a special option for
+#' dealing with the limitations of HDF5 data; it shuffles in batch-sized
+#' chunks. Has no effect when `steps_per_epoch` is not `NULL`.
 #' @param class_weight Optional named list mapping indices (integers) to a
-#' weight (float) value, used for weighting the loss function
-#' (during training only). This can be useful to tell the model to
-#' "pay more attention" to samples from an under-represented class.
+#' weight (float) value, used for weighting the loss function (during training
+#' only). This can be useful to tell the model to "pay more attention" to
+#' samples from an under-represented class.
 #' @param sample_weight Optional array of the same length as x, containing
 #' weights to apply to the model's loss for each sample. In the case of
 #' temporal data, you can pass a 2D array with shape (samples,
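
Again as an illustrative sketch rather than anything in this commit: the validation and weighting arguments documented in the hunk above could be exercised as follows, reusing the placeholder `model`, `x_train`, and `y_train` from the previous sketch. The class indices and weights are made up.

```r
# Hold out validation data, shuffle each epoch, and up-weight the minority class.
history <- model %>% fit(
  x_train, y_train,
  epochs = 5,
  validation_split = 0.2,                 # hold back the last 20% of x/y (taken before shuffling)
  # validation_data = list(x_val, y_val), # if supplied, this overrides validation_split
  shuffle = TRUE,                         # or "batch" when working around HDF5 limitations
  class_weight = list("0" = 1, "1" = 5)   # weight the loss 5x for samples of class 1
)
```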
@@ -660,14 +660,14 @@ resolve_main_thread_generators <- function(x, callback_type = "on_train_batch_be
 #' @param steps_per_epoch Total number of steps (batches of samples) before
 #' declaring one epoch finished and starting the next epoch. When training
 #' with input tensors such as TensorFlow data tensors, the default `NULL` is
-#' equal to the number of samples in your dataset divided by the batch
-#' size, or 1 if that cannot be determined.
+#' equal to the number of samples in your dataset divided by the batch size,
+#' or 1 if that cannot be determined.
 #' @param validation_steps Only relevant if `steps_per_epoch` is specified.
 #' Total number of steps (batches of samples) to validate before stopping.
 #' @param ... Unused
 #'
-#' @return A `history` object that contains all information collected
-#' during training.
+#' @return A `history` object that contains all information collected during
+#' training.
 #'
 #' @family model functions
 #'
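
For the dataset-driven arguments in the final hunk, here is a hedged sketch using the tfdatasets package and the placeholders from the earlier sketches; the pipeline, batch size, and step counts are arbitrary assumptions, and the training dataset is reused as validation data purely for brevity.

```r
library(tfdatasets)

# Wrap the in-memory matrices in a repeating, batched tfdataset so that
# steps_per_epoch is what defines an "epoch".
train_ds <- tensor_slices_dataset(list(x_train, y_train)) %>%
  dataset_shuffle(1000) %>%
  dataset_batch(32) %>%
  dataset_repeat()

history <- model %>% fit(
  train_ds,                    # a tfdataset yielding (inputs, targets) batches
  epochs = 3,
  steps_per_epoch = 25,        # batches drawn before an epoch is declared finished
  validation_data = train_ds,  # normally a separate dataset; reused here for brevity
  validation_steps = 5         # only relevant because steps_per_epoch is given
)
plot(history)                  # the returned history object records the per-epoch metrics
```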