diff --git a/advanced_source/dynamic_quantization_tutorial.py b/advanced_source/dynamic_quantization_tutorial.py
index c8d94789d5d..c5b7d70a046 100644
--- a/advanced_source/dynamic_quantization_tutorial.py
+++ b/advanced_source/dynamic_quantization_tutorial.py
@@ -134,10 +134,18 @@ def tokenize(self, path):
 # -----------------------------
 #
 # This is a tutorial on dynamic quantization, a quantization technique
-# that is applied after a model has been trained. Therefore, we'll simply load some
-# pretrained weights into this model architecture; these weights were obtained
-# by training for five epochs using the default settings in the word language model
-# example.
+# that is applied after a model has been trained. Therefore, we'll simply
+# load some pretrained weights into this model architecture; these
+# weights were obtained by training for five epochs using the default
+# settings in the word language model example.
+#
+# Before running this tutorial, download the required pre-trained model:
+#
+# .. code-block:: bash
+#
+#    wget https://s3.amazonaws.com/pytorch-tutorial-assets/word_language_model_quantize.pth
+#
+# Place the downloaded file in the data directory or update the model_data_filepath accordingly.
 
 ntokens = len(corpus.dictionary)
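
For reviewers trying the new download instructions, here is a minimal sketch of how the downloaded checkpoint would be consumed. It assumes the file was saved with `torch.save` as a plain state dict (as the tutorial's `load_state_dict` call implies) and that `model_data_filepath` points at the directory where you placed the `.pth` file; both the path value and the sanity-check print are illustrative, not part of the patch.

```python
import torch

# Assumed location of the downloaded checkpoint; adjust to match where
# you ran the wget command (or your own model_data_filepath setting).
model_data_filepath = 'data/'

# Load on CPU: dynamic quantization in this tutorial targets CPU execution.
state_dict = torch.load(
    model_data_filepath + 'word_language_model_quantize.pth',
    map_location=torch.device('cpu'),
)

# The checkpoint is a plain state dict; the tutorial feeds it to its
# model instance, e.g. model.load_state_dict(state_dict).
print(list(state_dict.keys())[:5])  # sanity check that the file loaded
```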