21 | 21 |
22 | 22 | pip install albumentations
23 | 23 | """
   | 24 | +import subprocess
   | 25 | +import sys
24 | 26 |
25 |    | -
   | 27 | +try:
   | 28 | +    import albumentations
   | 29 | +    print("albumentations is already installed")
   | 30 | +except ImportError:
   | 31 | +    print("albumentations module not found. Installing...")
   | 32 | +    subprocess.check_call([sys.executable, "-m", "pip", "install", "albumentations"])
   | 33 | +    print("albumentations module installed successfully.")
26 | 34 |
27 | 35 |
28 | 36 |
|
|
77 | 85 | ######################################################################
78 | 86 | # VGG became a model that attracted attention because it succeeded in
79 | 87 | # building deeper layers and dramatically shortening the training time
80 |    | -# compared to alexnet, which was the SOTA model at the time.:
   | 88 | +# compared to ``AlexNet``, which was the SOTA model at the time.
81 | 89 | #
82 | 90 |
83 | 91 |
|
|
97  | 105 | ## model configuration
98  | 106 |
99  | 107 | num_classes = 100
100 |     | -# Caltech 257 CIFAR 100 CIFAR10 10 ,MNIST 10 ImageNet 1000
    | 108 | +# class counts per dataset: Caltech256 257, CIFAR100 100, CIFAR10 10, MNIST 10, ImageNet 1000
101 | 109 | model_version = None ## you must configure it.
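For readers unsure what to put here: a minimal sketch of one plausible configuration, assuming ``model_version`` selects the VGG depth (the paper's configurations A, B, D, E correspond to VGG11/13/16/19). The exact encoding this tutorial expects is not shown in this diff, so the values below are hypothetical.

    # Hypothetical values; the tutorial's actual encoding may differ.
    VGG_DEPTHS = {"A": 11, "B": 13, "D": 16, "E": 19}  # VGG paper configurations
    model_version = "D"   # assumed: configuration D = VGG16
    num_classes = 100     # CIFAR100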
102 | 110 |
103 | 111 | ## data configuration
|
|
120 | 128 |
121 | 129 | update_count = int(256/batch_size)
122 | 130 | accum_step = int(256/batch_size)
123 |     | -eval_step =26 * accum_step ## Caltech 5 CIFAR 5 MNIST 6 , CIFAR10 5 ImageNet 26
    | 131 | +eval_step = 26 * accum_step  ## per-dataset multiplier: Caltech 5, CIFAR100 5, MNIST 6, CIFAR10 5, ImageNet 26
124 | 132 |
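These constants emulate the VGG paper's effective batch size of 256 via gradient accumulation: gradients from ``accum_step`` small batches are summed before a single optimizer update, and evaluation runs every 26 such updates. A minimal sketch of the loop these constants imply (``model``, ``criterion``, ``optimizer``, ``train_loader``, and ``evaluate`` are assumed names, not taken from this diff):

    # Sketch only; the identifiers below are assumptions.
    for step, (images, labels) in enumerate(train_loader, start=1):
        loss = criterion(model(images), labels)
        (loss / accum_step).backward()   # scale so summed grads match one 256-sample batch
        if step % accum_step == 0:       # one optimizer update per effective batch
            optimizer.step()
            optimizer.zero_grad()
        if step % eval_step == 0:        # evaluate every 26 effective batches
            evaluate(model)              # hypothetical evaluation hook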
125 | 133 |
126 | 134 | ## model configuration
|
|
149 | 157 |
150 | 158 | ######################################################################
151 | 159 | # We use the ``CIFAR100`` dataset in this tutorial. In the VGG paper, the authors
152 |     | -# scales image isotropically . Then , they apply
    | 160 | +# scale images isotropically. Then they apply
153 | 161 | # ``Normalization``, ``RandomCrop``, and ``HorizontalFlip``. So we need to override
154 | 162 | # the CIFAR100 class to apply this preprocessing.
155 | 163 | #
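A minimal sketch of such an override using ``torchvision`` transforms (the tutorial's actual implementation, e.g. with ``albumentations``, may differ; the normalization statistics below are the common ImageNet values and are an assumption):

    import torchvision
    import torchvision.transforms as T

    class PreprocessedCIFAR100(torchvision.datasets.CIFAR100):
        # CIFAR100 with the VGG-style preprocessing described above baked in.
        def __init__(self, root, train=True, download=True):
            transform = T.Compose([
                T.Resize(256),               # isotropic rescale: short side -> 256
                T.RandomCrop(224),
                T.RandomHorizontalFlip(),
                T.ToTensor(),
                T.Normalize(mean=[0.485, 0.456, 0.406],   # assumed ImageNet stats
                            std=[0.229, 0.224, 0.225]),
            ])
            super().__init__(root, train=train, transform=transform, download=download)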
|
@@ -637,7 +645,7 @@ def __getitem__(self, index: int) :
637 | 645 | )
638 | 646 |
639 | 647 | ######################################################################
640 |     | -# Conculsion
    | 648 | +# Conclusion
641 | 649 | # ----------
642 | 650 | # We have seen how to pretrain VGG from scratch. This tutorial will be helpful for reproducing other foundation models.
643 | 651 |