-
Notifications
You must be signed in to change notification settings - Fork 66
Adding VGG that works for neon v2.3 with MKL backend #25
base: master
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,65 @@ | ||
| # ---------------------------------------------------------------------------- | ||
| # Copyright 2016 Nervana Systems Inc. | ||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||
| # you may not use this file except in compliance with the License. | ||
| # You may obtain a copy of the License at | ||
| # | ||
| # http://www.apache.org/licenses/LICENSE-2.0 | ||
| # | ||
| # Unless required by applicable law or agreed to in writing, software | ||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| # See the License for the specific language governing permissions and | ||
| # limitations under the License. | ||
| # ---------------------------------------------------------------------------- | ||
| import numpy as np | ||
| from neon.util.persist import get_data_cache_or_nothing | ||
| from neon.data.dataloader_transformers import OneHot, TypeCast, BGRMeanSubtract | ||
| from neon.data.aeon_shim import AeonDataLoader | ||
|
|
||
|
|
||
def common_config(manifest_file, manifest_root, batch_size, subset_pct):
    """Build the aeon loader configuration shared by train and validation.

    Arguments:
        manifest_file (str): path to the aeon manifest file.
        manifest_root (str): root directory prepended to relative paths in
            the manifest.
        batch_size (int): number of images per minibatch.
        subset_pct (int or float): percentage (0-100) of the dataset to use.

    Returns:
        dict: aeon dataloader configuration.
    """
    cache_root = get_data_cache_or_nothing('i1k-cache/')
    image_config = {"type": "image",
                    "height": 224,
                    "width": 224}
    label_config = {"type": "label",
                    "binary": False}
    augmentation = {"type": "image",
                    "scale": [0.585, 0.875],
                    "crop_enable": True}

    return {'manifest_filename': manifest_file,
            'manifest_root': manifest_root,
            'batch_size': batch_size,
            # aeon expects a fraction in [0, 1]; dividing by the float
            # literal already yields a float, so no extra cast is needed
            'subset_fraction': subset_pct / 100.0,
            'block_size': 5000,
            'cache_directory': cache_root,
            'etl': [image_config, label_config],
            'augmentation': [augmentation]}
|
|
||
|
|
||
def wrap_dataloader(dl):
    """Apply the standard ImageNet transform chain to a raw aeon loader.

    Labels are one-hot encoded over the 1000 ImageNet classes, images are
    cast to float32, and the per-channel BGR means are subtracted.
    """
    wrapped = OneHot(dl, index=1, nclasses=1000)
    wrapped = TypeCast(wrapped, index=0, dtype=np.float32)
    return BGRMeanSubtract(wrapped, index=0)
|
|
||
|
|
||
def make_train_loader(manifest_file, manifest_root, backend_obj, subset_pct=100, random_seed=0):
    """Create the shuffled, augmented training dataloader.

    Arguments:
        manifest_file (str): path to the training manifest.
        manifest_root (str): root directory for relative manifest paths.
        backend_obj: neon backend; its ``bsz`` attribute supplies batch size.
        subset_pct (int): percentage (0-100) of the dataset to use.
        random_seed (int): seed for manifest shuffling.
    """
    aeon_config = common_config(manifest_file, manifest_root, backend_obj.bsz, subset_pct)
    # common_config already records manifest_root; no need to set it again
    aeon_config['shuffle_manifest'] = True
    aeon_config['shuffle_enable'] = True
    aeon_config['random_seed'] = random_seed
    # training-time augmentation: random (non-center) crops plus horizontal flips
    aeon_config['augmentation'][0]["center"] = False
    aeon_config['augmentation'][0]["flip_enable"] = True

    return wrap_dataloader(AeonDataLoader(aeon_config, backend_obj))
|
|
||
|
|
||
def make_validation_loader(manifest_file, manifest_root, backend_obj, subset_pct=100):
    """Create the validation dataloader (no shuffling, default augmentation).

    Arguments:
        manifest_file (str): path to the validation manifest.
        manifest_root (str): root directory for relative manifest paths.
        backend_obj: neon backend; its ``bsz`` attribute supplies batch size.
        subset_pct (int): percentage (0-100) of the dataset to use.
    """
    # common_config already records manifest_root; the original reassignment
    # of aeon_config['manifest_root'] was redundant and has been dropped
    aeon_config = common_config(manifest_file, manifest_root, backend_obj.bsz, subset_pct)

    return wrap_dataloader(AeonDataLoader(aeon_config, backend_obj))
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,14 +1,19 @@ | ||
| # Overview | ||
|
|
||
| This example VGG directory contains scripts to perform VGG training and inference using MKL backend and GPU backend | ||
|
|
||
| ## Model |
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Change to ## Model |
||
|
|
||
| Here we have ported the weights for the 16 and 19 layer VGG models from the Caffe model zoo (see [link](https://github.com/BVLC/caffe/wiki/Model-Zoo#models-used-by-the-vgg-team-in-ilsvrc-2014)) | ||
| Here we have ported the weights for the 16 and 19 layer VGG models from the Caffe model zoo (see [link](https://github.com/BVLC/caffe/wiki/Model-Zoo#models-used-by-the-vgg-team-in-ilsvrc-2014)): https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/VGG_D_fused_conv_bias.p for VGG_D and https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/VGG_E_fused_conv_bias.p for VGG_E | ||
|
|
||
| ### Model script | ||
| The model run script is included here [vgg_neon.py](./vgg_neon.py). This script can easily be adapted for fine tuning this network but we have focused on inference here because a successful training protocol may require details beyond what is available from the Caffe model zoo. | ||
| The model run scripts included here [vgg_neon_train.py](./vgg_neon_train.py) and [vgg_neon_inference.py](./vgg_neon_inference.py) perform training and inference respectively. We are providing both the training and the inference scripts; they can be adapted for fine tuning this network, but we have yet to test the training script because a successful training protocol may require details beyond what is available from the Caffe model zoo. The inference script will take the trained weight file as input: supply it with VGG_D_fused_conv_bias.p or VGG_E_fused_conv_bias.p, or with trained models from running VGG training. | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. change "[vgg_neon_train.py] (./vgg_neon_train.py)" to "vgg_neon_train.py", so the hyperlink will work change "[vgg_neon_inference.py] (./vgg_neon_inference.py)" to "vgg_neon_inference.py", so the hyperlink will work |
||
|
|
||
| ### Trained weights | ||
| The trained weights file can be downloaded from AWS using the following links: | ||
| [VGG_D.p]( https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/VGG_D.p) and [VGG_E.p][S3_WEIGHTS_FILE]. | ||
| [S3_WEIGHTS_FILE]: https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/VGG_E.p | ||
| [VGG_D_fused_conv_bias.p](https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/VGG_D_fused_conv_bias.p) | ||
| [VGG_E_fused_conv_bias.p](https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/VGG_E_fused_conv_bias.p) | ||
|
|
||
|
|
||
| ## Performance | ||
|
|
||
|
|
@@ -27,75 +32,57 @@ Testing the image classification performance for the two models on the ILSVRC 20 | |
|
|
||
| These results are calculated using a single scale, using a 224x224 crop of each image. These results are comparable to the classification accuracy we computed using the Caffe model zoo 16 and 19 layer VGG models using Caffe [Caffe model zoo](https://github.com/BVLC/caffe/wiki/Model-Zoo#models-used-by-the-vgg-team-in-ilsvrc-2014). | ||
|
|
||
| ## Instructions | ||
|
|
||
| To run this model script on the ILSVRC2012 dataset, you will need to have the data in the neon aeon format; follow | ||
| the instructions in the neon/example/imagenet/README.md to setup the dataset. | ||
|
|
||
| If neon is installed into a `virtualenv`, make sure that it is activated before running the commands below. | ||
|
|
||
| ### Speed | ||
| ### Training | ||
|
|
||
| We ran speed benchmarks on this model using neon. These results are using a 64 image batch size with 3x224x224 input images. The results are in the tables below: | ||
| To run the training of VGG with MKL backend: | ||
|
|
||
| #### VGG D | ||
| ``` | ||
| ---------------------- | ||
| | Func | Time | | ||
| ---------------------- | ||
| | fprop | 366 ms | | ||
| | bprop | 767 ms | | ||
| | update | 19 ms | | ||
| ---------------------- | ||
| | Total | 1152 ms | | ||
| ---------------------- | ||
| ``` | ||
| `python -u vgg_neon_train.py -c vgg_mkl.cfg -vvv --save_path VGG16-model.prm --output_file VGG16-data.h5 --caffe` | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. move this line into ``` (code marks) |
||
|
|
||
| #### VGG E | ||
| ``` | ||
| ----------------------- | ||
| | Func | Time | | ||
| ----------------------- | ||
| | fprop | 452 ms | | ||
| | bprop | 940 ms | | ||
| | update | 20 ms | | ||
| ----------------------- | ||
| | Total | 1412 ms | | ||
| ----------------------- | ||
| ``` | ||
| The run times for the fprop and bprop pass and the parameter update are given in the table below. The iteration row is the combined runtime for all functions in a training iteration. These results are for each minibatch consisting of 64 images of shape 224x224x3. The model was run 12 times, the first two passes were ignored and the last 10 were used to get the benchmark results. | ||
| "numactl -i all" is our recommendation to get as much performance as possible for Intel architecture-based servers which | ||
| feature multiple sockets and when NUMA is enabled. On such systems, please run the following: | ||
|
|
||
| `numactl -i all python -u vgg_neon_train.py -c vgg_mkl.cfg -vvv --save_path VGG16-model.prm --output_file VGG16-data.h5 --caffe` | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. move this line into ``` (code marks) |
||
|
|
||
| vgg_mkl.cfg is an example configuration file that describes the dataset generated above as well as the other parameters. | ||
|
|
||
| System specs: | ||
| ``` | ||
| Intel(R) Core(TM) i5-4690 CPU @ 3.50GHz | ||
| Ubuntu 14.04 | ||
| GPU: GeForce GTX TITAN X | ||
| CUDA Driver Version 7.0 | ||
| manifest = [train:/data/I1K/i1k-extracted/train-index.csv, val:/data/I1K/i1k-extracted/val-index.csv] | ||
| manifest_root = /data/I1K/i1k-extracted/ | ||
| backend = mkl | ||
| verbose = True | ||
| epochs = 150 | ||
| batch_size = 64 | ||
| eval_freq = 1 | ||
| datatype = f32 | ||
| ``` | ||
|
|
||
| ## Instructions | ||
|
|
||
| Make sure that your local repo is synced to the proper neon repo commit (see version below) and run the [installation procedure](http://neon.nervanasys.com/docs/latest/installation.html) before proceeding. To run | ||
| this model script on the ILSVRC2012 dataset, you will need to have the data in the neon macrobatch format; follow | ||
| the instructions in the neon documentations for [setting up the data sets](http://neon.nervanasys.com/docs/latest/datasets.html#imagenet). | ||
| To run the training of VGG with GPU backend: | ||
| modify the above vgg_mkl.cfg 'backend' entry or simply use the following command: | ||
|
|
||
| If neon is installed into a `virtualenv`, make sure that it is activated before running the commands below. | ||
| `python -u vgg_neon_train.py -c vgg_mkl.cfg -b gpu -vvv --save_path VGG16-model.prm --output_file VGG16-data.h5 --caffe` | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. move this line into ``` (code marks) |
||
|
|
||
| To run the evaluation of the model: | ||
| To run the evaluation/inference of the model: | ||
| ``` | ||
| # for 16 layer VGG D model | ||
| python vgg_neon.py --vgg_ver D --model_file VGG_D.p -w path/to/dataset/batches -z 64 --caffe | ||
| # for 16 layer VGG D model | ||
| python vgg_neon_inference.py -c vgg_mkl.cfg --vgg_ver D --model_file VGG16-model.prm -z 64 --caffe | ||
|
|
||
| # for 16 layer VGG D model | ||
| python vgg_neon.py --vgg_ver E --model_file VGG_E.p -w path/to/dataset/batches -z 64 --caffe | ||
| # for 16 layer VGG E model | ||
| python vgg_neon_inference.py -c vgg_mkl.cfg --vgg_ver E --model_file VGG19-model.prm -z 64 --caffe | ||
| ``` | ||
|
|
||
| Note that the `--caffe` option is needed to match the dropout implementation used by Caffe. | ||
|
|
||
| The batch size is set to 64 in the examples above because with larger batch size the model may not fit on some GPUs. Use smaller batch sizes if necessary. The script given here can easily be altered for model fine tuning. See the neon user manual for help with that. | ||
| Please note that VGG16-model.prm and VGG19-model.prm can be replaced with the ported weights VGG_D_fused_conv_bias.p and VGG_E_fused_conv_bias.p. | ||
|
|
||
|
|
||
| ### Version compatibility | ||
|
|
||
| Neon version: commit SHA [e7ab2c2e2](https://github.com/NervanaSystems/neon/commit/e7ab2c2e27f113a4d36d17ba8c79546faed7d916). | ||
| Neon version: neon v2.3 | ||
|
|
||
| ## Citation | ||
|
|
||
|
|
||
This file was deleted.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,95 @@ | ||
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Simplified version of VGG model D and E
Based on manuscript:
    Very Deep Convolutional Networks for Large-Scale Image Recognition
    K. Simonyan, A. Zisserman
    arXiv:1409.1556
"""

from neon.util.argparser import NeonArgparser
from neon.initializers import Constant, GlorotUniform, Xavier
from neon.layers import Conv, Dropout, Pooling, GeneralizedCost, Affine
from neon.optimizers import GradientDescentMomentum, Schedule, MultiOptimizer
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, TopKMisclassification
from neon.models import Model
from neon.callbacks.callbacks import Callbacks
from imagenet_data import make_train_loader, make_validation_loader

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--vgg_version', default='D', choices=['D', 'E'],
                    help='vgg model type')
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
parser.add_argument('--test_only', action='store_true',
                    help='skip fitting - evaluate metrics on trained model weights')
args = parser.parse_args()

# weight initializers: Xavier for the conv layers, GlorotUniform for the
# fully connected layers
conv_init = Xavier(local=True)
fc_init = GlorotUniform()

relu = Rectlin()
conv_params = {'init': conv_init,
               'strides': 1,
               'padding': 1,
               'bias': Constant(0),
               'activation': relu}

# Set up the model layers
layers = []

# 3x3 conv stacks with increasing feature-map counts; the deeper stacks
# (more than 128 maps) get a third conv, and VGG version E adds a fourth.
# NOTE(review): this nesting is reconstructed from the diff; it is the only
# reading that yields the standard 16- and 19-layer configurations.
for nfm in [64, 128, 256, 512, 512]:
    layers.append(Conv((3, 3, nfm), **conv_params))
    layers.append(Conv((3, 3, nfm), **conv_params))
    if nfm > 128:
        layers.append(Conv((3, 3, nfm), **conv_params))
        if args.vgg_version == 'E':
            layers.append(Conv((3, 3, nfm), **conv_params))
    layers.append(Pooling(2, strides=2))

# classifier: two 4096-unit FC layers with dropout, then a 1000-way softmax
layers.append(Affine(nout=4096, init=fc_init, bias=Constant(0), activation=relu))
layers.append(Dropout(keep=0.5))
layers.append(Affine(nout=4096, init=fc_init, bias=Constant(0), activation=relu))
layers.append(Dropout(keep=0.5))
layers.append(Affine(nout=1000, init=fc_init, bias=Constant(0), activation=Softmax()))

cost = GeneralizedCost(costfunc=CrossEntropyMulti())

model = Model(layers=layers)

# setup data provider
assert 'train' in args.manifest, "Missing train manifest"
assert 'val' in args.manifest, "Missing validation manifest"
rseed = 0 if args.rng_seed is None else args.rng_seed
train = make_train_loader(args.manifest['train'], args.manifest_root,
                          model.be, args.subset_pct, rseed)
test = make_validation_loader(args.manifest['val'], args.manifest_root, model.be, args.subset_pct)

# configure callbacks; report top-5 misclassification on the validation set
top5 = TopKMisclassification(k=5)
callbacks = Callbacks(model, eval_set=test, metric=top5, **args.callback_args)

# learning rate schedule: multiply by 0.1 at epochs 14, 29, 44, 59, 74;
# biases get a larger base learning rate and no weight decay
weight_sched = Schedule(list(range(14, 75, 15)), 0.1)
opt_gdm = GradientDescentMomentum(0.01, 0.9, wdecay=0.0005, schedule=weight_sched)
opt_biases = GradientDescentMomentum(0.02, 0.9, schedule=weight_sched)
opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Change to # Overview