-
Notifications
You must be signed in to change notification settings - Fork 66
Image classification topologies corrected for Neon 2.2 and Neon 2.3 #24
base: master
Are you sure you want to change the base?
Changes from all commits
02f75df
1fc4a10
19926d4
952048d
d1aedf4
ae6d4b0
ed06362
4ad1a24
3e7736f
f3eb100
839eaf5
4690ccf
0655d21
f7e6337
ccd9daf
14be62e
e58f718
70ff8df
9e8c7a5
5502ec3
8b4be94
a428edb
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -15,24 +15,50 @@ | |
| # ---------------------------------------------------------------------------- | ||
| import os | ||
|
|
||
| import numpy as np | ||
| from neon.util.argparser import NeonArgparser | ||
| from neon.util.persist import load_obj | ||
| from neon.transforms import Misclassification, CrossEntropyMulti | ||
| from neon.optimizers import GradientDescentMomentum | ||
| from neon.layers import GeneralizedCost | ||
| from neon.models import Model | ||
| from neon.data import DataLoader, ImageParams | ||
|
|
||
| from neon.data.dataloader_transformers import OneHot, TypeCast, BGRMeanSubtract | ||
| from neon.data.aeon_shim import AeonDataLoader | ||
|
|
||
| # parse the command line arguments (generates the backend) | ||
| parser = NeonArgparser(__doc__) | ||
| args = parser.parse_args() | ||
|
|
||
| # setup data provider | ||
| test_dir = os.path.join(args.data_dir, 'val') | ||
| shape = dict(channel_count=3, height=32, width=32) | ||
| test_params = ImageParams(center=True, flip=False, **shape) | ||
| common = dict(target_size=1, nclasses=10) | ||
| test_set = DataLoader(set_name='val', repo_dir=test_dir, media_params=test_params, **common) | ||
| def wrap_dataloader(dl, dtype=np.float32): | ||
| dl = OneHot(dl, index=1, nclasses=10) | ||
| dl = TypeCast(dl, index=0, dtype=dtype) | ||
| dl = BGRMeanSubtract(dl, index=0) | ||
| return dl | ||
|
|
||
| def config(manifest_filename, manifest_root, batch_size, subset_pct): | ||
| image_config = {"type": "image", | ||
| "height": 32, | ||
| "width": 32} | ||
| label_config = {"type": "label", | ||
| "binary": False} | ||
| augmentation = {"type": "image", | ||
| "crop_enable": True, | ||
| "center": True, | ||
| "flip_enable": False} | ||
|
|
||
| return {'manifest_filename': manifest_filename, | ||
| 'manifest_root': manifest_root, | ||
| 'batch_size': batch_size, | ||
| 'subset_fraction': float(subset_pct/100.0), | ||
| 'etl': [image_config, label_config], | ||
| 'augmentation': [augmentation]} | ||
|
|
||
| def make_val_config(manifest_filename, manifest_root, batch_size, subset_pct=100): | ||
| val_config = config(manifest_filename, manifest_root, batch_size, subset_pct) | ||
| return wrap_dataloader(AeonDataLoader(val_config)) | ||
|
|
||
| test_set = make_val_config(args.manifest["val"], args.manifest_root, batch_size=args.batch_size) | ||
|
|
||
| model = Model(load_obj(args.model_file)) | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. model=Model(load_obj(args.model_file)) will break the code, since neon v2.2 and v2.3 were not supposed to initialize a model using old weights. model=Model(load_obj(args.model_file)) works with new weights. When we see an assert failure, it usually means only a subset of the cifar10 layers was involved in saving the weights. So we need to print out the layer information from the old weight file and see which layers are missing. We then delete those layers (temporarily) from the cifar10 layers. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Is resnet_eval.py still functional/successful in loading weights? |
||
| cost = GeneralizedCost(costfunc=CrossEntropyMulti()) | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -23,14 +23,14 @@ WEIGHTS_FILE=${WEIGHTS_URL##*/} | |
| echo "Downloading weights file from ${WEIGHTS_URL}" | ||
| curl -o $WEIGHTS_FILE $WEIGHTS_URL 2> /dev/null | ||
|
|
||
| python -u $TEST_SCRIPT -i ${EXECUTOR_NUMBER} -vvv --model_file $WEIGHTS_FILE --no_progress_bar -w /usr/local/data/CIFAR10/macrobatches | tee output.dat 2>&1 | ||
| python -u $TEST_SCRIPT -i ${EXECUTOR_NUMBER} -vvv --model_file $WEIGHTS_FILE --manifest val:/data/CIFAR/val-index.csv --manifest_root /data/CIFAR -b gpu -z 32 --no_progress_bar 2>&1 | tee output.dat | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Can you use "/dataset/aeon/CIFAR10" instead of "/data/CIFAR"?
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @baojun-nervana Out of curiosity, is there any particular reason why these paths should be changed? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @tpatejko that is the validation dataset directory, so we don't have to update those after the PR. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Changed |
||
| rc=$? | ||
| if [ $rc -ne 0 ];then | ||
| exit $rc | ||
| fi | ||
|
|
||
| # get the top-1 misclass | ||
| top1=`tail -n 1 output.dat | sed "s/.*Accuracy: //" | sed "s/ \% (Top-1).*//"` | ||
| top1=`cat output.dat | sed -n "s/.*Accuracy: \(.*\) \% (Top-1).*/\1/p"` | ||
|
|
||
| top1pass=0 | ||
| top1pass=`echo $top1'>'85 | bc -l` | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -22,7 +22,7 @@ WEIGHTS_FILE=${WEIGHTS_URL##*/} | |
| echo "Downloading weights file from ${WEIGHTS_URL}" | ||
| curl -o $WEIGHTS_FILE $WEIGHTS_URL 2> /dev/null | ||
|
|
||
| python -u alexnet_neon.py --test_only -i ${EXECUTOR_NUMBER} -w /usr/local/data/I1K/macrobatches/ -vvv --model_file $WEIGHTS_FILE --no_progress_bar | tee output.dat 2>&1 | ||
| python -u alexnet_neon.py --test_only -i ${EXECUTOR_NUMBER} -w /data/i1k-extracted/ --manifest_root /data/i1k-extracted --manifest train:/data/i1k-extracted/train-index.csv --manifest val:/data/i1k-extracted/val-index.csv -vvv --model_file $WEIGHTS_FILE --no_progress_bar -z 256 2>&1 | tee output.dat | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Can you use "/dataset/aeon/I1K/i1k-extracted" instead of "/data/i1k-extracted"? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Changed |
||
| rc=$? | ||
| if [ $rc -ne 0 ];then | ||
| exit $rc | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
"The model weight file" will probably need to be converted to the new format. Have you already obtained trained weights in the neon v2.2/v2.3 format?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Assuming yes.