diff --git a/CaffeLoader.py b/CaffeLoader.py
index d09ccdd..ad2e7f6 100644
--- a/CaffeLoader.py
+++ b/CaffeLoader.py
@@ -193,28 +193,31 @@ def buildSequential(channel_list, pooling):
 'P': ['pool1', 'pool2', 'pool3', 'pool4', 'pool5'],
 }
 
+name_dict = {
+'vgg19': ['vgg-19', 'vgg19', 'vgg_19',],
+'vgg16': ['vgg-16', 'vgg16', 'vgg_16', 'fcn32s', 'pruning', 'sod'],
+}
+
 
 def modelSelector(model_file, pooling):
-    vgg_list = ["fcn32s", "pruning", "sod", "vgg"]
-    if any(name in model_file for name in vgg_list):
-        if "pruning" in model_file:
-            print("VGG-16 Architecture Detected")
-            print("Using The Channel Pruning Model")
-            cnn, layerList = VGG_PRUNED(buildSequential(channel_list['VGG-16p'], pooling)), vgg16_dict
-        elif "fcn32s" in model_file:
+    if any(name in model_file for name in ['vgg'] + name_dict['vgg16'] + name_dict['vgg19']):
+        if any(name in model_file for name in name_dict['vgg16']):
             print("VGG-16 Architecture Detected")
-            print("Using the fcn32s-heavy-pascal Model")
-            cnn, layerList = VGG_FCN32S(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
-        elif "sod" in model_file:
-            print("VGG-16 Architecture Detected")
-            print("Using The SOD Fintune Model")
-            cnn, layerList = VGG_SOD(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
-        elif "19" in model_file:
+            if "pruning" in model_file:
+                print("Using The Channel Pruning Model")
+                cnn, layerList = VGG_PRUNED(buildSequential(channel_list['VGG-16p'], pooling)), vgg16_dict
+            elif "fcn32s" in model_file:
+                print("Using the fcn32s-heavy-pascal Model")
+                cnn, layerList = VGG_FCN32S(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
+            elif "sod" in model_file:
+                print("Using The SOD Fintune Model")
+                cnn, layerList = VGG_SOD(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
+            elif "16" in model_file:
+                cnn, layerList = VGG(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
+        elif any(name in model_file for name in name_dict['vgg19']):
             print("VGG-19 Architecture Detected")
-            cnn, layerList = VGG(buildSequential(channel_list['VGG-19'], pooling)), vgg19_dict
-        elif "16" in model_file:
-            print("VGG-16 Architecture Detected")
-            cnn, layerList = VGG(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
+            if "19" in model_file:
+                cnn, layerList = VGG(buildSequential(channel_list['VGG-19'], pooling)), vgg19_dict
         else:
             raise ValueError("VGG architecture not recognized.")
     elif "nin" in model_file:
@@ -251,4 +254,4 @@ def loadCaffemodel(model_file, pooling, use_gpu, disable_check):
 
     print_loadcaffe(cnn, layerList)
 
-    return cnn, layerList
+    return cnn, layerList
\ No newline at end of file
diff --git a/INSTALL.md b/INSTALL.md
index 7d7c3c2..dfdde55 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -197,7 +197,7 @@ Then using https://pytorch.org/, get the correct pip command, paste it into the
 
 
 ```
-pip3 install torch===1.3.0 torchvision===0.4.1 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch===1.3.1 torchvision===0.4.2 -f https://download.pytorch.org/whl/torch_stable.html
 ```
 
 
diff --git a/README.md b/README.md
index 35cbe79..44e8948 100644
--- a/README.md
+++ b/README.md
@@ -195,6 +195,7 @@ path or a full absolute path.
 when using ADAM you will probably need to play with other parameters to get good results, especially the style weight, content weight, and learning rate.
 * `-learning_rate`: Learning rate to use with the ADAM optimizer. Default is 1e1.
+* `-normalize_weights`: If this flag is present, style and content weights will be divided by the number of channels for each layer. Idea from [PytorchNeuralStyleTransfer](https://github.com/leongatys/PytorchNeuralStyleTransfer).
 
 **Output options**:
 
 * `-output_image`: Name of the output image. Default is `out.png`.