Skip to content

Commit 84902ee

Browse files
authored
Merge pull request #223 from rizoudal/rizoudal
Refactor: commented out ViT-related mentions in files
2 parents a155af8 + 8bbd83c commit 84902ee

File tree

6 files changed

+32
-36
lines changed

6 files changed

+32
-36
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ The open-source software AUCMEDI allows fast setup of medical image classificati
1414
- Wide range of 2D/3D data entry options with interfaces to the most common medical image formats such as DICOM, MetaImage, NifTI, PNG or TIF already supplied.
1515
- Selection of pre-processing methods for preparing images, such as augmentation processes, color conversions, windowing, filtering, resizing and normalization.
1616
- Use of deep neural networks for binary, multi-class as well as multi-label classification and efficient methods against class imbalances using modern loss functions such as focal loss.
17-
- Library from modern architectures, like ResNet up to EfficientNet and Vision-Transformers (ViT).
17+
- Library from modern architectures, like ResNet up to ConvNeXt. <!-- and Vision-Transformers (ViT).-->
1818
- Complex ensemble learning techniques (combination of predictions) using test-time augmentation, bagging via cross-validation or stacking via logistic regressions.
1919
- Explainable AI to explain opaque decision-making processes of the models using activation maps such as Grad-CAM or backpropagation.
2020
- Automated Machine Learning (AutoML) mentality to ensure easy deployment, integration and maintenance of complex medical image classification pipelines (Docker).

aucmedi/automl/block_train.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@
2424
import numpy as np
2525
import json
2626
from tensorflow.keras.metrics import AUC
27-
from tensorflow_addons.metrics import F1Score
2827
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, \
2928
ReduceLROnPlateau, EarlyStopping
3029
# Internal libraries
@@ -140,8 +139,7 @@ def block_train(config):
140139
"workers": config["workers"],
141140
"batch_queue_size": 4,
142141
"loss": loss,
143-
"metrics": [AUC(100), F1Score(num_classes=class_n,
144-
average="macro")],
142+
"metrics": [AUC(100)],
145143
"pretrained_weights": True,
146144
"multiprocessing": False,
147145
}

aucmedi/neural_network/architectures/image/__init__.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -61,10 +61,10 @@
6161
# Xception
6262
from aucmedi.neural_network.architectures.image.xception import Xception
6363
# Vision Transformer (ViT)
64-
from aucmedi.neural_network.architectures.image.vit_b16 import ViT_B16
65-
from aucmedi.neural_network.architectures.image.vit_b32 import ViT_B32
66-
from aucmedi.neural_network.architectures.image.vit_l16 import ViT_L16
67-
from aucmedi.neural_network.architectures.image.vit_l32 import ViT_L32
64+
# from aucmedi.neural_network.architectures.image.vit_b16 import ViT_B16
65+
# from aucmedi.neural_network.architectures.image.vit_b32 import ViT_B32
66+
# from aucmedi.neural_network.architectures.image.vit_l16 import ViT_L16
67+
# from aucmedi.neural_network.architectures.image.vit_l32 import ViT_L32
6868
# ConvNeXt
6969
from aucmedi.neural_network.architectures.image.convnext_base import ConvNeXtBase
7070
from aucmedi.neural_network.architectures.image.convnext_tiny import ConvNeXtTiny
@@ -103,10 +103,10 @@
103103
"VGG16": VGG16,
104104
"VGG19": VGG19,
105105
"Xception": Xception,
106-
"ViT_B16": ViT_B16,
107-
"ViT_B32": ViT_B32,
108-
"ViT_L16": ViT_L16,
109-
"ViT_L32": ViT_L32,
106+
# "ViT_B16": ViT_B16,
107+
# "ViT_B32": ViT_B32,
108+
# "ViT_L16": ViT_L16,
109+
# "ViT_L32": ViT_L32,
110110
"ConvNeXtBase": ConvNeXtBase,
111111
"ConvNeXtTiny": ConvNeXtTiny,
112112
"ConvNeXtSmall": ConvNeXtSmall,
@@ -190,10 +190,10 @@
190190
"VGG16": "caffe",
191191
"VGG19": "caffe",
192192
"Xception": "tf",
193-
"ViT_B16": "tf",
194-
"ViT_B32": "tf",
195-
"ViT_L16": "tf",
196-
"ViT_L32": "tf",
193+
# "ViT_B16": "tf",
194+
# "ViT_B32": "tf",
195+
# "ViT_L16": "tf",
196+
# "ViT_L32": "tf",
197197
"ConvNeXtBase": None,
198198
"ConvNeXtTiny": None,
199199
"ConvNeXtSmall": None,

requirements.txt

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,7 @@ scikit-image==0.21.0
88
lime==0.2.0.1
99
pooch==1.6.0
1010
classification-models-3D==1.0.10
11-
vit-keras==0.1.2
12-
tensorflow-addons==0.21.0
11+
# vit-keras==0.1.2
1312
Keras-Applications==1.0.8
1413
SimpleITK==2.2.0
1514
batchgenerators==0.25

setup.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,7 @@
3535
'lime>=0.2.0.1',
3636
'pooch>=1.6.0',
3737
'classification-models-3D>=1.0.10',
38-
'vit-keras>=0.1.2',
39-
'tensorflow-addons>=0.21.0',
38+
# 'vit-keras>=0.1.2',
4039
'Keras-Applications==1.0.8',
4140
'SimpleITK>=2.2.0',
4241
'batchgenerators>=0.25',

tests/test_architectures_image.py

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -665,7 +665,7 @@ def test_Xception(self):
665665
# Architecture: ViT B16 #
666666
#-------------------------------------------------#
667667
# Functionality and Interoperability testing deactivated due to too intensive RAM requirements
668-
def test_ViT_B16(self):
668+
# def test_ViT_B16(self):
669669
# self.datagen_RGB.sf_resize = Resize(shape=(224, 224))
670670
# arch = ViT_B16(Classifier(n_labels=4), channels=3,
671671
# input_shape=(224, 224))
@@ -676,16 +676,16 @@ def test_ViT_B16(self):
676676
# batch_queue_size=1, input_shape=(224, 224))
677677
# try : model.model.summary()
678678
# except : raise Exception()
679-
self.assertTrue(supported_standardize_mode["ViT_B16"] == "tf")
680-
self.assertTrue(sdm_global["2D.ViT_B16"] == "tf")
681-
self.assertTrue("2D.ViT_B16" in architecture_dict)
679+
# self.assertTrue(supported_standardize_mode["ViT_B16"] == "tf")
680+
# self.assertTrue(sdm_global["2D.ViT_B16"] == "tf")
681+
# self.assertTrue("2D.ViT_B16" in architecture_dict)
682682
# self.datagen_RGB.sf_resize = Resize(shape=(32, 32))
683683

684684
#-------------------------------------------------#
685685
# Architecture: ViT B32 #
686686
#-------------------------------------------------#
687687
# Functionality and Interoperability testing deactivated due to too intensive RAM requirements
688-
def test_ViT_B32(self):
688+
# def test_ViT_B32(self):
689689
# self.datagen_RGB.sf_resize = Resize(shape=(224, 224))
690690
# arch = ViT_B32(Classifier(n_labels=4), channels=3,
691691
# input_shape=(224, 224))
@@ -696,16 +696,16 @@ def test_ViT_B32(self):
696696
# batch_queue_size=1, input_shape=(224, 224))
697697
# try : model.model.summary()
698698
# except : raise Exception()
699-
self.assertTrue(supported_standardize_mode["ViT_B32"] == "tf")
700-
self.assertTrue(sdm_global["2D.ViT_B32"] == "tf")
701-
self.assertTrue("2D.ViT_B32" in architecture_dict)
699+
# self.assertTrue(supported_standardize_mode["ViT_B32"] == "tf")
700+
# self.assertTrue(sdm_global["2D.ViT_B32"] == "tf")
701+
# self.assertTrue("2D.ViT_B32" in architecture_dict)
702702
# self.datagen_RGB.sf_resize = Resize(shape=(32, 32))
703703

704704
#-------------------------------------------------#
705705
# Architecture: ViT L16 #
706706
#-------------------------------------------------#
707707
# Functionality and Interoperability testing deactivated due to too intensive RAM requirements
708-
def test_ViT_L16(self):
708+
# def test_ViT_L16(self):
709709
# self.datagen_RGB.sf_resize = Resize(shape=(384, 384))
710710
# arch = ViT_L16(Classifier(n_labels=4), channels=3,
711711
# input_shape=(384, 384))
@@ -716,16 +716,16 @@ def test_ViT_L16(self):
716716
# batch_queue_size=1, input_shape=(384, 384))
717717
# try : model.model.summary()
718718
# except : raise Exception()
719-
self.assertTrue(supported_standardize_mode["ViT_L16"] == "tf")
720-
self.assertTrue(sdm_global["2D.ViT_L16"] == "tf")
721-
self.assertTrue("2D.ViT_L16" in architecture_dict)
719+
# self.assertTrue(supported_standardize_mode["ViT_L16"] == "tf")
720+
# self.assertTrue(sdm_global["2D.ViT_L16"] == "tf")
721+
# self.assertTrue("2D.ViT_L16" in architecture_dict)
722722
# self.datagen_RGB.sf_resize = Resize(shape=(32, 32))
723723

724724
#-------------------------------------------------#
725725
# Architecture: ViT L32 #
726726
#-------------------------------------------------#
727727
# Functionality and Interoperability testing deactivated due to too intensive RAM requirements
728-
def test_ViT_L32(self):
728+
# def test_ViT_L32(self):
729729
# self.datagen_RGB.sf_resize = Resize(shape=(384, 384))
730730
# arch = ViT_L32(Classifier(n_labels=4), channels=3,
731731
# input_shape=(384, 384))
@@ -736,9 +736,9 @@ def test_ViT_L32(self):
736736
# batch_queue_size=1, input_shape=(384, 384))
737737
# try : model.model.summary()
738738
# except : raise Exception()
739-
self.assertTrue(supported_standardize_mode["ViT_L32"] == "tf")
740-
self.assertTrue(sdm_global["2D.ViT_L32"] == "tf")
741-
self.assertTrue("2D.ViT_L32" in architecture_dict)
739+
# self.assertTrue(supported_standardize_mode["ViT_L32"] == "tf")
740+
# self.assertTrue(sdm_global["2D.ViT_L32"] == "tf")
741+
# self.assertTrue("2D.ViT_L32" in architecture_dict)
742742
# self.datagen_RGB.sf_resize = Resize(shape=(32, 32))
743743

744744
#-------------------------------------------------#

0 commit comments

Comments
 (0)