9 changes: 9 additions & 0 deletions .gitignore
@@ -4,3 +4,12 @@ dev_iDeLUCS/*
.DS_Store
Results/
*/__pycache__/*
idelucs/kmers.c
idelucs/kmers.cpython-*
*.ipynb*
idelucs.egg-info/*
ALL_RESULTS.tsv
build/
*.tfrecord
s_50*
Example/*_shuffled.fas
14 changes: 13 additions & 1 deletion idelucs/PytorchUtils.py
@@ -35,13 +35,19 @@ def __init__(self, n_input, n_output):
super(NetLinear, self).__init__()
self.n_input = n_input
self.layers = nn.Sequential(

nn.Linear(n_input, 512),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(512, 64)
)

self.fine_tune_layer = nn.Sequential(
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(64, n_output),
nn.Softmax(dim=1)
)

self.classifier = nn.Sequential(
nn.ReLU(),
nn.Dropout(p=0.5),
@@ -54,6 +60,12 @@ def forward(self, x):
latent = self.layers(x)
out = self.classifier(latent)
return out, latent

def fine_tune(self, x):
x = x.view(-1, self.n_input)
x = self.layers(x)
out = self.fine_tune_layer(x)
return out


class myDataset(Dataset):
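For orientation, a minimal usage sketch of the two NetLinear heads touched above. The feature width, batch size, and cluster count are hypothetical placeholders, not values taken from the repository:

import torch
from idelucs.PytorchUtils import NetLinear

# Hypothetical sizes: 2080 k-mer features in, 10 clusters out.
net = NetLinear(n_input=2080, n_output=10)
x = torch.randn(32, 2080)        # a batch of k-mer frequency vectors

out, latent = net(x)             # clustering head output plus the shared 64-d latent
probs = net.fine_tune(x)         # new head: per-cluster softmax probabilities

# Both heads sit on the same net.layers trunk, so gradients from the
# fine-tuning head also update the shared encoder weights.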
36 changes: 34 additions & 2 deletions idelucs/__main__.py
@@ -8,7 +8,7 @@


from idelucs.utils import SummaryFasta, plot_confusion_matrix, \
label_features, compute_results
label_features, compute_results, cluster_acc

from idelucs import models

@@ -110,6 +110,7 @@ def run(args):
model_min_loss=np.inf

for i in range(args['n_epochs']):
print(f"EPOCH {i}")
loss = model.contrastive_training_epoch()
model_min_loss = min(model_min_loss, loss)
model_loss.append(loss)
@@ -156,16 +157,43 @@ def run(args):
if args['GT_file'] is not None:
unique_labels = list(np.unique(model.GT))
numClasses = len(unique_labels)
predictions = []
y = np.array(list(map(lambda x: unique_labels.index(x), model.GT)))
np.set_printoptions(threshold=np.inf)

results, ind = compute_results(y_pred, latent, y)
print("Without fine-tuning result:", results)

if args["fine_tune"]:
ind, acc = cluster_acc(y, y_pred)

# print("Y-pred: ", y_pred)
for i in range(10):
model.fine_tune(ind)
y_pred, probabilities, latent = model.predict()
y_pred = y_pred.astype(np.int32)

d = {}
count = 0
for i in range(y_pred.shape[0]):
if y_pred[i] in d:
y_pred[i] = d[y_pred[i]]
else:
d[y_pred[i]] = count
y_pred[i] = count
count += 1
predictions.append(y_pred)
y_pred, probabilities = label_features(np.array(predictions), args['n_clusters'])
results, ind = compute_results(y_pred, latent, y)
d = {}
for i, j in ind:
d[i] = j

# d maps predicted to truth
if -1 in y_pred:
d[-1] = 0

w = np.zeros((numClasses, max(max(y_pred) + 1, max(y) + 1)), dtype=np.int64)
clustered = np.zeros_like(y, dtype=bool)
for i in range(y.shape[0]):
@@ -190,6 +218,8 @@ def run(args):
#clustered = (probabilities >= 0.9)
clustered = (probabilities >= 0.0)



print(results)
sys.stdout.write(f"\r........ Saving Results ..............")
sys.stdout.flush()
@@ -258,6 +288,8 @@ def run(args):
def main():
parser= argparse.ArgumentParser()
parser.add_argument('--sequence_file', action='store',type=str)
parser.add_argument('--fine_tune', action='store_true', default=False)

parser.add_argument('--n_clusters', action='store',type=int,default=0,
help='Expected or maximum number of clusters to find. \n'
'It should be equal to or greater than n_true_clusters \n'
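The relabeling loop in the fine-tune branch of run() canonicalizes arbitrary cluster ids by order of first appearance, so predictions from successive fine-tuning rounds agree on naming before label_features combines them. A standalone sketch of that step on toy labels:

import numpy as np

def canonicalize(y_pred):
    """Relabel clusters so ids are assigned in order of first appearance."""
    y_pred = y_pred.copy()
    d, count = {}, 0
    for j in range(y_pred.shape[0]):
        if y_pred[j] in d:
            y_pred[j] = d[y_pred[j]]
        else:
            d[y_pred[j]] = count
            y_pred[j] = count
            count += 1
    return y_pred

print(canonicalize(np.array([7, 7, 2, 9, 2])))  # -> [0 0 1 2 1]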
84 changes: 73 additions & 11 deletions idelucs/models.py
@@ -12,7 +12,7 @@
from .LossFunctions import IID_loss, info_nce_loss
from .PytorchUtils import NetLinear, myNet
from .ResNet import ResNet18
from .utils import SequenceDataset, create_dataloader
from .utils import SequenceDataset, create_dataloader, generate_dataloader, generate_dataloader_tfrecord, generate_finetune_dataloader

# Random Seeds for reproducibility.
torch.manual_seed(0)
@@ -48,7 +48,6 @@ def __init__(self, args: dict):

self.sequence_file = args['sequence_file']
self.GT_file = args['GT_file']

self.n_clusters = args['n_clusters']
self.k = args['k']

@@ -57,7 +56,7 @@
self.net = NetLinear(self.n_features, args['n_clusters'])
self.reduce = False

elif args['model_size'] == 'small':
if self.k % 2 == 0: n_in = (4**self.k + 4**(self.k//2))//2
else: n_in = (4**self.k)//2
#d = {4: 135, 5: 511, 6: 2079}
@@ -102,18 +101,74 @@ def build_dataloader(self):
#Data Files
data_path = self.sequence_file
GT_file = self.GT_file

self.dataloader = create_dataloader(data_path,

if self.sequence_file.endswith('.tfrecord'):
self.dataloader = generate_dataloader_tfrecord(data_path,
self.n_mimics,
k=self.k,
batch_size=self.batch_sz,
GT_file=GT_file,
reduce=self.reduce)
else:
self.dataloader = create_dataloader(data_path,
self.n_mimics,
k=self.k,
batch_size=self.batch_sz,
GT_file=GT_file,
reduce=self.reduce)

def fine_tune(self, ind):
# d maps each ground-truth label to its matched cluster label
d = {}
for j, i in ind:
d[i] = j
self.net.train()
dataloader = generate_finetune_dataloader(
d,
self.GT_file,
self.sequence_file,
self.k,
self.batch_sz,
self.reduce
)
loss_func = nn.CrossEntropyLoss()
running_loss = 0.0
for _, sample_batched in enumerate(dataloader):
sample = sample_batched['true'].view(-1, 1, self.n_features).type(dtype)
labels = torch.nn.functional.one_hot(sample_batched['label'], num_classes=self.n_clusters).type(dtype)

self.optimizer.zero_grad()
pred_result = self.net.fine_tune(sample)

# Caution: NetLinear.fine_tune already ends in Softmax, while
# nn.CrossEntropyLoss expects raw logits, so softmax is applied twice here.
loss = loss_func(pred_result, labels)
loss.backward()
self.optimizer.step()
running_loss += loss.item()

if self.schedule == 'Plateau':
self.scheduler.step(running_loss)
elif self.schedule == 'Triangle':
self.scheduler.step()

def contrastive_training_epoch(self):
self.net.train()
running_loss = 0.0

# Note: self.dataloader is built once in build_dataloader(),
# which handles both the FASTA and the TFRecord input paths.
for i_batch, sample_batched in enumerate(self.dataloader):
sample = sample_batched['true'].view(-1, 1, self.n_features).type(dtype)
modified_sample = sample_batched['modified'].view(-1, 1, self.n_features).type(dtype)
@@ -145,12 +200,19 @@ def contrastive_training_epoch(self):
def predict(self, data=None):

n_features = self.n_features
# if self.sequence_file.endswith('.tfrecord'):
# test_dataloader = generate_dataloader_tfrecord(self.sequence_file,
# self.n_mimics,
# k=self.k,
# batch_size=self.batch_sz,
# reduce=self.reduce)
# else:
test_dataset = SequenceDataset(self.sequence_file, k=self.k, transform=None, GT_file=self.GT_file, reduce=self.reduce)
test_dataloader = DataLoader(test_dataset,
batch_size=self.batch_sz,
shuffle=False,
num_workers=0,
drop_last=False)
y_pred = []
probabilities = []
latent = []
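The fine_tune method above consumes the assignment pairs ind returned by cluster_acc, whose implementation is not shown in this diff. A typical Hungarian-matching sketch that yields such (predicted cluster, true label) pairs, offered as assumed behavior rather than the repository's verbatim code:

import numpy as np
from scipy.optimize import linear_sum_assignment

def match_clusters(y_true, y_pred):
    """Return (predicted_cluster, true_label) pairs maximizing total overlap."""
    n = int(max(y_pred.max(), y_true.max())) + 1
    w = np.zeros((n, n), dtype=np.int64)      # co-occurrence counts
    for p, t in zip(y_pred, y_true):
        w[p, t] += 1
    rows, cols = linear_sum_assignment(-w)    # negate to maximize matched mass
    return list(zip(rows, cols))

# models.fine_tune then inverts these pairs into d[true_label] = matched_cluster
# and trains the new softmax head with cross-entropy on those pseudo-labels.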