Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 7 additions & 10 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,9 @@ are executed first, thus enabling faster feedback for failed tests to developers
**This is a WIP**
-----------------

DATASET: https://github.com/staiyeba/atcs-dataset
DATASET Format notes: https://github.com/staiyeba/retecs-forked/tree/master
- DATASET: https://github.com/staiyeba/atcs-dataset
- DATASET Format notes: https://github.com/staiyeba/retecs-forked/tree/master
- Old Dataset format: https://github.com/staiyeba/test-pipeline-optimizing-algorithm/blob/master/datasets/case_1/10000_3.csv

## The Algorithm

Expand All @@ -22,19 +23,15 @@ DATASET Format notes: https://github.com/staiyeba/retecs-forked/tree/master
delta_new = 0
if tx != ty:
if tx.pos < ty.pos:
if tx.pos < pre_test_max_pos(ty) and \
ty.pos > subsequent_test_min_pos(tx):
if tx.pos < pre_test_max_pos(ty) and ty.pos > subsequent_test_min_pos(tx):
if tx.fail_prob < ty.fail_prob:
delta_new = (ty.fail_prob − tx.fail_prob) \
∗ (ty.pos − tx.pos)
delta_new = (ty.fail_prob - tx.fail_prob) * (ty.pos - tx.pos)
if delta_max < delta_new:
delta_max = delta_new
else:
if ty.pos < pre_test_max_pos(tx) and \
tx.pos > subsequent_test_min_pos(ty):
if ty.pos < pre_test_max_pos(tx) and tx.pos > subsequent_test_min_pos(ty):
if ty.fail_prob < tx.fail_prob:
delta_new = (tx.fail_prob − ty.fail_prob) \
∗ (tx.pos − ty.pos)
delta_new = (tx.fail_prob - ty.fail_prob) * (tx.pos - ty.pos)
if delta_max < delta_new:
delta_max = delta_new
if delta_max > 0:
Expand Down
Binary file added __pycache__/rules.cpython-38.pyc
Binary file not shown.
Binary file added __pycache__/support.cpython-38.pyc
Binary file not shown.
32,261 changes: 32,261 additions & 0 deletions dataset/iofrol.csv

Large diffs are not rendered by default.

32,261 changes: 32,261 additions & 0 deletions dataset/iofrol_dep.csv

Large diffs are not rendered by default.

25,595 changes: 25,595 additions & 0 deletions dataset/paintcontrol.csv

Large diffs are not rendered by default.

25,595 changes: 25,595 additions & 0 deletions dataset/paintcontrol_dep.csv

Large diffs are not rendered by default.

38 changes: 38 additions & 0 deletions main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
import sys
import support
import os.path
import random
import argparse
import rules

# Run with: python .\main.py -f dataset/iofrol.csv -t 10 15 -d 8 2
def main(dataset_name: str, test_depend_rules: tuple) -> None:
    """Load the dataset from *dataset_name* and inject dependency rules into it.

    test_depend_rules is a sequence of int pairs handed straight through to
    rules.Rules.construct_all_ruleset (presumably (tests, dependencies)
    pairs — confirm against the caller).
    """
    loaded = support.get_dataset(dataset_name)
    rules.Rules(loaded).construct_all_ruleset(test_depend_rules)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Algorithm for optimizing test sets')

    parser.add_argument("-f", type=str, dest='dataset_name', help='The input file name of dataset')
    parser.add_argument('-t', type=int, dest='tests', default=[], nargs='+', help='An array specifying the number of tests (-t) x which should have a dependency (-d) y, can not exceed maximum number of tests available')
    parser.add_argument("-d", type=int, dest='dependencies', default=[], nargs='+', help='For each test x in -t, specifies the number of dependencies that should be created')

    args = parser.parse_args()

    # Every -t entry needs a matching -d entry.
    if len(args.dependencies) != len(args.tests):
        print("The number of tests don't correspond with the number of added dependencies...")
        # BUG FIX: the two counts were swapped in the original format call.
        print("Tests {}, Dependencies {}".format(len(args.tests), len(args.dependencies)))
        print("Exiting script...")
        sys.exit(1)  # BUG FIX: error paths should exit non-zero (was exit(0))
    if not os.path.isfile(args.dataset_name):
        # BUG FIX: args.filename does not exist (dest is 'dataset_name') and
        # would have raised AttributeError instead of printing this message.
        print("File {} does not exist".format(args.dataset_name))
        print("Exiting script...")
        sys.exit(1)

    # BUG FIX: pairs must be (tests_number, depend_number) — Rules.construct_all_ruleset
    # reads index 0 as the test count, and the example below confirms that order —
    # but the old map built (dependency, test) pairs.
    test_depend_rules = list(zip(args.tests, args.dependencies))  # e.g. [(10, 2), (20, 4)]
    main(args.dataset_name, test_depend_rules)




29 changes: 29 additions & 0 deletions rules.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import random

class Rules:
    """Injects synthetic dependency rules into a dataset of tests.

    The dataset is a mapping of test id -> row dict; each generated rule
    writes a list of other test ids into that row's 'Depend' entry.
    """

    def __init__(self, dataset: dict) -> None:
        self.dataset = dataset
        # Tests that have not yet been given a dependency rule.
        self.available_tests = list(dataset.keys())

    def construct_one_rule_on_test(self, depend_number: int) -> None:
        """Pick one still-available test at random and assign it
        *depend_number* distinct dependencies drawn from the other tests."""
        test_id = random.choice(self.available_tests)
        self.available_tests.remove(test_id)

        # BUG FIX: was `key is not test_id` — identity comparison where value
        # equality is meant. It only worked because the list happened to hold
        # the exact same key objects; `!=` is the correct, robust comparison.
        possible_dependencies = [key for key in self.dataset.keys() if key != test_id]
        chosen_dependencies = random.sample(possible_dependencies, k=depend_number)

        # Attach this number of dependencies to the chosen test.
        self.dataset[test_id]['Depend'] = chosen_dependencies

        print("Test_ID {} with rules {}".format(test_id, chosen_dependencies))

    def construct_one_ruleset(self, tests_number: int, depend_number: int) -> None:
        """Give *depend_number* dependencies to each of *tests_number* distinct tests."""
        for _ in range(tests_number):
            self.construct_one_rule_on_test(depend_number)

    def construct_all_ruleset(self, test_depend_rules: tuple):
        """Apply every (tests_number, depend_number) pair in *test_depend_rules*."""
        for tests_number, depend_number in test_depend_rules:
            self.construct_one_ruleset(tests_number, depend_number)
22 changes: 22 additions & 0 deletions support.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import csv
import ast
import json

def convert_dictionary(element):
    """Convert, in place, the string fields of one CSV row to their real types.

    'LastResults' holds a Python-literal list and is parsed with
    ast.literal_eval; 'Depend' is initialised to an empty list so that
    dependency rules can be attached later.
    """
    for field in ('Id', 'Duration', 'CalcPrio', 'Verdict', 'Cycle'):
        element[field] = int(element[field])
    element['LastResults'] = ast.literal_eval(element['LastResults'])
    element['Depend'] = []

def get_dataset(file_name: str) -> dict:
    """Read a semicolon-delimited CSV of tests and return {Id: row-dict}.

    Each row is converted in place (see convert_dictionary) before being
    keyed by its integer 'Id'; later rows with a duplicate Id overwrite
    earlier ones.
    """
    dataset = {}
    # FIX: the csv module requires files opened with newline='' (not "\n")
    # so the reader can correctly handle newline translation and quoted
    # fields containing embedded newlines.
    with open(file_name, newline="") as csvfile:
        reader = csv.DictReader(csvfile, delimiter=';')
        for row in reader:
            convert_dictionary(row)
            dataset[row['Id']] = row
    return dataset