Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 54 additions & 0 deletions additional_tasks/unzip.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
{
"cells": [
{
"cell_type": "code",
"id": "initial_id",
"metadata": {
"collapsed": true,
"ExecuteTime": {
"end_time": "2024-10-11T04:17:08.045148Z",
"start_time": "2024-10-11T04:15:47.453326Z"
}
},
"source": [
"import zipfile\n",
"#location of the zip file\n",
"zip_ref = zipfile.ZipFile(\"../additional_tasks/dataset/archive.zip\", 'r')\n",
"#location to which we want to extract the file\n",
"zip_ref.extractall(\"dataset/imagenet/\")\n",
"zip_ref.close()"
],
"outputs": [],
"execution_count": 4
},
{
"metadata": {},
"cell_type": "code",
"outputs": [],
"execution_count": null,
"source": "",
"id": "57ac71e361a0662c"
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
Binary file added exps.zip
Binary file not shown.
Binary file added models.zip
Binary file not shown.
15 changes: 11 additions & 4 deletions quant_aware.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,18 @@
if __name__ == '__main__':
exp_dir = 'exps/{}'.format(args.exp_name)

arch_path = '{}/arch'.format(exp_dir)
tmp_lst = json.load(open(arch_path, 'r'))
# Path to the architecture file (assumed to be named 'arch.json')
arch_file_path = os.path.join(exp_dir, 'arch', 'arch.json')

# Check if the architecture file exists
assert os.path.exists(arch_file_path), f"Architecture file does not exist: {arch_file_path}"

# Load architecture and quantization info from the JSON file
with open(arch_file_path, 'r') as f:
tmp_lst = json.load(f)
info, q_info = tmp_lst
print(info)
print(q_info)
print("Architecture Info:", info)
print("Quantization Info:", q_info)

DynamicSeparableConv2d.KERNEL_TRANSFORM_MODE = 1
DynamicSeparableQConv2d.KERNEL_TRANSFORM_MODE = 1
Expand Down
10 changes: 6 additions & 4 deletions search.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
parser.add_argument('--acc_train_sample', type=int, default=None)
parser.add_argument('--mode', type=str, default='evolution', choices=['evolution'])
parser.add_argument('--constraint', type=float, default=120)
parser.add_argument('--exp_name', type=str, default='test')
parser.add_argument('--exp_name', type=str, default='search')
args, _ = parser.parse_known_args()
print(args)

Expand All @@ -38,12 +38,15 @@ def add_arch(info, lst):
dic = {}
whole = {}
candidate_archs = []

out_dir = 'exps/{}'.format(args.exp_name)
lats = []
# The original work used 12.80 as the latency limit here
for i in [args.constraint]:
res, info, t = evolution_gather(parser, force_latency=i)
acc, arch, lat = info
print((i, res, lat, arch, acc))
# Record the result for i only if it is new or has higher accuracy than the stored one
if i not in dic or dic[i] < acc:
dic[i] = acc
whole[i] = (t, res, lat, arch, acc)
Expand All @@ -54,6 +57,5 @@ def add_arch(info, lst):
json.dump(candidate_archs[0], open('{}/arch'.format(out_dir), 'w'))
json.dump(lats, open('{}/lat'.format(out_dir), 'w'))


if __name__ == '__main__':
main()
if __name__ == '__main__':
main()
55 changes: 43 additions & 12 deletions test.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,56 +18,87 @@
from imagenet_codebase.run_manager import ImagenetRunConfig, RunManager

parser = argparse.ArgumentParser(description='Test')
parser.add_argument('--exp_dir', type=str, default=None)
parser.add_argument('--exp_dir', type=str, default='./exps/test')
args, _ = parser.parse_known_args()


if __name__ == '__main__':
    # Hardware-cost predictors. A single LatencyPredictor class serves both
    # metrics, selected via its `type` argument. These two instances are
    # reused below instead of constructing fresh predictors per metric.
    latency_predictor = LatencyPredictor(type='latency')
    energy_predictor = LatencyPredictor(type='energy')

    # Path to the searched architecture description produced by search.py
    # (assumed to be stored as 'arch.json' under the experiment directory).
    arch_path = os.path.join(args.exp_dir, 'arch', 'arch.json')
    if not os.path.exists(arch_path):
        # Raise explicitly rather than assert: asserts are stripped under `python -O`.
        raise FileNotFoundError(f"Architecture file does not exist: {arch_path}")

    # The JSON file stores a two-element list: [architecture info, quantization info].
    with open(arch_path, 'r') as f:
        info, q_info = json.load(f)
    print("Architecture Info:", info)
    print("Quantization Info:", q_info)

    # Predict latency/energy for the combined architecture + quantization
    # configuration. Build the merged dict once and reuse it for both calls.
    full_config = dict(info, **q_info)
    print('Latency: {:.2f}ms'.format(latency_predictor.predict_lat(full_config)))
    print('Energy: {:.2f}mJ'.format(energy_predictor.predict_lat(full_config)))

    # Best checkpoint produced by quantization-aware training (quant_aware.py).
    ckpt_path = os.path.join(args.exp_dir, 'checkpoint', 'model_best.pth.tar')
    if os.path.exists(ckpt_path):
        DynamicSeparableConv2d.KERNEL_TRANSFORM_MODE = 1
        DynamicSeparableQConv2d.KERNEL_TRANSFORM_MODE = 1

        # Build the dynamic super-network spanning the full search space.
        dynamic_proxyless = DynamicQuantizedProxylessNASNets(
            ks_list=[3, 5, 7],
            expand_ratio_list=[4, 6],
            depth_list=[2, 3, 4],
            base_stage_width='proxyless',
            width_mult_list=1.0,
            dropout_rate=0,
            n_classes=1000
        )

        # Initialize from the pretrained OFA ProxylessNAS weights.
        proxylessnas_init = torch.load(
            './models/imagenet-OFA',  # path to pretrained initial weights
            map_location='cpu'
        )['state_dict']
        dynamic_proxyless.load_weights_from_proxylessnas(proxylessnas_init)

        # Evaluation-time run configuration for ImageNet.
        init_lr = 1e-3
        run_config = ImagenetRunConfig(
            test_batch_size=1000,
            image_size=224,
            n_worker=16,
            valid_size=5000,
            dataset='imagenet',
            train_batch_size=256,
            init_lr=init_lr,
            n_epochs=30,
        )

        run_manager = RunManager('~/tmp', dynamic_proxyless, run_config, init=False)

        # Overwrite the OFA initialization with the fine-tuned best checkpoint.
        best_state = torch.load(
            ckpt_path,
            map_location='cpu'
        )['state_dict']
        dynamic_proxyless.load_weights_from_proxylessnas(best_state)

        # Activate the searched subnet and its quantization policy, then evaluate.
        dynamic_proxyless.set_active_subnet(**info)
        dynamic_proxyless.set_quantization_policy(**q_info)

        # Validate on the test split and report top-1 accuracy
        # (acc[1] — presumably (loss, top1, ...); TODO confirm against RunManager.validate).
        acc = run_manager.validate(is_test=True)
        print('Accuracy: {:.1f}'.format(acc[1]))
    else:
        print(f"Checkpoint file not found: {ckpt_path}")