Commit e1c6107

2 parents: 42f029c + c7422ca

File tree: 14 files changed (+260 / -195 lines)

.github/workflows/doc_deploy.yml

Lines changed: 39 additions & 0 deletions
@@ -0,0 +1,39 @@
+name: Deploy
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write    # To push a branch
+      pages: write       # To push to a GitHub Pages site
+      id-token: write    # To update the deployment status
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Install latest mdbook
+        run: |
+          tag=$(curl 'https://api.github.com/repos/rust-lang/mdbook/releases/latest' | jq -r '.tag_name')
+          url="https://github.com/rust-lang/mdbook/releases/download/${tag}/mdbook-${tag}-x86_64-unknown-linux-gnu.tar.gz"
+          mkdir mdbook
+          curl -sSL $url | tar -xz --directory=./mdbook
+          echo `pwd`/mdbook >> $GITHUB_PATH
+      - name: Build Book
+        run: |
+          # This assumes your book is in the root of your repository.
+          # Just add a `cd` here if you need to change to another directory.
+          mdbook build
+      - name: Setup Pages
+        uses: actions/configure-pages@v4
+      - name: Upload artifact
+        uses: actions/upload-pages-artifact@v3
+        with:
+          # Upload entire repository
+          path: 'book'
+      - name: Deploy to GitHub Pages
+        id: deployment
+        uses: actions/deploy-pages@v4

book.toml

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+# Documentation
+# * mdbook   https://rust-lang.github.io/mdBook/
+# * template https://github.com/kg4zow/mdbook-template/
+
+[book]
+authors = ["DD-Ranking Team"]
+language = "en"
+multilingual = false
+src = "doc"
+title = "DD-Ranking Benchmark"

configs/Demo_Hard_Label.yaml

Lines changed: 20 additions & 5 deletions
@@ -1,7 +1,18 @@
+
+# real data
 dataset: "CIFAR10"
 real_data_path: "./dataset/"
+custom_val_trans: None
+
+# synthetic data
 ipc: 10
+im_size: (32, 32)
+
+# agent model
 model_name: "ConvNet-3"
+use_torchvision: False
+
+# data augmentation
 data_aug_func: "dsa"
 aug_params: {
     "prob_flip": 0.5,
@@ -13,16 +24,20 @@ aug_params: {
     "ratio_crop_pad": 0.125,
     "ratio_cutout": 0.5
 }
+use_zca: False
+
+# training specifics
 optimizer: "sgd"
 lr_scheduler: "step"
 weight_decay: 0.0005
 momentum: 0.9
 num_eval: 5
-im_size: (32, 32)
 num_epochs: 1000
-use_zca: False
-batch_size: 256
+syn_batch_size: 128
+real_batch_size: 256
 default_lr: 0.01
 num_workers: 4
-save_path: "./results/my_method_hard_label_scores.csv"
-device: "cuda"
+device: "cuda"
+
+# save path
+save_path: "./results/my_method_hard_label_scores.csv"
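These demo configs are plain YAML, so two values need normalization after loading: YAML has no tuple literal, so `im_size: (32, 32)` is read as a string, and a capitalized `None` is also read as a string (only `null`/`~` map to Python's None). A minimal loading sketch with PyYAML; the loader below is an illustration, not DD-Ranking's actual config reader:

    # Hypothetical loader for configs/Demo_Hard_Label.yaml; not part of this commit.
    import ast
    import yaml  # PyYAML

    with open("configs/Demo_Hard_Label.yaml") as f:
        cfg = yaml.safe_load(f)

    # "(32, 32)" arrives as a string; turn it into a real tuple.
    cfg["im_size"] = ast.literal_eval(cfg["im_size"])        # -> (32, 32)

    # "None" (capitalized) is the string "None" to YAML, not a null.
    if cfg.get("custom_val_trans") == "None":
        cfg["custom_val_trans"] = None

    print(cfg["dataset"], cfg["ipc"], cfg["im_size"], cfg["syn_batch_size"])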

configs/Demo_Soft_Label.yaml

Lines changed: 23 additions & 5 deletions
@@ -1,7 +1,19 @@
+# real data
 dataset: "CIFAR10"
 real_data_path: "./dataset/"
+custom_val_trans: None
+
+# synthetic data
 ipc: 10
+im_size: (32, 32)
+
+# agent model
 model_name: "ConvNet-3"
+stu_use_torchvision: False
+tea_use_torchvision: False
+teacher_dir: "./teacher_models"
+
+# data augmentation
 data_aug_func: "dsa"
 aug_params: {
     "prob_flip": 0.5,
@@ -13,19 +25,25 @@ aug_params: {
     "ratio_crop_pad": 0.125,
     "ratio_cutout": 0.5
 }
+use_zca: True
+
+# soft label settings
 soft_label_mode: "S"
 soft_label_criterion: "sce"
+temperature: 1.0
+
+# training specifics
 optimizer: "sgd"
 lr_scheduler: "step"
-temperature: 20.0
 weight_decay: 0.0005
 momentum: 0.9
 num_eval: 5
-im_size: (32, 32)
 num_epochs: 1000
-use_zca: True
-batch_size: 256
 default_lr: 0.01
 num_workers: 4
-save_path: "./results/my_method_soft_label_scores.csv"
 device: "cuda"
+syn_batch_size: 128
+real_batch_size: 256
+
+# save path
+save_path: "./results/my_method_soft_label_scores.csv"
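The soft-label keys (soft_label_mode, soft_label_criterion: "sce", temperature: 1.0, teacher_dir) pair a teacher checkpoint with the SoftCrossEntropyLoss updated further down in this commit. A rough sketch of how these settings could drive one distillation-style training step; the student/teacher models and the optimizer are placeholders, the import path simply mirrors the file location, and the exact semantics of soft_label_mode are defined by DD-Ranking, not by this sketch:

    # Illustrative wiring of the soft-label settings; models and optimizer are placeholders.
    import torch
    from dd_ranking.loss.sce import SoftCrossEntropyLoss

    criterion = SoftCrossEntropyLoss(temperature=1.0)    # soft_label_criterion: "sce"

    def train_step(student, teacher, images, optimizer):
        with torch.no_grad():                            # teacher provides the soft labels
            tea_logits = teacher(images)
        stu_logits = student(images)
        loss = criterion(stu_logits, tea_logits)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        return loss.item()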

dd_ranking/aug/cutmix.py

Lines changed: 0 additions & 9 deletions
@@ -5,15 +5,6 @@

 class Cutmix_Augmentation:
     def __init__(self, params: dict):
-        # self.transform = kornia.augmentation.RandomCutMixV2(
-        #     num_mix = params["times"],
-        #     cut_size = params["size"],
-        #     same_on_batch = params["same_on_batch"],
-        #     beta = params["beta"],
-        #     keepdim = params["keep_dim"],
-        #     p = params["prob"]
-        # )
-
         self.cutmix_p = params["cutmix_p"]

     def rand_bbox(self, size, lam):
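Only the commented-out kornia call is removed here; rand_bbox itself is unchanged and not shown in the diff. For reference, the conventional CutMix box sampling that a rand_bbox(size, lam) helper computes looks like the sketch below (the standard recipe from the CutMix paper, not a copy of this repo's implementation):

    # Standard CutMix box sampling; illustrative, not this repo's exact code.
    import numpy as np

    def rand_bbox(size, lam):
        _, _, H, W = size                      # batch shape (N, C, H, W)
        cut_rat = np.sqrt(1.0 - lam)           # cut area ratio is 1 - lam
        cut_w, cut_h = int(W * cut_rat), int(H * cut_rat)
        cx, cy = np.random.randint(W), np.random.randint(H)   # random box centre
        bbx1 = np.clip(cx - cut_w // 2, 0, W)
        bby1 = np.clip(cy - cut_h // 2, 0, H)
        bbx2 = np.clip(cx + cut_w // 2, 0, W)
        bby2 = np.clip(cy + cut_h // 2, 0, H)
        return bbx1, bby1, bbx2, bby2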

dd_ranking/aug/mixup.py

Lines changed: 0 additions & 7 deletions
@@ -5,13 +5,6 @@

 class Mixup_Augmentation:
     def __init__(self, params: dict):
-        # self.transform = kornia.augmentation.RandomMixUpV2(
-        #     lambda_val = params["lambda_range"],
-        #     same_on_batch = params["same_on_batch"],
-        #     keepdim = params["keepdim"],
-        #     p = params["prob"]
-        # )
-
         self.mixup_p = params["mixup_p"]

     def mixup(self, images):
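Likewise, only dead kornia code is dropped from Mixup_Augmentation; the body of mixup(self, images) is not part of this diff. The usual label-free mixup step its signature suggests is sketched below (illustrative only):

    # Standard batch mixup without labels; illustrative, not this repo's exact code.
    import numpy as np
    import torch

    def mixup(images: torch.Tensor, alpha: float = 1.0) -> torch.Tensor:
        lam = float(np.random.beta(alpha, alpha))                 # mixing coefficient
        index = torch.randperm(images.size(0), device=images.device)
        return lam * images + (1.0 - lam) * images[index]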

dd_ranking/loss/kl.py

Lines changed: 3 additions & 4 deletions
@@ -1,17 +1,16 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from torch.nn import KLDivLoss


 class KLDivergenceLoss(nn.Module):
     def __init__(self, temperature=1.2):
         super(KLDivergenceLoss, self).__init__()
         self.temperature = temperature
-        self.kl = KLDivLoss(reduction='batchmean')

     def forward(self, stu_outputs, tea_outputs):
         stu_probs = F.log_softmax(stu_outputs / self.temperature, dim=1)
-        tea_probs = F.softmax(tea_outputs / self.temperature, dim=1)
-        loss = self.kl(stu_probs, tea_probs)
+        with torch.no_grad():
+            tea_probs = F.softmax(tea_outputs / self.temperature, dim=1)
+        loss = F.kl_div(stu_probs, tea_probs, reduction='batchmean')
         return loss
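The rewrite drops the stored nn.KLDivLoss module, wraps the teacher's softmax in torch.no_grad(), and calls F.kl_div directly with reduction='batchmean'. A quick usage sketch with random logits (shapes and values are made up for illustration, and the import path mirrors the file location):

    # Illustrative use of KLDivergenceLoss; random logits, made-up shapes.
    import torch
    from dd_ranking.loss.kl import KLDivergenceLoss

    criterion = KLDivergenceLoss(temperature=1.2)
    stu_logits = torch.randn(8, 10, requires_grad=True)   # batch of 8, 10 classes
    tea_logits = torch.randn(8, 10)

    loss = criterion(stu_logits, tea_logits)
    loss.backward()                                        # gradients flow only into the student
    print(loss.item(), stu_logits.grad.shape)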

dd_ranking/loss/sce.py

Lines changed: 4 additions & 3 deletions
@@ -4,12 +4,13 @@


 class SoftCrossEntropyLoss(nn.Module):
-    def __init__(self):
+    def __init__(self, temperature=1.2):
         super(SoftCrossEntropyLoss, self).__init__()
+        self.temperature = temperature

     def forward(self, stu_outputs, tea_outputs):
-        input_log_likelihood = -F.log_softmax(stu_outputs, dim=1)
-        target_log_likelihood = F.softmax(tea_outputs, dim=1)
+        input_log_likelihood = -F.log_softmax(stu_outputs / self.temperature, dim=1)
+        target_log_likelihood = F.softmax(tea_outputs / self.temperature, dim=1)
         batch_size = stu_outputs.size(0)
         loss = torch.sum(torch.mul(input_log_likelihood, target_log_likelihood)) / batch_size
         return loss
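With both logits divided by the same temperature, SoftCrossEntropyLoss at temperature=1.0 reduces to ordinary cross-entropy against the teacher's softmax. A small sanity sketch with random logits (F.cross_entropy with probability targets needs PyTorch >= 1.10):

    # Sanity sketch: at temperature 1.0 the loss matches soft-target cross-entropy.
    import torch
    import torch.nn.functional as F
    from dd_ranking.loss.sce import SoftCrossEntropyLoss

    stu = torch.randn(4, 10)
    tea = torch.randn(4, 10)

    sce = SoftCrossEntropyLoss(temperature=1.0)(stu, tea)
    ref = F.cross_entropy(stu, F.softmax(tea, dim=1))      # soft targets, mean over batch
    print(torch.allclose(sce, ref, atol=1e-6))             # expected: True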
