Skip to content

Commit ae05ee3

Browse files
committed
Merge branch 'dev_STG_compress' into main
2 parents 896969e + ff1a564 commit ae05ee3

28 files changed

+8386
-16
lines changed

.github/workflows/publish.yml

Lines changed: 110 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,110 @@
1+
# This workflow will upload a Python package using twine when a release is created
2+
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3+
4+
name: Build and Release Wheels
5+
6+
on:
7+
release:
8+
types: [created]
9+
10+
permissions:
11+
contents: write
12+
13+
jobs:
14+
15+
# Build the wheels using reusable_building.yml
16+
build_wheels:
17+
name: Call reusable building workflow
18+
uses: ./.github/workflows/building.yml
19+
20+
create_release_and_upload_packages:
21+
name: Upload to GitHub Release
22+
needs: [build_wheels]
23+
runs-on: ubuntu-latest
24+
strategy:
25+
fail-fast: false
26+
matrix:
27+
python-version: ['3.10']
28+
steps:
29+
30+
- name: Checkout code
31+
uses: actions/checkout@v3
32+
33+
- name: Download packages
34+
id: download_artifacts
35+
uses: actions/download-artifact@v3
36+
with:
37+
name: compiled_wheels_python${{ matrix.python-version }}
38+
path: dist
39+
40+
- name: Upload packages to GitHub Release
41+
id: upload_assets
42+
run: |
43+
for file in $(ls ./dist/*.*); do
44+
echo "Uploading $file..."
45+
filename=$(basename "$file")
46+
encoded_filename=$(echo "$filename" | sed 's/+/%2B/g')
47+
curl -X POST \
48+
-H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
49+
-H "Content-Type: application/zip" \
50+
--data-binary @"$file" \
51+
"${{ github.event.release.upload_url }}=$encoded_filename"
52+
done
53+
54+
generate_simple_index_pages:
55+
name: Generate Simple Index Pages
56+
needs: [create_release_and_upload_packages]
57+
runs-on: ubuntu-latest
58+
steps:
59+
60+
- name: Checkout code
61+
uses: actions/checkout@v3
62+
63+
- name: Generate Simple Index Pages
64+
run: python .github/workflows/generate_simple_index_pages.py --outdir ./whl
65+
env:
66+
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
67+
68+
- name: Deploy to GitHub Pages
69+
uses: peaceiris/actions-gh-pages@v3
70+
with:
71+
github_token: ${{ secrets.GITHUB_TOKEN }}
72+
publish_dir: ./whl # Directory where the simple index pages are located
73+
destination_dir: whl # The 'whl' folder in the GitHub Pages root
74+
keep_files: false # With destination_dir set, this erases only that subdirectory before deploying.
75+
cname: docs.gsplat.studio
76+
77+
upload_pypi:
78+
name: Upload to PyPI
79+
needs: [build_wheels]
80+
runs-on: ubuntu-latest
81+
environment: production
82+
steps:
83+
84+
- uses: actions/download-artifact@v3
85+
with:
86+
name: pypi_packages
87+
path: dist
88+
89+
- name: Set up Python
90+
uses: actions/setup-python@v4
91+
with:
92+
python-version: '3.7'
93+
94+
- name: Install dependencies
95+
run: |
96+
python -m pip install build twine
97+
shell: bash
98+
99+
# - name: Publish package to Test PyPI
100+
# uses: pypa/gh-action-pypi-publish@release/v1
101+
# with:
102+
# password: ${{ secrets.TEST_PYPI_API_TOKEN }}
103+
# repository-url: https://test.pypi.org/legacy/
104+
105+
- name: Publish package to PyPI
106+
env:
107+
PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
108+
run: |
109+
twine upload --username __token__ --password $PYPI_TOKEN dist/*
110+
shell: bash

.gitignore

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,6 @@ target/
7777

7878
# Jupyter Notebook
7979
.ipynb_checkpoints
80-
.ipynb
8180
*.ipynb
8281

8382
# pyenv
@@ -127,9 +126,6 @@ data
127126
results
128127

129128
third_party
130-
# gscodec
131-
setup_gscodec.py
132-
Readme_GSCodec.md
133129

134130
figs
135131
stats
Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
# ----------------- Training Setting-------------- #
2+
SCENE_DIR="data/tandt"
3+
# eval all 9 scenes for benchmarking
4+
SCENE_LIST="train truck" # truck
5+
# SCENE_LIST="garden bicycle stump bonsai counter kitchen room treehill flowers"
6+
7+
# # 0.36M GSs
8+
# RESULT_DIR="results/benchmark_tt_mcmc_0_36M_png_compression"
9+
# CAP_MAX=360000
10+
11+
# # 0.49M GSs
12+
# RESULT_DIR="results/benchmark_tt_mcmc_tt_0_49M_png_compression"
13+
# CAP_MAX=490000
14+
15+
# 1M GSs
16+
RESULT_DIR="results/benchmark_tt_mcmc_1M_png_compression_pipe_wo_adamask"
17+
CAP_MAX=1000000
18+
19+
# # 4M GSs
20+
# RESULT_DIR="results/benchmark_tt_mcmc_4M_png_compression"
21+
# CAP_MAX=4000000
22+
23+
# ----------------- Training Setting-------------- #
24+
25+
26+
27+
# ----------------- Main Job --------------------- #
28+
run_single_scene() {
29+
local GPU_ID=$1
30+
local SCENE=$2
31+
32+
echo "Running $SCENE on GPU: $GPU_ID"
33+
34+
# train without eval
35+
# CUDA_VISIBLE_DEVICES=$GPU_ID python simple_trainer.py mcmc --eval_steps -1 --disable_viewer --data_factor 1 \
36+
# --strategy.cap-max $CAP_MAX \
37+
# --data_dir $SCENE_DIR/$SCENE/ \
38+
# --result_dir $RESULT_DIR/$SCENE/ \
39+
# --compression_sim \
40+
# --entropy_model_opt \
41+
# # --shN_ada_mask_opt
42+
# # --compression png
43+
44+
45+
# eval: use vgg for lpips to align with other benchmarks
46+
CUDA_VISIBLE_DEVICES=$GPU_ID python simple_trainer.py mcmc --disable_viewer --data_factor 1 \
47+
--strategy.cap-max $CAP_MAX \
48+
--data_dir $SCENE_DIR/$SCENE/ \
49+
--result_dir $RESULT_DIR/$SCENE/ \
50+
--lpips_net vgg \
51+
--compression png \
52+
--ckpt $RESULT_DIR/$SCENE/ckpts/ckpt_29999_rank0.pt
53+
54+
}
55+
# ----------------- Main Job --------------------- #
56+
57+
58+
59+
# ----------------- Experiment Loop -------------- #
60+
GPU_LIST=(4 5)
61+
GPU_COUNT=${#GPU_LIST[@]}
62+
63+
SCENE_IDX=-1
64+
65+
for SCENE in $SCENE_LIST;
66+
do
67+
SCENE_IDX=$((SCENE_IDX + 1))
68+
{
69+
run_single_scene ${GPU_LIST[$SCENE_IDX]} $SCENE
70+
} &
71+
72+
done
73+
74+
# ----------------- Experiment Loop -------------- #
75+
76+
# Wait for finishing the jobs across all scenes
77+
wait
78+
echo "All scenes finished."
79+
80+
# Zip the compressed files and summarize the stats
81+
if command -v zip &> /dev/null
82+
then
83+
echo "Zipping results"
84+
python benchmarks/compression/summarize_stats.py --results_dir $RESULT_DIR --scenes $SCENE_LIST
85+
else
86+
echo "zip command not found, skipping zipping"
87+
fi
Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
import json
2+
import os
3+
import subprocess
4+
from collections import defaultdict
5+
from typing import List
6+
7+
from matplotlib.pyplot import step
8+
import numpy as np
9+
import torch
10+
import tyro
11+
12+
13+
def main(results_dir: str, scenes: List[str], num_frame: int):
14+
print("scenes:", scenes)
15+
stage = "compress"
16+
17+
start_frames = [f"{i}" for i in range(0, 300, num_frame)]
18+
19+
summary = defaultdict(list)
20+
for scene in scenes:
21+
22+
for start_frame in start_frames:
23+
24+
scene_dir = os.path.join(results_dir, scene, start_frame)
25+
26+
# if use best_step
27+
try:
28+
best_step = torch.load(os.path.join(scene_dir, f"ckpts/ckpt_best_rank0.pt"))["step"]
29+
except:
30+
best_step = 29999
31+
32+
if stage == "compress":
33+
zip_path = f"{scene_dir}/compression.zip"
34+
if os.path.exists(zip_path):
35+
subprocess.run(f"rm {zip_path}", shell=True)
36+
subprocess.run(f"zip -r {zip_path} {scene_dir}/compression/", shell=True)
37+
out = subprocess.run(
38+
f"stat -c%s {zip_path}", shell=True, capture_output=True
39+
)
40+
size = int(out.stdout)
41+
summary["size"].append(size)
42+
43+
bitrate = size * 8 / 1024**2 / num_frame * 30
44+
summary["bitrate"].append(bitrate)
45+
46+
MB_per_frame = size / 1024**2 / num_frame
47+
summary["MB_per_frame"].append(MB_per_frame)
48+
49+
with open(os.path.join(scene_dir, f"stats/{stage}_step{best_step}.json"), "r") as f:
50+
stats = json.load(f)
51+
for k, v in stats.items():
52+
summary[k].append(v)
53+
54+
stage = "val"
55+
for scene in scenes:
56+
57+
for start_frame in start_frames:
58+
59+
scene_dir = os.path.join(results_dir, scene, start_frame)
60+
61+
try:
62+
best_step = torch.load(os.path.join(scene_dir, f"ckpts/ckpt_best_rank0.pt"))["step"]
63+
except:
64+
best_step = 29999
65+
66+
with open(os.path.join(scene_dir, f"stats/{stage}_step{best_step}.json"), "r") as f:
67+
stats = json.load(f)
68+
for k, v in stats.items():
69+
if k in ['psnr', 'ssim', 'lpips']:
70+
summary['val_'+k].append(v)
71+
72+
73+
for k, v in summary.items():
74+
print(k, np.mean(v))
75+
76+
mean_summary = {k: np.mean(v) for k, v in summary.items()}
77+
78+
with open(f"{results_dir}/comp_summary.json", "w") as fp:
79+
json.dump(mean_summary, fp)
80+
81+
if __name__ == "__main__":
82+
tyro.cli(main)
Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,74 @@
1+
import json
2+
import os
3+
import subprocess
4+
from collections import defaultdict
5+
from typing import List
6+
7+
from matplotlib.pyplot import step
8+
import numpy as np
9+
import torch
10+
import tyro
11+
12+
13+
def main(results_dir: str, scenes: List[str], num_frame: int):
14+
print("scenes:", scenes)
15+
stage = "compress"
16+
17+
summary = defaultdict(list)
18+
for scene in scenes:
19+
scene_dir = os.path.join(results_dir, scene)
20+
21+
# if use best_step
22+
try:
23+
best_step = torch.load(os.path.join(scene_dir, f"ckpts/ckpt_best_rank0.pt"))["step"]
24+
except:
25+
best_step = 29999
26+
27+
if stage == "compress":
28+
zip_path = f"{scene_dir}/compression.zip"
29+
if os.path.exists(zip_path):
30+
subprocess.run(f"rm {zip_path}", shell=True)
31+
subprocess.run(f"zip -r {zip_path} {scene_dir}/compression/", shell=True)
32+
out = subprocess.run(
33+
f"stat -c%s {zip_path}", shell=True, capture_output=True
34+
)
35+
size = int(out.stdout)
36+
summary["size"].append(size)
37+
38+
bitrate = size * 8 / 1024**2 / num_frame * 30
39+
summary["bitrate"].append(bitrate)
40+
41+
MB_per_frame = size / 1024**2 / num_frame
42+
summary["MB_per_frame"].append(MB_per_frame)
43+
44+
with open(os.path.join(scene_dir, f"stats/{stage}_step{best_step}.json"), "r") as f:
45+
stats = json.load(f)
46+
for k, v in stats.items():
47+
summary[k].append(v)
48+
49+
stage = "val"
50+
for scene in scenes:
51+
scene_dir = os.path.join(results_dir, scene)
52+
53+
try:
54+
best_step = torch.load(os.path.join(scene_dir, f"ckpts/ckpt_best_rank0.pt"))["step"]
55+
except:
56+
best_step = 29999
57+
58+
with open(os.path.join(scene_dir, f"stats/{stage}_step{best_step}.json"), "r") as f:
59+
stats = json.load(f)
60+
for k, v in stats.items():
61+
if k in ['psnr', 'ssim', 'lpips']:
62+
summary['val_'+k].append(v)
63+
64+
65+
for k, v in summary.items():
66+
print(k, np.mean(v))
67+
68+
mean_summary = {k: np.mean(v) for k, v in summary.items()}
69+
70+
with open(f"{results_dir}/comp_summary.json", "w") as fp:
71+
json.dump(mean_summary, fp)
72+
73+
if __name__ == "__main__":
74+
tyro.cli(main)

0 commit comments

Comments
 (0)