3 changes: 2 additions & 1 deletion conf/dataset/default.yaml
@@ -4,6 +4,7 @@ cfg:
  batch_size: ${training.batch_size}
  num_workers: ${training.num_workers}
  dataroot: data
  conv_type: ${model.model.conv_type}

  common_transform:
  aug_transform:
@@ -13,4 +14,4 @@
  test_transform: "${dataset.cfg.val_transform}"
  train_transform:
    - "${dataset.cfg.aug_transform}"
    - "${dataset.cfg.common_transform}"
1 change: 1 addition & 0 deletions conf/model/segmentation/default.yaml
@@ -13,3 +13,4 @@ model:
  backbone:
    input_nc: ${dataset.cfg.feature_dimension}
    architecture: unet
  conv_type: null
85 changes: 85 additions & 0 deletions conf/model/segmentation/kpconv/KPFCNN.yaml
@@ -0,0 +1,85 @@
# @package model
defaults:
  - /model/segmentation/default

model:
  conv_type: "PARTIAL_DENSE"
  backbone:
    _target_: torch_points3d.applications.kpconv.KPConv
    config:
      define_constants:
        in_grid_size: 0.02
        in_feat: 64
        bn_momentum: 0.2
        max_neighbors: 25
      down_conv:
        down_conv_nn:
          [
            [[FEAT + 1, in_feat], [in_feat, 2*in_feat]],
            [[2*in_feat, 2*in_feat], [2*in_feat, 4*in_feat]],
            [[4*in_feat, 4*in_feat], [4*in_feat, 8*in_feat]],
            [[8*in_feat, 8*in_feat], [8*in_feat, 16*in_feat]],
            [[16*in_feat, 16*in_feat], [16*in_feat, 32*in_feat]],
          ]
        grid_size:
          [
            [in_grid_size, in_grid_size],
            [2*in_grid_size, 2*in_grid_size],
            [4*in_grid_size, 4*in_grid_size],
            [8*in_grid_size, 8*in_grid_size],
            [16*in_grid_size, 16*in_grid_size],
          ]
        prev_grid_size:
          [
            [in_grid_size, in_grid_size],
            [in_grid_size, 2*in_grid_size],
            [2*in_grid_size, 4*in_grid_size],
            [4*in_grid_size, 8*in_grid_size],
            [8*in_grid_size, 16*in_grid_size],
          ]
        block_names:
          [
            ["SimpleBlock", "ResnetBBlock"],
            ["ResnetBBlock", "ResnetBBlock"],
            ["ResnetBBlock", "ResnetBBlock"],
            ["ResnetBBlock", "ResnetBBlock"],
            ["ResnetBBlock", "ResnetBBlock"],
          ]
        has_bottleneck:
          [
            [False, True],
            [True, True],
            [True, True],
            [True, True],
            [True, True],
          ]
        deformable:
          [
            [False, False],
            [False, False],
            [False, False],
            [False, False],
            [False, False],
          ]
        max_num_neighbors:
          [[max_neighbors, max_neighbors], [max_neighbors, max_neighbors], [max_neighbors, max_neighbors], [max_neighbors, max_neighbors], [max_neighbors, max_neighbors]]
        module_name: KPDualBlock
Review comment (Member): Eventually we can redo this to use hydra instantiation for blocks and remove some of the model factory reflection code

      up_conv:
        module_name: FPModule_PD
        up_conv_nn:
          [
            [32*in_feat + 16*in_feat, 8*in_feat],
            [8*in_feat + 8*in_feat, 4*in_feat],
            [4*in_feat + 4*in_feat, 2*in_feat],
            [2*in_feat + 2*in_feat, in_feat],
          ]
        skip: True
        up_k: [1,1,1,1]
        bn_momentum:
          [
            bn_momentum,
            bn_momentum,
            bn_momentum,
            bn_momentum,
            bn_momentum,
          ]
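Following up on the review comment above: a minimal sketch of what per-block hydra instantiation could look like, so the block name strings (SimpleBlock, ResnetBBlock, KPDualBlock) would no longer go through the model factory's reflection code. `torch.nn.Linear` stands in for a real block class here; the actual `_target_` paths and constructor arguments are assumptions, not the current torch_points3d API.

from hydra.utils import instantiate
from omegaconf import OmegaConf

# Each block entry carries a `_target_` naming the class to build; the remaining
# keys become constructor arguments, so no name-based factory lookup is needed.
block_cfg = OmegaConf.create({"_target_": "torch.nn.Linear", "in_features": 64, "out_features": 128})
block = instantiate(block_cfg)
print(block)  # Linear(in_features=64, out_features=128, bias=True)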
40 changes: 40 additions & 0 deletions conf/model/segmentation/pointnet2/pointnet2_largemsg.yaml
@@ -0,0 +1,40 @@
# @package model
defaults:
  - /model/segmentation/default

model:
  conv_type: "DENSE"
  backbone:
    _target_: torch_points3d.applications.pointnet2.PointNet2
    config:
      down_conv:
        module_name: PointNetMSGDown
        npoint: [1024, 256, 64, 16]
        radii: [[0.05, 0.1], [0.1, 0.2], [0.2, 0.4], [0.4, 0.8]]
        nsamples: [[16, 32], [16, 32], [16, 32], [16, 32]]
        down_conv_nn:
          [
            [[FEAT+3, 16, 16, 32], [FEAT+3, 32, 32, 64]],
            [[32 + 64+3, 64, 64, 128], [32 + 64+3, 64, 96, 128]],
            [
              [128 + 128+3, 128, 196, 256],
              [128 + 128+3, 128, 196, 256],
            ],
            [
              [256 + 256+3, 256, 256, 512],
              [256 + 256+3, 256, 384, 512],
            ],
          ]
      up_conv:
        module_name: DenseFPModule
        up_conv_nn:
          [
            [512 + 512 + 256 + 256, 512, 512],
            [512 + 128 + 128, 512, 512],
            [512 + 64 + 32, 256, 256],
            [256 + FEAT, 128, 128],
          ]
        skip: True
      mlp_cls:
        nn: [128, 128]
        dropout: 0.5
4 changes: 2 additions & 2 deletions conf/model/segmentation/sparseconv3d/Res16UNet34.yaml
@@ -2,9 +2,9 @@
defaults:
  - /model/segmentation/ResUNet32

model:
  backbone:
    down_conv:
      N: [ 0, 2, 3, 4, 6 ]
    up_conv:
      N: [ 1, 1, 1, 1, 1 ]
4 changes: 2 additions & 2 deletions conf/model/segmentation/sparseconv3d/ResUNet32.yaml
@@ -2,11 +2,11 @@
defaults:
  - /model/segmentation/default

model:
  conv_type: "SPARSE"
  backbone:
    _target_: torch_points3d.applications.sparseconv3d.SparseConv3d
    backend: torchsparse

    config:
      define_constants:
        in_feat: 32
1 change: 1 addition & 0 deletions test/.#test_hydra.py
52 changes: 52 additions & 0 deletions test/conftest.py
@@ -0,0 +1,52 @@
from typing import List
import os
import os.path as osp
import pytest

from hydra import compose, initialize
from hydra.test_utils.test_utils import find_parent_dir_containing

from torch_points3d.trainer import LitTrainer
from torch_points3d.core.instantiator import HydraInstantiator


class ScriptRunner:

    @staticmethod
    def find_hydra_conf_dir(config_dir: str = "conf") -> str:
        """
        Util function to find the hydra config directory from the main repository for testing.
        Args:
            config_dir: Name of config directory.
        Returns: Relative config path
        """
        parent_dir = find_parent_dir_containing(config_dir)
        relative_conf_dir = osp.relpath(parent_dir, os.path.dirname(__file__))
        return osp.join(relative_conf_dir, config_dir)

    def train(self, cmd_args: List[str]) -> None:
        relative_conf_dir = self.find_hydra_conf_dir()
        with initialize(config_path=relative_conf_dir, job_name="test_app"):
            cfg = compose(config_name="config", overrides=cmd_args)
            instantiator = HydraInstantiator()
            trainer = LitTrainer(
                instantiator,
                dataset=cfg.get("dataset"),
                trainer=cfg.get("trainer"),
                model=cfg.get("model"))
            trainer.train()

    def hf_train(self, dataset: str, model: str, num_workers: int = 0, fast_dev_run: int = 1):
        # Forward the arguments as hydra overrides rather than hardcoding them.
        # (trainer.fast_dev_run assumes the trainer config exposes Lightning's flag of that name.)
        cmd_args = [
            f'model={model}',
            f'dataset={dataset}',
            'trainer.max_epochs=1',
            f'trainer.fast_dev_run={fast_dev_run}',
            f'training.num_workers={num_workers}',
        ]
        self.train(cmd_args)


@pytest.fixture(scope="session")
def script_runner() -> ScriptRunner:
    return ScriptRunner()
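A hypothetical test built on the `script_runner` fixture above; the dataset and model names are placeholders (the model path follows the config layout in this PR) and may need adjusting to configs that actually exist.

def test_pointnet2_segmentation(script_runner):
    # Composes the hydra config with these overrides and runs a single short training pass.
    script_runner.hf_train(dataset="s3dis", model="segmentation/pointnet2/pointnet2_largemsg")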
26 changes: 26 additions & 0 deletions test/test_data/test_batch.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import torch
from torch_geometric.data import Data
import numpy as np

import os
import sys

ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..")
sys.path.append(ROOT)

from torch_points3d.data.batch import SimpleBatch



def test_fromlist():
    # Collating two identical clouds with SimpleBatch should stack them along a new batch dimension.
    nb_points = 100
    pos = torch.randn((nb_points, 3))
    y = torch.tensor([range(10) for i in range(pos.shape[0])], dtype=torch.float)
    d = Data(pos=pos, y=y)
    b = SimpleBatch.from_data_list([d, d])
    np.testing.assert_equal(b.pos.size(), (2, 100, 3))
    np.testing.assert_equal(b.y.size(), (2, 100, 10))



46 changes: 46 additions & 0 deletions test/test_data/test_msdata.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
import torch
import torch.testing as tt
import numpy as np
from torch_geometric.data import Data


from torch_points3d.data.multiscale_data import MultiScaleBatch, MultiScaleData



def test_apply():
    # apply() should transform the top-level tensors and every nested multiscale Data object.
    x = torch.tensor([1])
    pos = torch.tensor([1])
    d1 = Data(x=2 * x, pos=2 * pos)
    d2 = Data(x=3 * x, pos=3 * pos)
    data = MultiScaleData(x=x, pos=pos, multiscale=[d1, d2])
    data.apply(lambda x: 2 * x)
    np.testing.assert_equal(data.x[0].item(), 2)
    np.testing.assert_equal(data.pos[0].item(), 2)
    np.testing.assert_equal(data.multiscale[0].pos[0].item(), 4)
    np.testing.assert_equal(data.multiscale[0].x[0].item(), 4)
    np.testing.assert_equal(data.multiscale[1].pos[0].item(), 6)
    np.testing.assert_equal(data.multiscale[1].x[0].item(), 6)


def test_batch():
    # Batching two multiscale samples should also batch each scale and track batch indices.
    x = torch.tensor([1])
    pos = x
    d1 = Data(x=x, pos=pos)
    d2 = Data(x=4 * x, pos=4 * pos)
    data1 = MultiScaleData(x=x, pos=pos, multiscale=[d1, d2])

    x = torch.tensor([2])
    pos = x
    d1 = Data(x=x, pos=pos)
    d2 = Data(x=4 * x, pos=4 * pos)
    data2 = MultiScaleData(x=x, pos=pos, multiscale=[d1, d2])

    batch = MultiScaleBatch.from_data_list([data1, data2])
    tt.assert_allclose(batch.x, torch.tensor([1, 2]))
    tt.assert_allclose(batch.batch, torch.tensor([0, 1]))

    ms_batches = batch.multiscale
    tt.assert_allclose(ms_batches[0].batch, torch.tensor([0, 1]))
    tt.assert_allclose(ms_batches[1].batch, torch.tensor([0, 1]))
    tt.assert_allclose(ms_batches[1].x, torch.tensor([4, 8]))