I successfully trained a custom model using the sscma library and exported it to the vela.tflite format, but once I upload it to the groveAIV2 it fails when the invoke function is called (the error is "'MODEL' does not exist"). Here is my config file, timm_classify.py:
from mmengine.config import read_base

with read_base():
    from .._base_.default_runtime import *
    from .._base_.schedules.schedule_1x import *

from sscma.models import CrossEntropyLoss, Mixup, CutMix, ImageClassifier, MobileNetv2, GlobalAveragePooling, LinearClsHead
from sscma.infer import CustomInferencer
from sscma.datasets.transforms import (
    Resize,
    PackInputs,
    LoadImageFromFile,
    Pad,
    HSVRandomAug,
    RandomFlip,
    Mosaic,
)
from sscma.datasets import ImageNet, ClsDataPreprocessor
from sscma.engine import DetVisualizationHook
from sscma.visualization import UniversalVisualizer
from mmengine.optim import LinearLR, MultiStepLR
from sscma.evaluation import Accuracy
from torch.nn import ReLU6, BCEWithLogitsLoss, ReLU
from torch.optim import Adam, SGD

dataset_type = ImageNet
data_root = 'dataset/simulation'
train_data = 'train'
val_data = 'val'
metainfo = {'classes': ['left', 'right', 'forward']}

height = 640
width = 640
imgsz = (width, height)
downsample_factor = (8,)

# TRAIN
batch = 12
workers = 2
persistent_workers = True
widen_factor = 0.35

val_batch = 16
val_workers = 2

lr = 0.02
epochs = 50
weight_decay = 0.0005
momentum = 0.95

default_hooks = dict(visualization=dict(type=DetVisualizationHook, score_thr=0.8))

visualizer = dict(type=UniversalVisualizer)

# model settings
data_preprocessor = dict(
    type=ClsDataPreprocessor,
    num_classes=3,
    # RGB format normalization parameters
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    # convert image from BGR to RGB
    to_rgb=True,
)

model = dict(
    data_preprocessor=data_preprocessor,
    type=ImageClassifier,
    backbone=dict(type=MobileNetv2, gray_input=False, widen_factor=widen_factor, out_indices=(2,), rep=True),
    neck=dict(type=GlobalAveragePooling),
    head=dict(
        type=LinearClsHead,
        in_channels=32,
        loss=dict(type=CrossEntropyLoss, loss_weight=1.0),
        num_classes=3,
    ),
    train_cfg=dict(
        augments=[dict(type=Mixup, alpha=0.8), dict(type=CutMix, alpha=1.0)]
    ),
)

deploy = dict(
    type=CustomInferencer,
)

imdecode_backend = "cv2"

pre_transform = [
    dict(
        type=LoadImageFromFile,
        imdecode_backend=imdecode_backend
    )
]

train_pipeline = [
    *pre_transform,
    dict(type=RandomFlip, prob=0.5),
    dict(
        type=PackInputs,
    ),
]

test_pipeline = [
    *pre_transform,
    dict(
        type=PackInputs,
    ),
]

train_dataloader = dict(
    batch_size=batch,
    num_workers=workers,
    persistent_workers=persistent_workers,
    drop_last=False,
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        split=train_data,
        pipeline=train_pipeline,
        metainfo=metainfo,
    ),
)

val_dataloader = dict(
    batch_size=val_batch,
    num_workers=val_workers,
    persistent_workers=persistent_workers,
    drop_last=False,
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        split=val_data,
        pipeline=test_pipeline,
        metainfo=metainfo,
    ),
)

test_dataloader = val_dataloader

find_unused_parameters = True

optim_wrapper = dict(
    optimizer=dict(
        # type=Adam, lr=lr, betas=momentum, weight_decay=weight_decay, eps=1e-7
        type=SGD,
        lr=lr,
        momentum=momentum,
        weight_decay=weight_decay,
    ),
)

# evaluator
val_evaluator = dict(type=Accuracy)
test_evaluator = val_evaluator

train_cfg = dict(by_epoch=True, max_epochs=epochs)

# learning policy
param_scheduler = [
    dict(type=LinearLR, begin=0, end=30, start_factor=0.001, by_epoch=False),  # warm-up
    dict(
        type=MultiStepLR,
        begin=1,
        end=50,
        milestones=[15, 30, 45],
        gamma=0.3,
        by_epoch=True,
    ),
]
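
A minimal host-side sanity check is to load the int8 .tflite produced before the Vela compilation step with the plain TFLite interpreter and invoke it on a dummy frame; if this already fails, the problem is in the export rather than on the device. (The Vela-compiled model itself cannot be run this way, since it contains Ethos-U custom ops.) The model path below is an assumption, not the actual file name from a run:

import numpy as np
import tensorflow as tf

# Path is an assumption; point this at the int8 .tflite produced *before* vela.
interpreter = tf.lite.Interpreter(model_path="work_dirs/timm_classify/model_int8.tflite")
interpreter.allocate_tensors()

inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]

# Dummy frame with the model's own input shape/dtype (640x640x3 per the config above).
dummy = np.zeros(inp["shape"], dtype=inp["dtype"])
interpreter.set_tensor(inp["index"], dummy)
interpreter.invoke()

print("input:", inp["shape"], inp["dtype"])
print("output:", interpreter.get_tensor(out["index"]).shape)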