Commit

new file: projects/mmdet3d_plugin/Actformer/__init__.py
	new file:   projects/mmdet3d_plugin/Actformer/apis/__init__.py
	new file:   projects/mmdet3d_plugin/Actformer/apis/mmdet_train.py
	new file:   projects/mmdet3d_plugin/Actformer/apis/test.py
	new file:   projects/mmdet3d_plugin/Actformer/apis/train.py
	new file:   projects/mmdet3d_plugin/Actformer/dense_heads/__init__.py
	new file:   projects/mmdet3d_plugin/Actformer/dense_heads/bevcmc_head.py
	new file:   projects/mmdet3d_plugin/Actformer/dense_heads/bevformer_head.py
	new file:   projects/mmdet3d_plugin/Actformer/dense_heads/bevformer_seg_head.py
	new file:   projects/mmdet3d_plugin/Actformer/detectors/__init__.py
	new file:   projects/mmdet3d_plugin/Actformer/detectors/bevcmc.py
	new file:   projects/mmdet3d_plugin/Actformer/detectors/bevformer.py
	new file:   projects/mmdet3d_plugin/Actformer/detectors/bevformer_fp16.py
	new file:   projects/mmdet3d_plugin/Actformer/hooks/__init__.py
	new file:   projects/mmdet3d_plugin/Actformer/hooks/custom_hooks.py
	new file:   projects/mmdet3d_plugin/Actformer/modules/__init__.py
	new file:   projects/mmdet3d_plugin/Actformer/modules/custom_base_transformer_layer.py
	new file:   projects/mmdet3d_plugin/Actformer/modules/decoder.py
	new file:   projects/mmdet3d_plugin/Actformer/modules/encoder.py
	new file:   projects/mmdet3d_plugin/Actformer/modules/multi_scale_deformable_attn_function.py
	new file:   projects/mmdet3d_plugin/Actformer/modules/spatial_cross_attention.py
	new file:   projects/mmdet3d_plugin/Actformer/modules/temporal_self_attention.py
	new file:   projects/mmdet3d_plugin/Actformer/modules/transformer.py
	new file:   projects/mmdet3d_plugin/Actformer/runner/__init__.py
	new file:   projects/mmdet3d_plugin/Actformer/runner/epoch_based_runner.py
	new file:   projects/mmdet3d_plugin/__init__.py
	new file:   projects/mmdet3d_plugin/core/bbox/assigners/__init__.py
	new file:   projects/mmdet3d_plugin/core/bbox/assigners/hungarian_assigner_3d.py
	new file:   projects/mmdet3d_plugin/core/bbox/coders/__init__.py
	new file:   projects/mmdet3d_plugin/core/bbox/coders/nms_free_coder.py
	new file:   projects/mmdet3d_plugin/core/bbox/match_costs/__init__.py
	new file:   projects/mmdet3d_plugin/core/bbox/match_costs/match_cost.py
	new file:   projects/mmdet3d_plugin/core/bbox/util.py
	new file:   projects/mmdet3d_plugin/core/evaluation/__init__.py
	new file:   projects/mmdet3d_plugin/core/evaluation/eval_hooks.py
	new file:   projects/mmdet3d_plugin/core/evaluation/kitti2waymo.py
	new file:   projects/mmdet3d_plugin/datasets/__init__.py
	new file:   projects/mmdet3d_plugin/datasets/builder.py
	new file:   projects/mmdet3d_plugin/datasets/nuscenes_dataset.py
	new file:   projects/mmdet3d_plugin/datasets/nuscenes_eval.py
	new file:   projects/mmdet3d_plugin/datasets/nuscenes_mono_dataset.py
	new file:   projects/mmdet3d_plugin/datasets/pipelines/__init__.py
	new file:   projects/mmdet3d_plugin/datasets/pipelines/formating.py
	new file:   projects/mmdet3d_plugin/datasets/pipelines/loading.py
	new file:   projects/mmdet3d_plugin/datasets/pipelines/transform_3d.py
	new file:   projects/mmdet3d_plugin/datasets/samplers/__init__.py
	new file:   projects/mmdet3d_plugin/datasets/samplers/distributed_sampler.py
	new file:   projects/mmdet3d_plugin/datasets/samplers/group_sampler.py
	new file:   projects/mmdet3d_plugin/datasets/samplers/sampler.py
	new file:   projects/mmdet3d_plugin/datasets/v2x_sim_dataset.py
	new file:   projects/mmdet3d_plugin/datasets/v2x_sim_eval.py
	new file:   projects/mmdet3d_plugin/models/backbones/__init__.py
	new file:   projects/mmdet3d_plugin/models/backbones/vovnet.py
	new file:   projects/mmdet3d_plugin/models/hooks/__init__.py
	new file:   projects/mmdet3d_plugin/models/hooks/hooks.py
	new file:   projects/mmdet3d_plugin/models/opt/__init__.py
	new file:   projects/mmdet3d_plugin/models/opt/adamw.py
	new file:   projects/mmdet3d_plugin/models/utils/__init__.py
	new file:   projects/mmdet3d_plugin/models/utils/bricks.py
	new file:   projects/mmdet3d_plugin/models/utils/grid_mask.py
	new file:   projects/mmdet3d_plugin/models/utils/position_embedding.py
	new file:   projects/mmdet3d_plugin/models/utils/visual.py
hsz0403 committed Mar 31, 2024
1 parent 6c8c96c commit 720b643

Showing 62 changed files with 10,530 additions and 0 deletions.
6 changes: 6 additions & 0 deletions projects/mmdet3d_plugin/Actformer/__init__.py
@@ -0,0 +1,6 @@

from .dense_heads import *
from .detectors import *
from .modules import *
from .runner import *
from .hooks import *
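
The top-level Actformer/__init__.py simply re-exports the sub-packages, so importing the plugin package triggers the registry decorators in dense_heads/, detectors/, modules/, runner/ and hooks/. As a rough sketch of how such a plugin directory is usually pulled in, assuming the standard mmdet3d custom-plugin convention used by BEVFormer-style projects (the config file name below is hypothetical):

# configs/actformer/example_config.py  (hypothetical path)
# With these two keys set, the training/test scripts import the plugin
# package, which registers all custom heads, detectors and attention
# modules with the mmdet/mmdet3d registries before the model is built.
plugin = True
plugin_dir = 'projects/mmdet3d_plugin/'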
3 changes: 3 additions & 0 deletions projects/mmdet3d_plugin/Actformer/apis/__init__.py
@@ -0,0 +1,3 @@
from .train import custom_train_model
from .mmdet_train import custom_train_detector
# from .test import custom_multi_gpu_test
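
apis/__init__.py exposes custom_train_model and custom_train_detector as the plugin's training entry points (the multi-GPU test helper is left commented out). A minimal call sketch, assuming custom_train_model mirrors the custom_train_detector signature defined in mmdet_train.py below and that cfg, model, datasets, timestamp and meta have been built the usual way in a train script:

# Sketch only; the surrounding variables are assumptions, not code from this commit.
from projects.mmdet3d_plugin.Actformer.apis import custom_train_model

custom_train_model(
    model,              # detector built from cfg.model
    datasets,           # list of training datasets
    cfg,
    distributed=True,
    validate=True,
    timestamp=timestamp,
    meta=meta)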
200 changes: 200 additions & 0 deletions projects/mmdet3d_plugin/Actformer/apis/mmdet_train.py
@@ -0,0 +1,200 @@
# ---------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
# ---------------------------------------------
# Modified by Zhiqi Li
# ---------------------------------------------
import random
import warnings

import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
                         Fp16OptimizerHook, OptimizerHook, build_optimizer,
                         build_runner, get_dist_info)
from mmcv.utils import build_from_cfg

from mmdet.core import EvalHook

from mmdet.datasets import (build_dataset,
                            replace_ImageToTensor)
from mmdet.utils import get_root_logger
import time
import os.path as osp
from projects.mmdet3d_plugin.datasets.builder import build_dataloader
from projects.mmdet3d_plugin.core.evaluation.eval_hooks import CustomDistEvalHook
from projects.mmdet3d_plugin.datasets import custom_build_dataset


def custom_train_detector(model,
                          dataset,
                          cfg,
                          distributed=False,
                          validate=False,
                          timestamp=None,
                          eval_model=None,
                          meta=None):
    logger = get_root_logger(cfg.log_level)

    # prepare data loaders

    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    # assert len(dataset) == 1
    if 'imgs_per_gpu' in cfg.data:
        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
                       'Please use "samples_per_gpu" instead')
        if 'samples_per_gpu' in cfg.data:
            logger.warning(
                f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
                f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
                f'={cfg.data.imgs_per_gpu} is used in this experiments')
        else:
            logger.warning(
                'Automatically set "samples_per_gpu"="imgs_per_gpu"='
                f'{cfg.data.imgs_per_gpu} in this experiments')
        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu

    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            len(cfg.gpu_ids),
            dist=distributed,
            seed=cfg.seed,
            shuffler_sampler=cfg.data.shuffler_sampler,  # dict(type='DistributedGroupSampler'),
            nonshuffler_sampler=cfg.data.nonshuffler_sampler,  # dict(type='DistributedSampler'),
        ) for ds in dataset
    ]

    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
        if eval_model is not None:
            eval_model = MMDistributedDataParallel(
                eval_model.cuda(),
                device_ids=[torch.cuda.current_device()],
                broadcast_buffers=False,
                find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(
            model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
        if eval_model is not None:
            eval_model = MMDataParallel(
                eval_model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)

    if 'runner' not in cfg:
        cfg.runner = {
            'type': 'EpochBasedRunner',
            'max_epochs': cfg.total_epochs
        }
        warnings.warn(
            'config is now expected to have a `runner` section, '
            'please set `runner` in your config.', UserWarning)
    else:
        if 'total_epochs' in cfg:
            assert cfg.total_epochs == cfg.runner.max_epochs

    if eval_model is not None:
        runner = build_runner(
            cfg.runner,
            default_args=dict(
                model=model,
                eval_model=eval_model,
                optimizer=optimizer,
                work_dir=cfg.work_dir,
                logger=logger,
                meta=meta))
    else:
        runner = build_runner(
            cfg.runner,
            default_args=dict(
                model=model,
                optimizer=optimizer,
                work_dir=cfg.work_dir,
                logger=logger,
                meta=meta))

    # an ugly workaround to make .log and .log.json filenames the same
    runner.timestamp = timestamp

    # fp16 setting
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif distributed and 'type' not in cfg.optimizer_config:
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config

    # register hooks
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config,
                                   cfg.get('momentum_config', None))

    # register profiler hook
    # trace_config = dict(type='tb_trace', dir_name='work_dir')
    # profiler_config = dict(on_trace_ready=trace_config)
    # runner.register_profiler_hook(profiler_config)

    if distributed:
        if isinstance(runner, EpochBasedRunner):
            runner.register_hook(DistSamplerSeedHook())

    # register eval hooks
    if validate:
        # Support batch_size > 1 in validation
        val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
        if val_samples_per_gpu > 1:
            assert False
            # Replace 'ImageToTensor' with 'DefaultFormatBundle'
            cfg.data.val.pipeline = replace_ImageToTensor(
                cfg.data.val.pipeline)
        val_dataset = custom_build_dataset(cfg.data.val, dict(test_mode=True))

        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=val_samples_per_gpu,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False,
            shuffler_sampler=cfg.data.shuffler_sampler,  # dict(type='DistributedGroupSampler'),
            nonshuffler_sampler=cfg.data.nonshuffler_sampler,  # dict(type='DistributedSampler'),
        )
        eval_cfg = cfg.get('evaluation', {})
        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
        eval_cfg['jsonfile_prefix'] = osp.join(
            'val', cfg.work_dir,
            time.ctime().replace(' ', '_').replace(':', '_'))
        eval_hook = CustomDistEvalHook if distributed else EvalHook
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg))

    # user-defined hooks
    if cfg.get('custom_hooks', None):
        custom_hooks = cfg.custom_hooks
        assert isinstance(custom_hooks, list), \
            f'custom_hooks expect list type, but got {type(custom_hooks)}'
        for hook_cfg in cfg.custom_hooks:
            assert isinstance(hook_cfg, dict), \
                'Each item in custom_hooks expects dict type, but got ' \
                f'{type(hook_cfg)}'
            hook_cfg = hook_cfg.copy()
            priority = hook_cfg.pop('priority', 'NORMAL')
            hook = build_from_cfg(hook_cfg, HOOKS)
            runner.register_hook(hook, priority=priority)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
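Compared with mmdet's stock train_detector, custom_train_detector builds its dataloaders through the plugin's own build_dataloader (which takes explicit shuffler_sampler/nonshuffler_sampler settings), supports an optional separate eval_model, and registers CustomDistEvalHook for distributed validation. A hedged sketch of the config fields it reads; the sampler types come from the inline comments above, while the remaining values are placeholders rather than the settings shipped with this commit:

# Hypothetical config excerpt (placeholder values).
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=4,
    shuffler_sampler=dict(type='DistributedGroupSampler'),
    nonshuffler_sampler=dict(type='DistributedSampler'),
)
optimizer = dict(type='AdamW', lr=2e-4, weight_decay=0.01)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
runner = dict(type='EpochBasedRunner', max_epochs=24)
evaluation = dict(interval=1)
custom_hooks = []   # optional list of dict(type=..., priority=...)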
164 changes: 164 additions & 0 deletions projects/mmdet3d_plugin/Actformer/apis/test.py
@@ -0,0 +1,164 @@
# ---------------------------------------------
# Copyright (c) OpenMMLab. All rights reserved.
# ---------------------------------------------
# Modified by Zhiqi Li
# ---------------------------------------------
import os.path as osp
import pickle
import shutil
import tempfile
import time

import mmcv
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info

from mmdet.core import encode_mask_results

import numpy as np
import pycocotools.mask as mask_util


def custom_encode_mask_results(mask_results):
    """Encode bitmap masks to RLE code. Semantic masks only.

    Args:
        mask_results (list | tuple[list]): bitmap mask results.
            In Mask Scoring R-CNN, mask_results is a tuple of
            (segm_results, segm_cls_score).
    Returns:
        list | tuple: RLE encoded mask.
    """
    cls_segms = mask_results
    num_classes = len(cls_segms)
    encoded_mask_results = []
    for i in range(len(cls_segms)):
        encoded_mask_results.append(
            mask_util.encode(
                np.array(
                    cls_segms[i][:, :, np.newaxis], order='F',
                    dtype='uint8'))[0])  # encoded with RLE
    return [encoded_mask_results]


def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
    """Test a model with multiple gpus.

    This method tests a model with multiple gpus and collects the results
    under two different modes: gpu and cpu. By setting 'gpu_collect=True'
    it encodes results to gpu tensors and uses gpu communication for result
    collection. In cpu mode it saves the results on different gpus to
    'tmpdir' and collects them by the rank 0 worker.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect
            results.
    Returns:
        list: The prediction results.
    """
    model.eval()
    bbox_results = []
    mask_results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    time.sleep(2)  # This line can prevent deadlock problem in some cases.
    have_mask = False
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
            # encode mask results
            if isinstance(result, dict):
                if 'bbox_results' in result.keys():
                    bbox_result = result['bbox_results']
                    batch_size = len(result['bbox_results'])
                    bbox_results.extend(bbox_result)
                if 'mask_results' in result.keys() and result['mask_results'] is not None:
                    mask_result = custom_encode_mask_results(result['mask_results'])
                    mask_results.extend(mask_result)
                    have_mask = True
            else:
                batch_size = len(result)
                bbox_results.extend(result)

            # if isinstance(result[0], tuple):
            #     assert False, 'this code is for instance segmentation, which our code will not utilize.'
            #     result = [(bbox_results, encode_mask_results(mask_results))
            #               for bbox_results, mask_results in result]
        if rank == 0:
            for _ in range(batch_size * world_size):
                prog_bar.update()

    # collect results from all ranks
    if gpu_collect:
        bbox_results = collect_results_gpu(bbox_results, len(dataset))
        if have_mask:
            mask_results = collect_results_gpu(mask_results, len(dataset))
        else:
            mask_results = None
    else:
        bbox_results = collect_results_cpu(bbox_results, len(dataset), tmpdir)
        tmpdir = tmpdir + '_mask' if tmpdir is not None else None
        if have_mask:
            mask_results = collect_results_cpu(mask_results, len(dataset), tmpdir)
        else:
            mask_results = None

    if mask_results is None:
        return bbox_results
    return {'bbox_results': bbox_results, 'mask_results': mask_results}


def collect_results_cpu(result_part, size, tmpdir=None):
    rank, world_size = get_dist_info()
    # create a tmp dir if it is not specified
    if tmpdir is None:
        MAX_LEN = 512
        # 32 is whitespace
        dir_tensor = torch.full((MAX_LEN, ),
                                32,
                                dtype=torch.uint8,
                                device='cuda')
        if rank == 0:
            mmcv.mkdir_or_exist('.dist_test')
            tmpdir = tempfile.mkdtemp(dir='.dist_test')
            tmpdir = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # dump the part result to the dir
    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
    dist.barrier()
    # collect all parts
    if rank != 0:
        return None
    else:
        # load results of all parts from tmp dir
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, f'part_{i}.pkl')
            part_list.append(mmcv.load(part_file))
        # sort the results
        ordered_results = []
        # because the evaluation-stage sampler is changed so that each gpu
        # handles a continuous block of samples, the parts are simply
        # concatenated in rank order instead of being interleaved.
        # for res in zip(*part_list):
        for res in part_list:
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        # remove tmp dir
        shutil.rmtree(tmpdir)
        return ordered_results

def collect_results_gpu(result_part, size):
    # Despite its name, this simply delegates to the CPU/tmpdir collection
    # path and returns its gathered results.
    return collect_results_cpu(result_part, size)
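
A hedged usage sketch for custom_multi_gpu_test: each rank runs inference over its shard of the dataloader, and results are gathered either via the tmpdir path (gpu_collect=False) or the GPU path, which here delegates to the same CPU collection. The model/dataloader setup below follows the usual distributed test flow of BEVFormer-style repos and is an assumption, not code from this commit:

# Sketch only; `model`, `data_loader` and the distributed init are assumed to exist.
import torch
from mmcv.parallel import MMDistributedDataParallel
from projects.mmdet3d_plugin.Actformer.apis.test import custom_multi_gpu_test

model = MMDistributedDataParallel(
    model.cuda(),
    device_ids=[torch.cuda.current_device()],
    broadcast_buffers=False)
outputs = custom_multi_gpu_test(
    model, data_loader,
    tmpdir='./tmp_results',   # scratch dir for the cpu collection path
    gpu_collect=False)        # each rank dumps part_{rank}.pkl, rank 0 gathers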