
Commit 158d9aa

first commit
0 parents  commit 158d9aa

1,490 files changed, +70,440 / -0 lines


README.md  (+13 lines)
@@ -0,0 +1,13 @@
## Overview

![overview](imgs/overview.jpg "Overview")

## Result

![result](imgs/result.png)

## Environment

- mmcls>=1.0.0rc0
- mmcv>=2.0.0rc1,<2.1.0
- mmengine>=0.1.0,<1.0.0
- pytorch>=1.8

## Training

- Specify the config file in tools/lane_with_angle_train.py
- python tools/lane_with_angle_train.py
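The script the README points at, tools/lane_with_angle_train.py, is not part of this excerpt. A minimal sketch of what such an entry point usually looks like under mmengine, assuming it hard-codes the config path (as the README implies) and that the custom Angle_* modules get imported so they register themselves; the config path and module import below are placeholders, not the real script:

# Sketch only; placeholder paths and imports, not the committed tools/lane_with_angle_train.py.
from mmengine.config import Config
from mmengine.runner import Runner

import angle_lane_modules  # hypothetical import that registers Angle_EncoderDecoder, Angle_DAHead, ...


def main():
    # Point this at one of the configs added in this commit before running.
    cfg = Config.fromfile('configs/angle_dahead_r18_480x640.py')  # placeholder path
    cfg.work_dir = './work_dirs/angle_dahead_r18_480x640'
    runner = Runner.from_cfg(cfg)
    runner.train()


if __name__ == '__main__':
    main()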

imgs/overview.jpg  (57.5 KB)

imgs/result.png  (565 KB)
@@ -0,0 +1,175 @@
# ==========================================================================
# Model
# ==========================================================================
num_head_list = [1, 1, 1]
channels_list = [128, 256, 512]
# channels_list = [512]
backbone_stage_index = [1, 2, 3]
# backbone_stage_index = [3]
backbone_hw_list = [[60, 80], [30, 40], [15, 20]]
norm_cfg = dict(type='BN', requires_grad=True)
data_preprocessor = dict(
    type='SegDataPreProcessor',
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    bgr_to_rgb=True,
    pad_val=0,
    seg_pad_val=255,
    size=(480, 640))
model = dict(
    type='Angle_EncoderDecoder',
    data_preprocessor=data_preprocessor,
    pretrained=None,
    # HACK: whether to automatically balance multiple losses, only for with_angle_loss
    # parse_loss=dict(type='ParseLoss', num_losses=2),
    backbone=dict(
        type='ResNet',
        depth=18,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        strides=(1, 2, 2, 2),
        norm_cfg=norm_cfg,
        style='pytorch'),
    # HACK: whether to use angle fusion
    # neck=dict(
    #     type='Angle_Fusion_Neck',
    #     hw_list=backbone_hw_list,
    #     channels_list=channels_list,
    #     step_list=[8, 4, 2, 1],
    #     index_list=backbone_stage_index,
    #     num_head_list=num_head_list),
    decode_head=dict(
        type='Angle_DAHead',
        pam_channels=128,
        in_channels=channels_list,
        in_index=backbone_stage_index,
        channels=256,
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=norm_cfg,
        upsample_scale_factor_list=[2, 2, 2],
        # HACK: whether to use angle loss
        # with_angle_loss=True,
        # num_pre_angle_layers=4,
        align_corners=False,
        input_transform='resize_concat',
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0, class_weight=[0.4, 1])),
    test_cfg=dict(mode='whole'))


# ==========================================================================
# Dataset
# ==========================================================================
data_root = '/root/autodl-tmp/MyDataWithAngle_all'
crop_size = (480, 640)
dataset_type = 'Laneimg_Withangle'
train_pipeline = [
    dict(type='AngleLoadImageFromFile'),
    dict(type='AngleLoadAnnotations'),
    dict(
        type='RandomResize',
        scale=(640, 480),
        ratio_range=(0.5, 2.0),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='AnglePackSegInputs')
]
test_pipeline = [
    dict(type='AngleLoadImageFromFile'),
    dict(type='Resize', scale=(480, 640), keep_ratio=True),
    dict(type='AngleLoadAnnotations'),
    dict(type='AnglePackSegInputs')
]
train_dataloader = dict(
    batch_size=4,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='InfiniteSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        ann_file='train.txt',
        data_root=data_root,
        data_prefix=dict(img_path='data/img', seg_map_path='data/label'),
        metainfo=dict(
            classes=('background', 'lane'),
            palette=[[0, 0, 0], [255, 255, 255]]),
        pipeline=train_pipeline))
val_dataloader = dict(
    batch_size=1,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file='val.txt',
        data_root=data_root,
        data_prefix=dict(img_path='data/img', seg_map_path='data/label'),
        metainfo=dict(
            classes=('background', 'lane'),
            palette=[[0, 0, 0], [255, 255, 255]]),
        pipeline=test_pipeline))
test_dataloader = dict(
    batch_size=1,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file='val.txt',
        data_root=data_root,
        data_prefix=dict(img_path='data/img', seg_map_path='data/label'),
        metainfo=dict(
            classes=('background', 'lane'),
            palette=[[0, 0, 0], [255, 255, 255]]),
        pipeline=test_pipeline))

val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU', 'mFscore'])
test_evaluator = val_evaluator

# ==========================================================================
# Default runtime
# ==========================================================================
default_scope = 'mmseg'
env_cfg = dict(
    cudnn_benchmark=True,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='TensorboardVisBackend')]
visualizer = dict(
    type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', by_epoch=False)
log_level = 'INFO'
load_from = None
resume = None

# ==========================================================================
# Schedules
# ==========================================================================
randomness = dict(seed=42)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None)
param_scheduler = [
    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
    dict(
        type='PolyLR',
        eta_min=1e-4,
        power=0.9,
        begin=1000,
        end=600000,
        by_epoch=False)
]
train_cfg = dict(type='IterBasedTrainLoop', max_iters=600000, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=200, log_metric_by_epoch=False),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', by_epoch=False, save_best='mIoU'),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='SegVisualizationHook', draw=True, show=False, interval=400))
launcher = 'none'
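For orientation, the schedule in this config combines a 1,000-iteration linear warmup (starting at 0.1x the base lr of 0.01) with a polynomial decay toward eta_min=1e-4 over the remaining iterations up to 600,000. A small standalone sketch of those two formulas, mirroring the usual LinearLR/PolyLR definitions rather than calling the mmengine classes, purely to show how the lr evolves:

# Illustrative reimplementation of the warmup + poly schedule above; not the mmengine code path.
def lr_at_iter(t, base_lr=0.01, warmup_iters=1000, max_iters=600000,
               start_factor=0.1, power=0.9, eta_min=1e-4):
    if t < warmup_iters:
        # LinearLR: multiplier ramps from start_factor to 1.0 over the warmup window.
        factor = start_factor + (1.0 - start_factor) * t / warmup_iters
        return base_lr * factor
    # PolyLR: decay from base_lr toward eta_min over the rest of training.
    progress = (t - warmup_iters) / (max_iters - warmup_iters)
    return (base_lr - eta_min) * (1.0 - progress) ** power + eta_min


for t in (0, 500, 1000, 100000, 300000, 600000):
    print(f'iter {t:>6}: lr = {lr_at_iter(t):.6f}')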
@@ -0,0 +1,170 @@
norm_cfg = dict(type='SyncBN', requires_grad=True)

model = dict(
    type='EncoderDecoder',
    data_preprocessor=dict(
        type='SegDataPreProcessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_val=0,
        seg_pad_val=255,
        size=(480, 640)),
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='DAHead',
        in_channels=2048,
        in_index=3,
        channels=512,
        pam_channels=64,
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    train_cfg=dict(),
    test_cfg=dict(mode='whole')
)


# ==========================================================================
# Dataset
# ==========================================================================
data_root = '/root/autodl-tmp/MyDataWithAngle_all'
crop_size = (480, 640)
dataset_type = 'Laneimg_Withangle'
train_pipeline = [
    dict(type='AngleLoadImageFromFile'),
    dict(type='AngleLoadAnnotations'),
    # dict(
    #     type='RandomResize',
    #     scale=(640, 480),
    #     ratio_range=(0.5, 2.0),
    #     keep_ratio=True),
    # dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    # dict(type='RandomFlip', prob=0.5),
    # dict(type='PhotoMetricDistortion'),
    dict(type='AnglePackSegInputs')
]
test_pipeline = [
    dict(type='AngleLoadImageFromFile'),
    # dict(type='Resize', scale=(480, 640), keep_ratio=True),
    dict(type='AngleLoadAnnotations'),
    dict(type='AnglePackSegInputs')
]
train_dataloader = dict(
    batch_size=4,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='InfiniteSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        ann_file='train.txt',
        data_root=data_root,
        data_prefix=dict(img_path='data/img', seg_map_path='data/label'),
        metainfo=dict(
            classes=('background', 'lane'),
            palette=[[0, 0, 0], [255, 255, 255]]),
        pipeline=train_pipeline))
val_dataloader = dict(
    batch_size=1,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file='val.txt',
        data_root=data_root,
        data_prefix=dict(img_path='data/img', seg_map_path='data/label'),
        metainfo=dict(
            classes=('background', 'lane'),
            palette=[[0, 0, 0], [255, 255, 255]]),
        pipeline=test_pipeline))
test_dataloader = dict(
    batch_size=1,
    num_workers=8,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        ann_file='val.txt',
        data_root=data_root,
        data_prefix=dict(img_path='data/img', seg_map_path='data/label'),
        metainfo=dict(
            classes=('background', 'lane'),
            palette=[[0, 0, 0], [255, 255, 255]]),
        pipeline=test_pipeline))

val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU', 'mFscore'])
test_evaluator = val_evaluator

# ==========================================================================
# Default runtime
# ==========================================================================
default_scope = 'mmseg'
env_cfg = dict(
    cudnn_benchmark=True,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)
vis_backends = [dict(type='TensorboardVisBackend')]
visualizer = dict(
    type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer')
log_processor = dict(type='LogProcessor', by_epoch=False)
log_level = 'INFO'
load_from = None
resume = '/root/angle-lane-mmseg/work_dirs/da_res_mmlab_alldata/best_mIoU_iter_95000.pth'

# ==========================================================================
# Schedules
# ==========================================================================
randomness = dict(seed=42)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None)
param_scheduler = [
    dict(type='LinearLR', by_epoch=False, start_factor=0.1, begin=0, end=1000),
    dict(
        type='PolyLR',
        eta_min=1e-4,
        power=0.9,
        begin=1000,
        end=600000,
        by_epoch=False)
]
train_cfg = dict(type='IterBasedTrainLoop', max_iters=600000, val_interval=500)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=200, log_metric_by_epoch=False),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', by_epoch=False, save_best='mIoU'),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='SegVisualizationHook', draw=True, show=False, interval=150))
launcher = 'none'
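Both configs evaluate with IoUMetric reporting mIoU and mFscore over the two classes (background, lane). As a rough illustration of what those numbers mean, per-class IoU and F-score can be derived from a confusion matrix with the standard definitions; this is a plain NumPy sketch with made-up placeholder counts, not the mmseg implementation:

import numpy as np

# Rows = ground-truth class, columns = predicted class (0 = background, 1 = lane).
# Placeholder counts for illustration only.
confusion = np.array([[9.5e5, 2.0e4],
                      [1.5e4, 1.5e4]])

tp = np.diag(confusion)
fp = confusion.sum(axis=0) - tp
fn = confusion.sum(axis=1) - tp

iou = tp / (tp + fp + fn)                         # per-class IoU
precision = tp / (tp + fp)
recall = tp / (tp + fn)
fscore = 2 * precision * recall / (precision + recall)

print('IoU per class:', iou, 'mIoU:', iou.mean())
print('F-score per class:', fscore, 'mFscore:', fscore.mean())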
