First model version
- app.py +5 -5
- configs/{ctw → det}/r50_baseline.yaml +0 -0
- tools/demo.py → det_demo.py +0 -0
- test_contour.sh +0 -3
- tools/test_net.py +0 -95
- tools/train_net.py +0 -174
- train_contour.sh +0 -6
app.py
CHANGED
@@ -8,24 +8,24 @@ os.system('python setup.py build develop --user')
 import cv2
 import pandas as pd
 import gradio as gr
-from
+from det_demo import DetDemo
 from maskrcnn_benchmark.config import cfg


 def infer(filepath):
-    cfg.merge_from_file('./configs/
+    cfg.merge_from_file('./configs/det/r50_baseline.yaml')
     # manual override some options
     cfg.merge_from_list(["MODEL.DEVICE", "cpu"])

-
+    det_demo = DetDemo(
         cfg,
         min_image_size=800,
         confidence_threshold=0.7,
         output_polygon=True
     )
     image = cv2.imread(filepath)
-    result_polygons, result_masks =
+    result_polygons, result_masks = det_demo.run_on_opencv_image(image)
-    image =
+    image = det_demo.visualization(image, result_polygons, result_masks)
     cv2.imwrite('result.jpg', image)
     return 'result.jpg'#, pd.DataFrame(result_words)
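Only the changed hunk of app.py appears above, so the Gradio wiring that actually calls infer() sits outside this diff. The rest of app.py presumably exposes the function roughly like the sketch below; the interface construction, component types, and title are assumptions, not code taken from the Space.

# Hypothetical sketch: expose infer() from app.py through a simple Gradio UI.
import gradio as gr

demo_ui = gr.Interface(
    fn=infer,                            # infer() as defined in the hunk above
    inputs=gr.Image(type="filepath"),    # hand infer() the uploaded image as a path on disk
    outputs=gr.Image(type="filepath"),   # infer() returns the path of the rendered result
    title="Contour text detection demo", # placeholder title
)

if __name__ == "__main__":
    demo_ui.launch()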
configs/{ctw → det}/r50_baseline.yaml
RENAMED
File without changes

tools/demo.py → det_demo.py
RENAMED
File without changes
test_contour.sh
DELETED
@@ -1,3 +0,0 @@
-export NGPUS=1
-CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --nproc_per_node=$NGPUS tools/test_net.py \
-    --config-file "configs/ctw/r50_baseline.yaml"
tools/test_net.py
DELETED
@@ -1,95 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-# Set up custom environment before nearly anything else is imported
-# NOTE: this should be the first import (no not reorder)
-from maskrcnn_benchmark.utils.env import setup_environment  # noqa F401 isort:skip
-
-import argparse
-import os
-
-import torch
-from maskrcnn_benchmark.config import cfg
-from maskrcnn_benchmark.data import make_data_loader
-from maskrcnn_benchmark.engine.inference import inference
-from maskrcnn_benchmark.modeling.detector import build_detection_model
-from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
-from maskrcnn_benchmark.utils.collect_env import collect_env_info
-from maskrcnn_benchmark.utils.comm import synchronize, get_rank
-from maskrcnn_benchmark.utils.logger import setup_logger
-from maskrcnn_benchmark.utils.miscellaneous import mkdir
-
-
-def main():
-    parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
-    parser.add_argument(
-        "--config-file",
-        default="/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
-        metavar="FILE",
-        help="path to config file",
-    )
-    parser.add_argument("--local_rank", type=int, default=0)
-    parser.add_argument(
-        "opts",
-        help="Modify config options using the command-line",
-        default=None,
-        nargs=argparse.REMAINDER,
-    )
-
-    args = parser.parse_args()
-
-    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
-    distributed = num_gpus > 1
-
-    if distributed:
-        torch.cuda.set_device(args.local_rank)
-        torch.distributed.init_process_group(
-            backend="nccl", init_method="env://"
-        )
-        synchronize()
-
-    cfg.merge_from_file(args.config_file)
-    cfg.merge_from_list(args.opts)
-    cfg.freeze()
-
-    save_dir = ""
-    logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank())
-    logger.info("Using {} GPUs".format(num_gpus))
-    logger.info(cfg)
-
-    logger.info("Collecting env info (might take some time)")
-    logger.info("\n" + collect_env_info())
-
-    model = build_detection_model(cfg)
-    model.to(cfg.MODEL.DEVICE)
-
-    output_dir = cfg.OUTPUT_DIR
-    checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
-    _ = checkpointer.load(cfg.MODEL.WEIGHT)
-
-    iou_types = ("bbox",)
-    if cfg.MODEL.BOUNDARY_ON:
-        iou_types = iou_types + ("bo",)
-    output_folders = [None] * len(cfg.DATASETS.TEST)
-    dataset_names = cfg.DATASETS.TEST
-    if cfg.OUTPUT_DIR:
-        for idx, dataset_name in enumerate(dataset_names):
-            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
-            mkdir(output_folder)
-            output_folders[idx] = output_folder
-    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
-    for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
-        inference(
-            model,
-            data_loader_val,
-            dataset_name=dataset_name,
-            iou_types=iou_types,
-            box_only=False if cfg.MODEL.FCOS_ON or cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
-            device=cfg.MODEL.DEVICE,
-            expected_results=cfg.TEST.EXPECTED_RESULTS,
-            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
-            output_folder=output_folder,
-        )
-        synchronize()
-
-
-if __name__ == "__main__":
-    main()
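For context, tools/test_net.py above was the distributed, dataset-level evaluation entry point. After this commit the only inference path left in the Space is the DetDemo wrapper that app.py drives. A minimal CPU-only sketch of that path, using only the API visible in this diff (the sample image path is a placeholder):

# Hypothetical standalone check, mirroring what app.py does per request.
import cv2
from maskrcnn_benchmark.config import cfg
from det_demo import DetDemo

cfg.merge_from_file('./configs/det/r50_baseline.yaml')
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])   # force CPU, as in app.py

det_demo = DetDemo(
    cfg,
    min_image_size=800,
    confidence_threshold=0.7,
    output_polygon=True,
)
image = cv2.imread('sample.jpg')               # placeholder input image
polygons, masks = det_demo.run_on_opencv_image(image)
cv2.imwrite('result.jpg', det_demo.visualization(image, polygons, masks))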
tools/train_net.py
DELETED
@@ -1,174 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-r"""
-Basic training script for PyTorch
-"""
-
-# Set up custom environment before nearly anything else is imported
-# NOTE: this should be the first import (no not reorder)
-from maskrcnn_benchmark.utils.env import setup_environment  # noqa F401 isort:skip
-
-import argparse
-import os
-
-import torch
-from maskrcnn_benchmark.config import cfg
-from maskrcnn_benchmark.data import make_data_loader
-from maskrcnn_benchmark.solver import make_lr_scheduler
-from maskrcnn_benchmark.solver import make_optimizer
-from maskrcnn_benchmark.engine.inference import inference
-from maskrcnn_benchmark.engine.trainer import do_train
-from maskrcnn_benchmark.modeling.detector import build_detection_model
-from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
-from maskrcnn_benchmark.utils.collect_env import collect_env_info
-from maskrcnn_benchmark.utils.comm import synchronize, get_rank
-from maskrcnn_benchmark.utils.imports import import_file
-from maskrcnn_benchmark.utils.logger import setup_logger
-from maskrcnn_benchmark.utils.miscellaneous import mkdir
-
-
-def train(cfg, local_rank, distributed):
-    model = build_detection_model(cfg)
-    device = torch.device(cfg.MODEL.DEVICE)
-    model.to(device)
-
-    optimizer = make_optimizer(cfg, model)
-    scheduler = make_lr_scheduler(cfg, optimizer)
-
-    if distributed:
-        model = torch.nn.parallel.DistributedDataParallel(
-            model, device_ids=[local_rank], output_device=local_rank,
-            # this should be removed if we update BatchNorm stats
-            broadcast_buffers=False,
-        )
-
-    arguments = {}
-    arguments["iteration"] = 0
-
-    output_dir = cfg.OUTPUT_DIR
-
-    save_to_disk = get_rank() == 0
-    checkpointer = DetectronCheckpointer(
-        cfg, model, optimizer, scheduler, output_dir, save_to_disk
-    )
-    extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)
-    arguments.update(extra_checkpoint_data)
-
-    data_loader = make_data_loader(
-        cfg,
-        is_train=True,
-        is_distributed=distributed,
-        start_iter=arguments["iteration"],
-    )
-
-    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
-
-    do_train(
-        model,
-        data_loader,
-        optimizer,
-        scheduler,
-        checkpointer,
-        device,
-        checkpoint_period,
-        arguments,
-    )
-
-    return model
-
-
-def run_test(cfg, model, distributed):
-    if distributed:
-        model = model.module
-    torch.cuda.empty_cache()  # TODO check if it helps
-    iou_types = ("bbox",)
-    if cfg.MODEL.MASK_ON:
-        iou_types = iou_types + ("segm",)
-    if cfg.MODEL.KEYPOINT_ON:
-        iou_types = iou_types + ("keypoints",)
-    output_folders = [None] * len(cfg.DATASETS.TEST)
-    dataset_names = cfg.DATASETS.TEST
-    if cfg.OUTPUT_DIR:
-        for idx, dataset_name in enumerate(dataset_names):
-            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
-            mkdir(output_folder)
-            output_folders[idx] = output_folder
-    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
-    for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
-        inference(
-            model,
-            data_loader_val,
-            dataset_name=dataset_name,
-            iou_types=iou_types,
-            box_only=False if cfg.MODEL.FCOS_ON or cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
-            device=cfg.MODEL.DEVICE,
-            expected_results=cfg.TEST.EXPECTED_RESULTS,
-            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
-            output_folder=output_folder,
-        )
-        synchronize()
-
-
-def main():
-    parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
-    parser.add_argument(
-        "--config-file",
-        default="",
-        metavar="FILE",
-        help="path to config file",
-        type=str,
-    )
-    parser.add_argument("--local_rank", type=int, default=0)
-    parser.add_argument(
-        "--skip-test",
-        dest="skip_test",
-        help="Do not test the final model",
-        action="store_true",
-    )
-    parser.add_argument(
-        "opts",
-        help="Modify config options using the command-line",
-        default=None,
-        nargs=argparse.REMAINDER,
-    )
-
-    args = parser.parse_args()
-
-    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
-    args.distributed = num_gpus > 1
-
-    if args.distributed:
-        torch.cuda.set_device(args.local_rank)
-        torch.distributed.init_process_group(
-            backend="nccl", init_method="env://"
-        )
-        synchronize()
-
-    cfg.merge_from_file(args.config_file)
-    cfg.merge_from_list(args.opts)
-    cfg.freeze()
-
-    output_dir = cfg.OUTPUT_DIR
-    if output_dir:
-        mkdir(output_dir)
-
-    logger = setup_logger("maskrcnn_benchmark", output_dir, get_rank())
-    logger.info("Using {} GPUs".format(num_gpus))
-    logger.info(args)
-
-    logger.info("Collecting env info (might take some time)")
-    logger.info("\n" + collect_env_info())
-
-    logger.info("Loaded configuration file {}".format(args.config_file))
-    with open(args.config_file, "r") as cf:
-        config_str = "\n" + cf.read()
-        logger.info(config_str)
-    logger.info("Running with config:\n{}".format(cfg))
-
-    model = train(cfg, args.local_rank, args.distributed)
-
-    if not args.skip_test:
-        run_test(cfg, model, args.distributed)
-
-
-if __name__ == "__main__":
-    main()
train_contour.sh
DELETED
@@ -1,6 +0,0 @@
-# export NCCL_P2P_DISABLE=1
-export NGPUS=1
-CUDA_VISIBLE_DEVICES=1 python -m torch.distributed.launch --nproc_per_node=$NGPUS tools/train_net.py \
-    --config-file "configs/ic/r50_baseline.yaml" \
-    --skip-test
-