From ab3767543e23a4aaf59e3aaf9e7784818ab0ec3d Mon Sep 17 00:00:00 2001
From: hongyuanyu
Date: Sun, 13 Feb 2022 17:28:48 +0800
Subject: [PATCH] update

---
 CDARTS/_init_paths.py | 19 +
 CDARTS/cells/cifar_genotype.json | 1 +
 CDARTS/cells/dartsv1_genotype.json | 1 +
 CDARTS/cells/dartsv2_genotype.json | 1 +
 CDARTS/cells/imagenet_genotype.json | 1 +
 CDARTS/cells/pcdarts_cifar_genotype.json | 1 +
 CDARTS/cells/pcdarts_imagenet_genotype.json | 1 +
 CDARTS/cells/pdarts_genotype.json | 1 +
 CDARTS/retrain.py | 206 +
 CDARTS/scripts/run_retrain_cifar_1gpu.sh | 14 +
 CDARTS/scripts/run_retrain_cifar_4gpus.sh | 15 +
 CDARTS/scripts/run_retrain_imagenet.sh | 15 +
 CDARTS/scripts/run_search_cifar_1gpu.sh | 20 +
 CDARTS/scripts/run_search_cifar_4gpus.sh | 20 +
 CDARTS/scripts/run_search_imagenet.sh | 20 +
 CDARTS/scripts/run_test_cifar.sh | 11 +
 CDARTS/scripts/run_test_imagenet.sh | 11 +
 CDARTS/search.py | 403 +
 CDARTS/test.py | 96 +
 CDARTS_detection/README.md | 58 +
 CDARTS_detection/compile.sh | 43 +
 .../configs/CyDAS_retinanet_1x.py | 129 +
 CDARTS_detection/env.sh | 14 +
 CDARTS_detection/mmcv/__init__.py | 13 +
 .../mmcv/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 324 bytes
 .../__pycache__/opencv_info.cpython-36.pyc | Bin 0 -> 398 bytes
 .../mmcv/__pycache__/version.cpython-36.pyc | Bin 0 -> 159 bytes
 .../mmcv/_ext.cpython-36m-x86_64-linux-gnu.so | Bin 0 -> 251384 bytes
 CDARTS_detection/mmcv/arraymisc/__init__.py | 3 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 243 bytes
 .../__pycache__/quantization.cpython-36.pyc | Bin 0 -> 1726 bytes
 .../mmcv/arraymisc/quantization.py | 56 +
 CDARTS_detection/mmcv/cnn/__init__.py | 11 +
 .../cnn/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 563 bytes
 .../cnn/__pycache__/alexnet.cpython-36.pyc | Bin 0 -> 1792 bytes
 .../cnn/__pycache__/resnet.cpython-36.pyc | Bin 0 -> 7641 bytes
 .../mmcv/cnn/__pycache__/vgg.cpython-36.pyc | Bin 0 -> 4774 bytes
 .../__pycache__/weight_init.cpython-36.pyc | Bin 0 -> 1880 bytes
 CDARTS_detection/mmcv/cnn/alexnet.py | 61 +
 CDARTS_detection/mmcv/cnn/resnet.py | 314 +
 CDARTS_detection/mmcv/cnn/vgg.py | 174 +
 CDARTS_detection/mmcv/cnn/weight_init.py | 57 +
 CDARTS_detection/mmcv/fileio/__init__.py | 8 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 483 bytes
 .../mmcv/fileio/__pycache__/io.cpython-36.pyc | Bin 0 -> 3512 bytes
 .../fileio/__pycache__/parse.cpython-36.pyc | Bin 0 -> 1770 bytes
 .../mmcv/fileio/handlers/__init__.py | 6 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 389 bytes
 .../handlers/__pycache__/base.cpython-36.pyc | Bin 0 -> 1289 bytes
 .../__pycache__/json_handler.cpython-36.pyc | Bin 0 -> 861 bytes
 .../__pycache__/pickle_handler.cpython-36.pyc | Bin 0 -> 1414 bytes
 .../__pycache__/yaml_handler.cpython-36.pyc | Bin 0 -> 1099 bytes
 CDARTS_detection/mmcv/fileio/handlers/base.py | 26 +
 .../mmcv/fileio/handlers/json_handler.py | 15 +
 .../mmcv/fileio/handlers/pickle_handler.py | 25 +
 .../mmcv/fileio/handlers/yaml_handler.py | 23 +
 CDARTS_detection/mmcv/fileio/io.py | 112 +
 CDARTS_detection/mmcv/fileio/parse.py | 50 +
 CDARTS_detection/mmcv/image/__init__.py | 12 +
 .../image/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 787 bytes
 .../mmcv/image/__pycache__/io.cpython-36.pyc | Bin 0 -> 2443 bytes
 CDARTS_detection/mmcv/image/io.py | 79 +
 .../mmcv/image/transforms/__init__.py | 12 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 769 bytes
 .../__pycache__/colorspace.cpython-36.pyc | Bin 0 -> 1980 bytes
 .../__pycache__/geometry.cpython-36.pyc | Bin 0 -> 6201 bytes
 .../__pycache__/normalize.cpython-36.pyc | Bin 0 -> 594 bytes
 .../__pycache__/resize.cpython-36.pyc | Bin 0 -> 3286 bytes
 .../mmcv/image/transforms/colorspace.py | 77 +
 .../mmcv/image/transforms/geometry.py | 203 +
 .../mmcv/image/transforms/normalize.py | 17 +
 .../mmcv/image/transforms/resize.py | 107 +
 CDARTS_detection/mmcv/opencv_info.py | 12 +
 CDARTS_detection/mmcv/parallel/__init__.py | 10 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 472 bytes
 .../__pycache__/_functions.cpython-36.pyc | Bin 0 -> 2443 bytes
 .../__pycache__/collate.cpython-36.pyc | Bin 0 -> 3203 bytes
 .../__pycache__/data_container.cpython-36.pyc | Bin 0 -> 3096 bytes
 .../__pycache__/data_parallel.cpython-36.pyc | Bin 0 -> 597 bytes
 .../__pycache__/distributed.cpython-36.pyc | Bin 0 -> 2397 bytes
 .../__pycache__/scatter_gather.cpython-36.pyc | Bin 0 -> 1937 bytes
 CDARTS_detection/mmcv/parallel/_functions.py | 74 +
 CDARTS_detection/mmcv/parallel/collate.py | 84 +
 .../mmcv/parallel/data_container.py | 84 +
 .../mmcv/parallel/data_parallel.py | 9 +
 CDARTS_detection/mmcv/parallel/distributed.py | 50 +
 .../mmcv/parallel/scatter_gather.py | 54 +
 CDARTS_detection/mmcv/runner/__init__.py | 23 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 1185 bytes
 .../__pycache__/checkpoint.cpython-36.pyc | Bin 0 -> 8965 bytes
 .../__pycache__/dist_utils.cpython-36.pyc | Bin 0 -> 2332 bytes
 .../__pycache__/log_buffer.cpython-36.pyc | Bin 0 -> 1570 bytes
 .../__pycache__/parallel_test.cpython-36.pyc | Bin 0 -> 2265 bytes
 .../__pycache__/priority.cpython-36.pyc | Bin 0 -> 1569 bytes
 .../runner/__pycache__/runner.cpython-36.pyc | Bin 0 -> 13009 bytes
 .../runner/__pycache__/utils.cpython-36.pyc | Bin 0 -> 2382 bytes
 CDARTS_detection/mmcv/runner/checkpoint.py | 238 +
 CDARTS_detection/mmcv/runner/dist_utils.py | 76 +
 .../mmcv/runner/hooks/__init__.py | 16 +
 .../hooks/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 781 bytes
 .../__pycache__/checkpoint.cpython-36.pyc | Bin 0 -> 919 bytes
 .../hooks/__pycache__/closure.cpython-36.pyc | Bin 0 -> 552 bytes
 .../hooks/__pycache__/hook.cpython-36.pyc | Bin 0 -> 3030 bytes
 .../__pycache__/iter_timer.cpython-36.pyc | Bin 0 -> 899 bytes
 .../__pycache__/lr_updater.cpython-36.pyc | Bin 0 -> 7176 bytes
 .../hooks/__pycache__/memory.cpython-36.pyc | Bin 0 -> 1081 bytes
 .../__pycache__/optimizer.cpython-36.pyc | Bin 0 -> 1994 bytes
 .../__pycache__/sampler_seed.cpython-36.pyc | Bin 0 -> 546 bytes
 .../mmcv/runner/hooks/checkpoint.py | 25 +
 CDARTS_detection/mmcv/runner/hooks/closure.py | 9 +
 CDARTS_detection/mmcv/runner/hooks/hook.py | 58 +
 .../mmcv/runner/hooks/iter_timer.py | 16 +
 .../mmcv/runner/hooks/logger/__init__.py | 8 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 383 bytes
 .../logger/__pycache__/base.cpython-36.pyc | Bin 0 -> 2493 bytes
 .../logger/__pycache__/pavi.cpython-36.pyc | Bin 0 -> 5071 bytes
 .../__pycache__/tensorboard.cpython-36.pyc | Bin 0 -> 1952 bytes
 .../logger/__pycache__/text.cpython-36.pyc | Bin 0 -> 4032 bytes
 .../mmcv/runner/hooks/logger/base.py | 70 +
 .../mmcv/runner/hooks/logger/pavi.py | 176 +
 .../mmcv/runner/hooks/logger/tensorboard.py | 55 +
 .../mmcv/runner/hooks/logger/text.py | 123 +
 .../mmcv/runner/hooks/lr_updater.py | 183 +
 CDARTS_detection/mmcv/runner/hooks/memory.py | 23 +
 .../mmcv/runner/hooks/optimizer.py | 35 +
 .../mmcv/runner/hooks/sampler_seed.py | 7 +
 CDARTS_detection/mmcv/runner/log_buffer.py | 40 +
 CDARTS_detection/mmcv/runner/parallel_test.py | 74 +
 CDARTS_detection/mmcv/runner/priority.py | 53 +
 CDARTS_detection/mmcv/runner/runner.py | 426 +
 CDARTS_detection/mmcv/runner/utils.py | 78 +
 CDARTS_detection/mmcv/utils/__init__.py | 17 +
 .../utils/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 1060 bytes
 .../utils/__pycache__/config.cpython-36.pyc | Bin 0 -> 5667 bytes
 .../utils/__pycache__/misc.cpython-36.pyc | Bin 0 -> 6185 bytes
 .../utils/__pycache__/path.cpython-36.pyc | Bin 0 -> 2193 bytes
 .../__pycache__/progressbar.cpython-36.pyc | Bin 0 -> 5359 bytes
 .../utils/__pycache__/timer.cpython-36.pyc | Bin 0 -> 3491 bytes
 CDARTS_detection/mmcv/utils/config.py | 159 +
 CDARTS_detection/mmcv/utils/misc.py | 218 +
 CDARTS_detection/mmcv/utils/path.py | 79 +
 CDARTS_detection/mmcv/utils/progressbar.py | 174 +
 CDARTS_detection/mmcv/utils/timer.py | 117 +
 CDARTS_detection/mmcv/version.py | 1 +
 CDARTS_detection/mmcv/video/__init__.py | 10 +
 .../video/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 568 bytes
 .../mmcv/video/__pycache__/io.cpython-36.pyc | Bin 0 -> 10948 bytes
 .../video/__pycache__/optflow.cpython-36.pyc | Bin 0 -> 5814 bytes
 .../__pycache__/processing.cpython-36.pyc | Bin 0 -> 4418 bytes
 CDARTS_detection/mmcv/video/io.py | 332 +
 CDARTS_detection/mmcv/video/optflow.py | 171 +
 .../mmcv/video/optflow_warp/__init__.py | 0
 .../mmcv/video/optflow_warp/flow_warp.cpp | 75 +
 .../mmcv/video/optflow_warp/flow_warp.hpp | 29 +
 .../video/optflow_warp/flow_warp_module.cpp | 7928 ++++++++++++
 .../video/optflow_warp/flow_warp_module.pyx | 27 +
 CDARTS_detection/mmcv/video/processing.py | 159 +
 .../mmcv/visualization/__init__.py | 8 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 454 bytes
 .../__pycache__/color.cpython-36.pyc | Bin 0 -> 1535 bytes
 .../__pycache__/image.cpython-36.pyc | Bin 0 -> 4742 bytes
 .../__pycache__/optflow.cpython-36.pyc | Bin 0 -> 3269 bytes
 CDARTS_detection/mmcv/visualization/color.py | 50 +
 CDARTS_detection/mmcv/visualization/image.py | 146 +
 .../mmcv/visualization/optflow.py | 113 +
 CDARTS_detection/mmdet.egg-info/PKG-INFO | 89 +
 CDARTS_detection/mmdet.egg-info/SOURCES.txt | 301 +
 .../mmdet.egg-info/dependency_links.txt | 1 +
 CDARTS_detection/mmdet.egg-info/not-zip-safe | 1 +
 CDARTS_detection/mmdet.egg-info/requires.txt | 6 +
 CDARTS_detection/mmdet.egg-info/top_level.txt | 2 +
 CDARTS_detection/mmdet/__init__.py | 3 +
 .../mmdet/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 235 bytes
 .../mmdet/__pycache__/version.cpython-36.pyc | Bin 0 -> 195 bytes
 CDARTS_detection/mmdet/apis/__init__.py | 9 +
 .../apis/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 477 bytes
 .../mmdet/apis/__pycache__/env.cpython-36.pyc | Bin 0 -> 2319 bytes
 .../apis/__pycache__/inference.cpython-36.pyc | Bin 0 -> 5563 bytes
 .../apis/__pycache__/train.cpython-36.pyc | Bin 0 -> 6498 bytes
 CDARTS_detection/mmdet/apis/env.py | 69 +
 CDARTS_detection/mmdet/apis/inference.py | 171 +
 CDARTS_detection/mmdet/apis/train.py | 256 +
 CDARTS_detection/mmdet/core/__init__.py | 7 +
 .../core/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 281 bytes
 .../mmdet/core/anchor/__init__.py | 8 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 409 bytes
 .../anchor_generator.cpython-36.pyc | Bin 0 -> 2768 bytes
 .../__pycache__/anchor_target.cpython-36.pyc | Bin 0 -> 4875 bytes
 .../guided_anchor_target.cpython-36.pyc | Bin 0 -> 8196 bytes
 .../mmdet/core/anchor/anchor_generator.py | 84 +
 .../mmdet/core/anchor/anchor_target.py | 186 +
 .../mmdet/core/anchor/guided_anchor_target.py | 285 +
 CDARTS_detection/mmdet/core/bbox/__init__.py | 20 +
 .../bbox/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 1057 bytes
 .../assign_sampling.cpython-36.pyc | Bin 0 -> 1174 bytes
 .../__pycache__/bbox_target.cpython-36.pyc | Bin 0 -> 1786 bytes
 .../bbox/__pycache__/geometry.cpython-36.pyc | Bin 0 -> 1889 bytes
 .../__pycache__/transforms.cpython-36.pyc | Bin 0 -> 5746 bytes
 .../mmdet/core/bbox/assign_sampling.py | 33 +
 .../mmdet/core/bbox/assigners/__init__.py | 8 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 425 bytes
 .../approx_max_iou_assigner.cpython-36.pyc | Bin 0 -> 4206 bytes
 .../__pycache__/assign_result.cpython-36.pyc | Bin 0 -> 894 bytes
 .../__pycache__/base_assigner.cpython-36.pyc | Bin 0 -> 575 bytes
 .../max_iou_assigner.cpython-36.pyc | Bin 0 -> 4960 bytes
 .../bbox/assigners/approx_max_iou_assigner.py | 116 +
 .../core/bbox/assigners/assign_result.py | 19 +
 .../core/bbox/assigners/base_assigner.py | 8 +
 .../core/bbox/assigners/max_iou_assigner.py | 152 +
 .../mmdet/core/bbox/bbox_target.py | 73 +
 CDARTS_detection/mmdet/core/bbox/geometry.py | 63 +
 .../mmdet/core/bbox/samplers/__init__.py | 14 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 693 bytes
 .../__pycache__/base_sampler.cpython-36.pyc | Bin 0 -> 2341 bytes
 .../combined_sampler.cpython-36.pyc | Bin 0 -> 992 bytes
 ...stance_balanced_pos_sampler.cpython-36.pyc | Bin 0 -> 1305 bytes
 .../iou_balanced_neg_sampler.cpython-36.pyc | Bin 0 -> 3680 bytes
 .../__pycache__/ohem_sampler.cpython-36.pyc | Bin 0 -> 2035 bytes
 .../__pycache__/pseudo_sampler.cpython-36.pyc | Bin 0 -> 1255 bytes
 .../__pycache__/random_sampler.cpython-36.pyc | Bin 0 -> 1925 bytes
 .../sampling_result.cpython-36.pyc | Bin 0 -> 969 bytes
 .../mmdet/core/bbox/samplers/base_sampler.py | 78 +
 .../core/bbox/samplers/combined_sampler.py | 16 +
 .../samplers/instance_balanced_pos_sampler.py | 41 +
 .../bbox/samplers/iou_balanced_neg_sampler.py | 133 +
 .../mmdet/core/bbox/samplers/ohem_sampler.py | 73 +
 .../core/bbox/samplers/pseudo_sampler.py | 26 +
 .../core/bbox/samplers/random_sampler.py | 53 +
 .../core/bbox/samplers/sampling_result.py | 24 +
 .../mmdet/core/bbox/transforms.py | 180 +
 .../mmdet/core/evaluation/__init__.py | 14 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 723 bytes
 .../__pycache__/bbox_overlaps.cpython-36.pyc | Bin 0 -> 1479 bytes
 .../__pycache__/class_names.cpython-36.pyc | Bin 0 -> 5080 bytes
 .../__pycache__/eval_hooks.cpython-36.pyc | Bin 0 -> 2534 bytes
 .../__pycache__/mean_ap.cpython-36.pyc | Bin 0 -> 12906 bytes
 .../__pycache__/recall.cpython-36.pyc | Bin 0 -> 5355 bytes
 .../mmdet/core/evaluation/bbox_overlaps.py | 49 +
 .../mmdet/core/evaluation/class_names.py | 108 +
 .../mmdet/core/evaluation/coco_utils.py | 177 +
 .../mmdet/core/evaluation/eval_hooks.py | 74 +
 .../mmdet/core/evaluation/mean_ap.py | 455 +
 .../mmdet/core/evaluation/recall.py | 193 +
 CDARTS_detection/mmdet/core/fp16/__init__.py | 4 +
 .../fp16/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 330 bytes
 .../__pycache__/decorators.cpython-36.pyc | Bin 0 -> 4254 bytes
 .../fp16/__pycache__/hooks.cpython-36.pyc | Bin 0 -> 4005 bytes
 .../fp16/__pycache__/utils.cpython-36.pyc | Bin 0 -> 927 bytes
 .../mmdet/core/fp16/decorators.py | 160 +
 CDARTS_detection/mmdet/core/fp16/hooks.py | 126 +
 CDARTS_detection/mmdet/core/fp16/utils.py | 23 +
 CDARTS_detection/mmdet/core/mask/__init__.py | 4 +
 .../mask/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 262 bytes
 .../__pycache__/mask_target.cpython-36.pyc | Bin 0 -> 1305 bytes
 .../mask/__pycache__/utils.cpython-36.pyc | Bin 0 -> 1218 bytes
 .../mmdet/core/mask/mask_target.py | 36 +
 CDARTS_detection/mmdet/core/mask/utils.py | 30 +
 .../mmdet/core/post_processing/__init__.py | 8 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 393 bytes
 .../__pycache__/bbox_nms.cpython-36.pyc | Bin 0 -> 1839 bytes
 .../__pycache__/merge_augs.cpython-36.pyc | Bin 0 -> 3155 bytes
 .../mmdet/core/post_processing/bbox_nms.py | 64 +
 .../mmdet/core/post_processing/merge_augs.py | 96 +
 CDARTS_detection/mmdet/core/utils/__init__.py | 7 +
 .../utils/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 397 bytes
 .../__pycache__/dist_utils.cpython-36.pyc | Bin 0 -> 2906 bytes
 .../utils/__pycache__/misc.cpython-36.pyc | Bin 0 -> 1353 bytes
 .../mmdet/core/utils/dist_utils.py | 89 +
 CDARTS_detection/mmdet/core/utils/misc.py | 37 +
 CDARTS_detection/mmdet/datasets/__init__.py | 17 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 836 bytes
 .../__pycache__/builder.cpython-36.pyc | Bin 0 -> 1374 bytes
 .../__pycache__/cityscapes.cpython-36.pyc | Bin 0 -> 535 bytes
 .../datasets/__pycache__/coco.cpython-36.pyc | Bin 0 -> 11718 bytes
 .../__pycache__/custom.cpython-36.pyc | Bin 0 -> 6737 bytes
 .../dataset_wrappers.cpython-36.pyc | Bin 0 -> 2170 bytes
 .../__pycache__/registry.cpython-36.pyc | Bin 0 -> 252 bytes
 .../datasets/__pycache__/voc.cpython-36.pyc | Bin 0 -> 2411 bytes
 .../__pycache__/wider_face.cpython-36.pyc | Bin 0 -> 1551 bytes
 .../__pycache__/xml_style.cpython-36.pyc | Bin 0 -> 2709 bytes
 CDARTS_detection/mmdet/datasets/builder.py | 41 +
 CDARTS_detection/mmdet/datasets/cityscapes.py | 9 +
 CDARTS_detection/mmdet/datasets/coco.py | 372 +
 CDARTS_detection/mmdet/datasets/custom.py | 211 +
 .../mmdet/datasets/dataset_wrappers.py | 55 +
 .../mmdet/datasets/loader/__init__.py | 4 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 361 bytes
 .../__pycache__/build_loader.cpython-36.pyc | Bin 0 -> 2000 bytes
 .../loader/__pycache__/sampler.cpython-36.pyc | Bin 0 -> 6004 bytes
 .../mmdet/datasets/loader/build_loader.py | 101 +
 .../mmdet/datasets/loader/sampler.py | 201 +
 .../mmdet/datasets/pipelines/__init__.py | 16 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 908 bytes
 .../__pycache__/compose.cpython-36.pyc | Bin 0 -> 1265 bytes
 .../__pycache__/formating.cpython-36.pyc | Bin 0 -> 7156 bytes
 .../__pycache__/loading.cpython-36.pyc | Bin 0 -> 4941 bytes
 .../__pycache__/test_aug.cpython-36.pyc | Bin 0 -> 1602 bytes
 .../__pycache__/transforms.cpython-36.pyc | Bin 0 -> 25683 bytes
 .../mmdet/datasets/pipelines/compose.py | 35 +
 .../mmdet/datasets/pipelines/formating.py | 186 +
 .../mmdet/datasets/pipelines/loading.py | 155 +
 .../mmdet/datasets/pipelines/test_aug.py | 37 +
 .../mmdet/datasets/pipelines/transforms.py | 853 ++
 CDARTS_detection/mmdet/datasets/registry.py | 4 +
 CDARTS_detection/mmdet/datasets/transforms.py | 147 +
 CDARTS_detection/mmdet/datasets/utils.py | 68 +
 CDARTS_detection/mmdet/datasets/voc.py | 66 +
 CDARTS_detection/mmdet/datasets/wider_face.py | 42 +
 CDARTS_detection/mmdet/datasets/xml_style.py | 86 +
 CDARTS_detection/mmdet/models/__init__.py | 20 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 814 bytes
 .../models/__pycache__/builder.cpython-36.pyc | Bin 0 -> 1661 bytes
 .../__pycache__/registry.cpython-36.pyc | Bin 0 -> 400 bytes
 .../mmdet/models/anchor_heads/__init__.py | 13 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 634 bytes
 .../__pycache__/anchor_head.cpython-36.pyc | Bin 0 -> 8045 bytes
 .../__pycache__/fcos_head.cpython-36.pyc | Bin 0 -> 11299 bytes
 .../__pycache__/ga_retina_head.cpython-36.pyc | Bin 0 -> 2784 bytes
 .../__pycache__/ga_rpn_head.cpython-36.pyc | Bin 0 -> 3578 bytes
 .../guided_anchor_head.cpython-36.pyc | Bin 0 -> 16696 bytes
 .../__pycache__/retina_head.cpython-36.pyc | Bin 0 -> 3071 bytes
 .../__pycache__/rpn_head.cpython-36.pyc | Bin 0 -> 3287 bytes
 .../__pycache__/ssd_head.cpython-36.pyc | Bin 0 -> 5825 bytes
 .../mmdet/models/anchor_heads/anchor_head.py | 270 +
 .../mmdet/models/anchor_heads/fcos_head.py | 389 +
 .../models/anchor_heads/ga_retina_head.py | 107 +
 .../mmdet/models/anchor_heads/ga_rpn_head.py | 127 +
 .../models/anchor_heads/guided_anchor_head.py | 609 +
 .../mmdet/models/anchor_heads/retina_head.py | 111 +
 .../mmdet/models/anchor_heads/rpn_head.py | 104 +
 .../mmdet/models/anchor_heads/ssd_head.py | 193 +
 .../mmdet/models/backbones/__init__.py | 12 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 675 bytes
 .../__pycache__/builder.cpython-36.pyc | Bin 0 -> 25224 bytes
 .../__pycache__/detnas.cpython-36.pyc | Bin 0 -> 10367 bytes
 .../__pycache__/dropblock.cpython-36.pyc | Bin 0 -> 5051 bytes
 .../__pycache__/efficientnet.cpython-36.pyc | Bin 0 -> 63066 bytes
 .../efficientnet_builder.cpython-36.pyc | Bin 0 -> 30137 bytes
 .../__pycache__/fbnet.cpython-36.pyc | Bin 0 -> 2694 bytes
 .../__pycache__/fbnet_arch.cpython-36.pyc | Bin 0 -> 891 bytes
 .../__pycache__/fbnet_blocks.cpython-36.pyc | Bin 0 -> 8652 bytes
 .../__pycache__/feature_hooks.cpython-36.pyc | Bin 0 -> 1516 bytes
 .../__pycache__/hrnet.cpython-36.pyc | Bin 0 -> 10244 bytes
 .../__pycache__/mnasnet.cpython-36.pyc | Bin 0 -> 5047 bytes
 .../__pycache__/mobilenetv2.cpython-36.pyc | Bin 0 -> 4768 bytes
 .../__pycache__/mobilenetv3.cpython-36.pyc | Bin 0 -> 11069 bytes
 .../__pycache__/resnet.cpython-36.pyc | Bin 0 -> 17963 bytes
 .../__pycache__/resnext.cpython-36.pyc | Bin 0 -> 5428 bytes
 .../__pycache__/ssd_vgg.cpython-36.pyc | Bin 0 -> 4093 bytes
 .../__pycache__/utils.cpython-36.pyc | Bin 0 -> 2461 bytes
 .../mmdet/models/backbones/builder.py | 872 ++
 .../mmdet/models/backbones/detnas.py | 342 +
 .../mmdet/models/backbones/dropblock.py | 150 +
 .../mmdet/models/backbones/efficientnet.py | 1934 +++
 .../models/backbones/efficientnet_builder.py | 1026 ++
 .../mmdet/models/backbones/fbnet.py | 77 +
 .../mmdet/models/backbones/fbnet_arch.py | 63 +
 .../mmdet/models/backbones/fbnet_blocks.py | 156 +
 .../mmdet/models/backbones/feature_hooks.py | 31 +
 .../mmdet/models/backbones/hrnet.py | 484 +
 .../mmdet/models/backbones/mnasnet.py | 192 +
 .../mmdet/models/backbones/mobilenetv2.py | 201 +
 .../mmdet/models/backbones/mobilenetv3.py | 389 +
 .../mmdet/models/backbones/resnet.py | 822 ++
 .../mmdet/models/backbones/resnext.py | 223 +
 .../mmdet/models/backbones/ssd_vgg.py | 134 +
 .../mmdet/models/backbones/utils.py | 100 +
 .../mmdet/models/bbox_heads/__init__.py | 7 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 385 bytes
 .../__pycache__/bbox_head.cpython-36.pyc | Bin 0 -> 6998 bytes
 .../convfc_bbox_head.cpython-36.pyc | Bin 0 -> 7729 bytes
 .../double_bbox_head.cpython-36.pyc | Bin 0 -> 4624 bytes
 .../models/bbox_heads/auto_head/__init__.py | 0
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 167 bytes
 .../__pycache__/build_head.cpython-36.pyc | Bin 0 -> 582 bytes
 .../mbblock_head_search.cpython-36.pyc | Bin 0 -> 1596 bytes
 .../__pycache__/mbblock_ops.cpython-36.pyc | Bin 0 -> 8857 bytes
 .../models/bbox_heads/auto_head/build_head.py | 22 +
 .../auto_head/mbblock_head_search.py | 54 +
 .../bbox_heads/auto_head/mbblock_ops.py | 169 +
 .../mmdet/models/bbox_heads/bbox_head.py | 241 +
 .../models/bbox_heads/convfc_bbox_head.py | 327 +
 .../models/bbox_heads/double_bbox_head.py | 167 +
 CDARTS_detection/mmdet/models/builder.py | 43 +
 .../mmdet/models/detectors/__init__.py | 20 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 904 bytes
 .../detectors/__pycache__/base.cpython-36.pyc | Bin 0 -> 6267 bytes
 .../__pycache__/cascade_rcnn.cpython-36.pyc | Bin 0 -> 8984 bytes
 .../double_head_rcnn.cpython-36.pyc | Bin 0 -> 4567 bytes
 .../__pycache__/fast_rcnn.cpython-36.pyc | Bin 0 -> 1456 bytes
 .../__pycache__/faster_rcnn.cpython-36.pyc | Bin 0 -> 880 bytes
 .../detectors/__pycache__/fcos.cpython-36.pyc | Bin 0 -> 685 bytes
 .../__pycache__/grid_rcnn.cpython-36.pyc | Bin 0 -> 5270 bytes
 .../detectors/__pycache__/htc.cpython-36.pyc | Bin 0 -> 9387 bytes
 .../__pycache__/mask_rcnn.cpython-36.pyc | Bin 0 -> 855 bytes
 .../mask_scoring_rcnn.cpython-36.pyc | Bin 0 -> 4756 bytes
 .../__pycache__/retinanet.cpython-36.pyc | Bin 0 -> 700 bytes
 .../detectors/__pycache__/rpn.cpython-36.pyc | Bin 0 -> 3135 bytes
 .../__pycache__/single_stage.cpython-36.pyc | Bin 0 -> 2935 bytes
 .../__pycache__/test_mixins.cpython-36.pyc | Bin 0 -> 4810 bytes
 .../__pycache__/two_stage.cpython-36.pyc | Bin 0 -> 6885 bytes
 .../mmdet/models/detectors/base.py | 176 +
 .../mmdet/models/detectors/cascade_rcnn.py | 379 +
 .../models/detectors/double_head_rcnn.py | 191 +
 .../mmdet/models/detectors/fast_rcnn.py | 50 +
 .../mmdet/models/detectors/faster_rcnn.py | 31 +
 .../mmdet/models/detectors/fcos.py | 16 +
 .../mmdet/models/detectors/grid_rcnn.py | 205 +
 .../mmdet/models/detectors/htc.py | 396 +
 .../mmdet/models/detectors/mask_rcnn.py | 31 +
 .../models/detectors/mask_scoring_rcnn.py | 197 +
 .../mmdet/models/detectors/retinanet.py | 16 +
 .../mmdet/models/detectors/rpn.py | 92 +
 .../mmdet/models/detectors/single_stage.py | 95 +
 .../mmdet/models/detectors/test_mixins.py | 163 +
 .../mmdet/models/detectors/two_stage.py | 347 +
 .../mmdet/models/losses/__init__.py | 19 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 1025 bytes
 .../__pycache__/accuracy.cpython-36.pyc | Bin 0 -> 1248 bytes
 .../balanced_l1_loss.cpython-36.pyc | Bin 0 -> 1824 bytes
 .../cross_entropy_loss.cpython-36.pyc | Bin 0 -> 2686 bytes
 .../__pycache__/focal_loss.cpython-36.pyc | Bin 0 -> 2096 bytes
 .../__pycache__/ghm_loss.cpython-36.pyc | Bin 0 -> 5390 bytes
 .../__pycache__/iou_loss.cpython-36.pyc | Bin 0 -> 4014 bytes
 .../__pycache__/mse_loss.cpython-36.pyc | Bin 0 -> 994 bytes
 .../__pycache__/smooth_l1_loss.cpython-36.pyc | Bin 0 -> 1445 bytes
 .../losses/__pycache__/utils.cpython-36.pyc | Bin 0 -> 2665 bytes
 .../mmdet/models/losses/accuracy.py | 31 +
 .../mmdet/models/losses/balanced_l1_loss.py | 69 +
 .../mmdet/models/losses/cross_entropy_loss.py | 103 +
 .../mmdet/models/losses/focal_loss.py | 82 +
 .../mmdet/models/losses/ghm_loss.py | 167 +
 .../mmdet/models/losses/iou_loss.py | 135 +
 .../mmdet/models/losses/mse_loss.py | 25 +
 .../mmdet/models/losses/smooth_l1_loss.py | 45 +
 CDARTS_detection/mmdet/models/losses/utils.py | 97 +
 .../mmdet/models/mask_heads/__init__.py | 10 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 458 bytes
 .../__pycache__/fcn_mask_head.cpython-36.pyc | Bin 0 -> 5492 bytes
 .../fused_semantic_head.cpython-36.pyc | Bin 0 -> 2952 bytes
 .../__pycache__/grid_head.cpython-36.pyc | Bin 0 -> 9482 bytes
 .../__pycache__/htc_mask_head.cpython-36.pyc | Bin 0 -> 1407 bytes
 .../__pycache__/maskiou_head.cpython-36.pyc | Bin 0 -> 5878 bytes
 .../mmdet/models/mask_heads/fcn_mask_head.py | 179 +
 .../models/mask_heads/fused_semantic_head.py | 106 +
 .../mmdet/models/mask_heads/grid_head.py | 359 +
 .../mmdet/models/mask_heads/htc_mask_head.py | 38 +
 .../mmdet/models/mask_heads/maskiou_head.py | 181 +
 .../mmdet/models/necks/__init__.py | 9 +
 .../necks/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 433 bytes
 .../necks/__pycache__/bfp.cpython-36.pyc | Bin 0 -> 2971 bytes
 .../necks/__pycache__/fpn.cpython-36.pyc | Bin 0 -> 6607 bytes
 .../__pycache__/fpn_panet.cpython-36.pyc | Bin 0 -> 4060 bytes
 .../necks/__pycache__/hrfpn.cpython-36.pyc | Bin 0 -> 2919 bytes
 .../necks/__pycache__/nas_fpn.cpython-36.pyc | Bin 0 -> 5597 bytes
 .../__pycache__/search_pafpn.cpython-36.pyc | Bin 0 -> 3426 bytes
 .../mmdet/models/necks/auto_neck/__init__.py | 0
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 162 bytes
 .../__pycache__/build_neck.cpython-36.pyc | Bin 0 -> 568 bytes
 .../hit_neck_search.cpython-36.pyc | Bin 0 -> 1641 bytes
 .../__pycache__/hit_ops.cpython-36.pyc | Bin 0 -> 9272 bytes
 .../models/necks/auto_neck/build_neck.py | 23 +
 .../models/necks/auto_neck/hit_neck_search.py | 57 +
 .../mmdet/models/necks/auto_neck/hit_ops.py | 222 +
 CDARTS_detection/mmdet/models/necks/bfp.py | 102 +
 CDARTS_detection/mmdet/models/necks/fpn.py | 271 +
 .../mmdet/models/necks/fpn_panet.py | 169 +
 CDARTS_detection/mmdet/models/necks/hrfpn.py | 97 +
 .../mmdet/models/necks/nas_fpn.py | 191 +
 .../mmdet/models/necks/search_pafpn.py | 146 +
 .../mmdet/models/plugins/__init__.py | 4 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 293 bytes
 .../generalized_attention.cpython-36.pyc | Bin 0 -> 7550 bytes
 .../__pycache__/non_local.cpython-36.pyc | Bin 0 -> 2969 bytes
 .../models/plugins/generalized_attention.py | 384 +
 .../mmdet/models/plugins/non_local.py | 114 +
 CDARTS_detection/mmdet/models/registry.py | 9 +
 .../mmdet/models/roi_extractors/__init__.py | 3 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 238 bytes
 .../__pycache__/single_level.cpython-36.pyc | Bin 0 -> 4213 bytes
 .../models/roi_extractors/single_level.py | 107 +
 .../mmdet/models/shared_heads/__init__.py | 3 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 223 bytes
 .../__pycache__/res_layer.cpython-36.pyc | Bin 0 -> 2271 bytes
 .../mmdet/models/shared_heads/res_layer.py | 72 +
 .../mmdet/models/utils/__init__.py | 12 +
 .../utils/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 607 bytes
 .../__pycache__/conv_module.cpython-36.pyc | Bin 0 -> 4438 bytes
 .../utils/__pycache__/conv_ws.cpython-36.pyc | Bin 0 -> 1380 bytes
 .../utils/__pycache__/norm.cpython-36.pyc | Bin 0 -> 1554 bytes
 .../__pycache__/quant_conv.cpython-36.pyc | Bin 0 -> 7415 bytes
 .../utils/__pycache__/scale.cpython-36.pyc | Bin 0 -> 745 bytes
 .../__pycache__/weight_init.cpython-36.pyc | Bin 0 -> 1667 bytes
 .../mmdet/models/utils/conv_module.py | 172 +
 .../mmdet/models/utils/conv_ws.py | 46 +
 CDARTS_detection/mmdet/models/utils/norm.py | 55 +
 .../mmdet/models/utils/quant_conv.py | 239 +
 CDARTS_detection/mmdet/models/utils/scale.py | 12 +
 .../mmdet/models/utils/weight_init.py | 46 +
 CDARTS_detection/mmdet/ops/__init__.py | 19 +
 .../ops/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 917 bytes
 CDARTS_detection/mmdet/ops/dcn/__init__.py | 13 +
 .../dcn/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 665 bytes
 .../mmdet/ops/dcn/functions/__init__.py | 0
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 157 bytes
 .../__pycache__/deform_conv.cpython-36.pyc | Bin 0 -> 4905 bytes
 .../__pycache__/deform_pool.cpython-36.pyc | Bin 0 -> 1769 bytes
 .../mmdet/ops/dcn/functions/deform_conv.py | 181 +
 .../mmdet/ops/dcn/functions/deform_pool.py | 69 +
 .../mmdet/ops/dcn/modules/__init__.py | 0
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 155 bytes
 .../__pycache__/deform_conv.cpython-36.pyc | Bin 0 -> 4855 bytes
 .../__pycache__/deform_pool.cpython-36.pyc | Bin 0 -> 4209 bytes
 .../mmdet/ops/dcn/modules/deform_conv.py | 157 +
 .../mmdet/ops/dcn/modules/deform_pool.py | 172 +
 CDARTS_detection/mmdet/ops/dcn/setup.py | 15 +
 .../mmdet/ops/dcn/src/deform_conv_cuda.cpp | 695 ++
 .../ops/dcn/src/deform_conv_cuda_kernel.cu | 866 ++
 .../mmdet/ops/dcn/src/deform_pool_cuda.cpp | 87 +
 .../ops/dcn/src/deform_pool_cuda_kernel.cu | 364 +
 CDARTS_detection/mmdet/ops/gcb/__init__.py | 5 +
 .../gcb/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 219 bytes
 .../__pycache__/context_block.cpython-36.pyc | Bin 0 -> 2814 bytes
 .../mmdet/ops/gcb/context_block.py | 104 +
 .../mmdet/ops/masked_conv/__init__.py | 4 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 299 bytes
 .../ops/masked_conv/functions/__init__.py | 0
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 165 bytes
 .../__pycache__/masked_conv.cpython-36.pyc | Bin 0 -> 1913 bytes
 .../ops/masked_conv/functions/masked_conv.py | 56 +
 .../mmdet/ops/masked_conv/modules/__init__.py | 0
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 163 bytes
 .../__pycache__/masked_conv.cpython-36.pyc | Bin 0 -> 1173 bytes
 .../ops/masked_conv/modules/masked_conv.py | 30 +
 .../mmdet/ops/masked_conv/setup.py | 12 +
 .../masked_conv/src/masked_conv2d_cuda.cpp | 74 +
 .../masked_conv/src/masked_conv2d_kernel.cu | 113 +
 CDARTS_detection/mmdet/ops/nms/__init__.py | 3 +
 .../nms/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 234 bytes
 .../__pycache__/nms_wrapper.cpython-36.pyc | Bin 0 -> 2277 bytes
 CDARTS_detection/mmdet/ops/nms/nms_wrapper.py | 78 +
 CDARTS_detection/mmdet/ops/nms/setup.py | 84 +
 .../mmdet/ops/nms/src/nms_cpu.cpp | 71 +
 .../mmdet/ops/nms/src/nms_cuda.cpp | 17 +
 .../mmdet/ops/nms/src/nms_kernel.cu | 131 +
 .../mmdet/ops/nms/src/soft_nms_cpu.cpp | 10246 ++++++++++++++++
 .../mmdet/ops/nms/src/soft_nms_cpu.pyx | 127 +
 .../mmdet/ops/roi_align/__init__.py | 3 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 233 bytes
 .../__pycache__/roi_align.cpython-36.pyc | Bin 0 -> 2671 bytes
 .../mmdet/ops/roi_align/functions/__init__.py | 0
 .../ops/roi_align/functions/roi_align.py | 61 +
 .../mmdet/ops/roi_align/gradcheck.py | 30 +
 .../mmdet/ops/roi_align/modules/__init__.py | 0
 .../mmdet/ops/roi_align/modules/roi_align.py | 16 +
 .../mmdet/ops/roi_align/roi_align.py | 87 +
 CDARTS_detection/mmdet/ops/roi_align/setup.py | 12 +
 .../ops/roi_align/src/roi_align_cuda.cpp | 85 +
 .../ops/roi_align/src/roi_align_kernel.cu | 282 +
 .../mmdet/ops/roi_pool/__init__.py | 4 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 280 bytes
 .../mmdet/ops/roi_pool/functions/__init__.py | 0
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 162 bytes
 .../__pycache__/roi_pool.cpython-36.pyc | Bin 0 -> 1563 bytes
 .../mmdet/ops/roi_pool/functions/roi_pool.py | 55 +
 .../mmdet/ops/roi_pool/gradcheck.py | 15 +
 .../mmdet/ops/roi_pool/modules/__init__.py | 0
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 160 bytes
 .../__pycache__/roi_pool.cpython-36.pyc | Bin 0 -> 811 bytes
 .../mmdet/ops/roi_pool/modules/roi_pool.py | 14 +
 CDARTS_detection/mmdet/ops/roi_pool/setup.py | 12 +
 .../mmdet/ops/roi_pool/src/roi_pool_cuda.cpp | 86 +
 .../mmdet/ops/roi_pool/src/roi_pool_kernel.cu | 156 +
 .../mmdet/ops/sigmoid_focal_loss/__init__.py | 3 +
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 287 bytes
 .../sigmoid_focal_loss/functions/__init__.py | 0
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 172 bytes
 .../sigmoid_focal_loss.cpython-36.pyc | Bin 0 -> 1222 bytes
 .../functions/sigmoid_focal_loss.py | 34 +
 .../sigmoid_focal_loss/modules/__init__.py | 0
 .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 170 bytes
 .../sigmoid_focal_loss.cpython-36.pyc | Bin 0 -> 1144 bytes
 .../modules/sigmoid_focal_loss.py | 24 +
 .../mmdet/ops/sigmoid_focal_loss/setup.py | 12 +
 .../src/sigmoid_focal_loss.cpp | 43 +
 .../src/sigmoid_focal_loss_cuda.cu | 169 +
 CDARTS_detection/mmdet/utils/__init__.py | 10 +
 .../utils/__pycache__/__init__.cpython-36.pyc | Bin 0 -> 433 bytes
 .../__pycache__/collect_env.cpython-36.pyc | Bin 0 -> 1776 bytes
 .../__pycache__/flops_counter.cpython-36.pyc | Bin 0 -> 10788 bytes
 .../utils/__pycache__/logger.cpython-36.pyc | Bin 0 -> 2348 bytes
 .../utils/__pycache__/registry.cpython-36.pyc | Bin 0 -> 2652 bytes
 CDARTS_detection/mmdet/utils/collect_env.py | 63 +
 .../mmdet/utils/contextmanagers.py | 126 +
 CDARTS_detection/mmdet/utils/flops_counter.py | 433 +
 CDARTS_detection/mmdet/utils/logger.py | 66 +
 CDARTS_detection/mmdet/utils/profiling.py | 41 +
 CDARTS_detection/mmdet/utils/registry.py | 76 +
 CDARTS_detection/mmdet/utils/util_mixins.py | 105 +
 CDARTS_detection/mmdet/version.py | 5 +
 CDARTS_detection/scripts/train_hit_det.sh | 15 +
 CDARTS_detection/setup.py | 112 +
 CDARTS_detection/test.py | 205 +
 CDARTS_detection/tools/analyze_logs.py | 178 +
 CDARTS_detection/tools/coco_eval.py | 28 +
 .../tools/convert_datasets/pascal_voc.py | 140 +
 CDARTS_detection/tools/detectron2pytorch.py | 88 +
 CDARTS_detection/tools/dist_test.sh | 10 +
 CDARTS_detection/tools/dist_train.sh | 9 +
 CDARTS_detection/tools/get_flops.py | 52 +
 CDARTS_detection/tools/publish_model.py | 34 +
 CDARTS_detection/tools/slurm_test.sh | 23 +
 CDARTS_detection/tools/slurm_train.sh | 23 +
 CDARTS_detection/tools/test.py | 313 +
 .../tools/upgrade_model_version.py | 42 +
 CDARTS_detection/tools/voc_eval.py | 62 +
 CDARTS_detection/train.py | 112 +
 CDARTS_detection/train.sh | 14 +
 CDARTS_segmentation/LICENSE | 21 +
 CDARTS_segmentation/README.md | 48 +
 CDARTS_segmentation/configs/ade/cydas.yaml | 37 +
 .../configs/cityscapes/cydas.yaml | 24 +
 CDARTS_segmentation/dataloaders/__init__.py | 285 +
 .../dataloaders/custom_transforms.py | 326 +
 .../dataloaders/dataloader_utils.py | 104 +
 .../dataloaders/datasets/__init__.py | 0
 .../dataloaders/datasets/cityscapes.py | 153 +
 .../dataloaders/datasets/coco.py | 160 +
 .../dataloaders/datasets/combine_dbs.py | 100 +
 .../dataloaders/datasets/kd.py | 139 +
 .../dataloaders/datasets/pascal.py | 144 +
 .../dataloaders/datasets/sbd.py | 128 +
 .../dataloaders/segdatasets/__init__.py | 4 +
 .../dataloaders/segdatasets/base_dataset.py | 182 +
 .../dataloaders/segdatasets/cityscapes.py | 150 +
 .../segdatasets/cityscapes_panoptic.py | 130 +
 .../dataloaders/segdatasets/coco_panoptic.py | 299 +
 .../dataloaders/segdatasets/utils.py | 18 +
 .../dataloaders/transforms/__init__.py | 3 +
 .../dataloaders/transforms/build.py | 57 +
 .../transforms/pre_augmentation_transforms.py | 92 +
 .../transforms/target_transforms.py | 200 +
 .../dataloaders/transforms/transforms.py | 172 +
 CDARTS_segmentation/install.sh | 33 +
 CDARTS_segmentation/segmentation/__init__.py | 0
 .../segmentation/config/__init__.py | 4 +
 .../segmentation/config/default.py | 306 +
 .../segmentation/config/hrnet_config.py | 130 +
 .../segmentation/data/__init__.py | 2 +
 .../segmentation/data/build.py | 159 +
 .../segmentation/data/datasets/__init__.py | 4 +
 .../data/datasets/base_dataset.py | 182 +
 .../segmentation/data/datasets/cityscapes.py | 150 +
 .../data/datasets/cityscapes_panoptic.py | 130 +
 .../data/datasets/coco_panoptic.py | 299 +
 .../segmentation/data/datasets/utils.py | 18 +
 .../segmentation/data/samplers/__init__.py | 1 +
 .../data/samplers/distributed_sampler.py | 90 +
 .../segmentation/data/transforms/__init__.py | 3 +
 .../segmentation/data/transforms/build.py | 57 +
 .../transforms/pre_augmentation_transforms.py | 92 +
 .../data/transforms/target_transforms.py | 200 +
 .../data/transforms/transforms.py | 172 +
 .../segmentation/evaluation/__init__.py | 5 +
 .../segmentation/evaluation/coco_instance.py | 107 +
 .../segmentation/evaluation/coco_panoptic.py | 137 +
 .../segmentation/evaluation/instance.py | 97 +
 .../segmentation/evaluation/panoptic.py | 127 +
 .../segmentation/evaluation/semantic.py | 106 +
 .../segmentation/model/__init__.py | 1 +
 .../segmentation/model/backbone/__init__.py | 5 +
 .../segmentation/model/backbone/hrnet.py | 526 +
 .../segmentation/model/backbone/mnasnet.py | 280 +
 .../segmentation/model/backbone/mobilenet.py | 214 +
 .../segmentation/model/backbone/resnet.py | 351 +
 .../segmentation/model/backbone/xception.py | 237 +
 .../segmentation/model/build.py | 133 +
 .../segmentation/model/decoder/__init__.py | 4 +
 .../segmentation/model/decoder/aspp.py | 76 +
 .../segmentation/model/decoder/conv_module.py | 73 +
 .../segmentation/model/decoder/deeplabv3.py | 37 +
 .../model/decoder/deeplabv3plus.py | 59 +
 .../model/decoder/panoptic_deeplab.py | 162 +
 .../segmentation/model/loss/__init__.py | 7 +
 .../segmentation/model/loss/criterion.py | 112 +
 .../segmentation/model/meta_arch/__init__.py | 3 +
 .../segmentation/model/meta_arch/base.py | 64 +
 .../segmentation/model/meta_arch/deeplabv3.py | 58 +
 .../model/meta_arch/deeplabv3plus.py | 63 +
 .../model/meta_arch/panoptic_deeplab.py | 135 +
 .../model/post_processing/__init__.py | 3 +
 .../post_processing/evaluation_format.py | 60 +
 .../instance_post_processing.py | 237 +
 .../semantic_post_processing.py | 25 +
 .../segmentation/solver/__init__.py | 3 +
 .../segmentation/solver/build.py | 185 +
 .../segmentation/solver/lr_scheduler.py | 162 +
 .../segmentation/solver/utils.py | 29 +
 .../segmentation/utils/__init__.py | 6 +
 .../segmentation/utils/comm.py | 257 +
 .../segmentation/utils/debug.py | 196 +
 CDARTS_segmentation/segmentation/utils/env.py | 35 +
 .../segmentation/utils/flow_vis.py | 134 +
 .../segmentation/utils/logger.py | 219 +
 .../segmentation/utils/save_annotation.py | 360 +
 .../segmentation/utils/test_utils.py | 137 +
 .../segmentation/utils/utils.py | 52 +
 CDARTS_segmentation/tools/__init__.py | 0
 .../tools/datasets/BaseDataset.py | 161 +
 .../tools/datasets/__init__.py | 6 +
 .../tools/datasets/bdd/__init__.py | 3 +
 CDARTS_segmentation/tools/datasets/bdd/bdd.py | 41 +
 .../tools/datasets/camvid/__init__.py | 3 +
 .../tools/datasets/camvid/camvid.py | 17 +
 .../tools/datasets/cityscapes/__init__.py | 3 +
 .../tools/datasets/cityscapes/cityscapes.py | 42 +
 .../datasets/cityscapes/cityscapes_test.txt | 1525 +++
 .../cityscapes/cityscapes_train_fine.txt | 2975 +++++
 .../cityscapes/cityscapes_val_fine.txt | 500 +
 .../tools/datasets/coco/__init__.py | 3 +
 .../tools/datasets/coco/coco.py | 160 +
 CDARTS_segmentation/tools/engine/__init__.py | 0
 CDARTS_segmentation/tools/engine/evaluator.py | 339 +
 CDARTS_segmentation/tools/engine/logger.py | 90 +
 CDARTS_segmentation/tools/engine/tester.py | 312 +
 CDARTS_segmentation/tools/seg_opr/__init__.py | 0
 CDARTS_segmentation/tools/seg_opr/loss_opr.py | 199 +
 CDARTS_segmentation/tools/seg_opr/metric.py | 87 +
 CDARTS_segmentation/tools/utils/__init__.py | 0
 CDARTS_segmentation/tools/utils/cal_model.py | 216 +
 .../tools/utils/darts_utils.py | 352 +
 CDARTS_segmentation/tools/utils/dist_utils.py | 81 +
 CDARTS_segmentation/tools/utils/genotypes.py | 75 +
 CDARTS_segmentation/tools/utils/img_utils.py | 185 +
 CDARTS_segmentation/tools/utils/init_func.py | 67 +
 .../tools/utils/lr_scheduler.py | 56 +
 CDARTS_segmentation/tools/utils/metrics.py | 46 +
 CDARTS_segmentation/tools/utils/pyt_utils.py | 291 +
 CDARTS_segmentation/tools/utils/visualize.py | 89 +
 .../tools/vis/panoptic_coco_categories.json | 1 +
 .../tools/vis/vis_cityscapes.py | 106 +
 CDARTS_segmentation/tools/vis/vis_coco.py | 103 +
 CDARTS_segmentation/train/_init_paths.py | 19 +
 CDARTS_segmentation/train/att_sa.py | 231 +
 CDARTS_segmentation/train/builder.py | 872 ++
 CDARTS_segmentation/train/cal_model.py | 68 +
 CDARTS_segmentation/train/config_test.py | 77 +
 CDARTS_segmentation/train/config_train.py | 115 +
 .../train/configs/ADE20K/512.yaml | 42 +
 .../train/configs/ADE20K/base.yaml | 42 +
 .../Base-PanopticDeepLab-OS16.yaml | 65 +
 .../Cityscapes-PanopticSegmentation/base.yaml | 65 +
 ...s16_mg124_poly_90k_bs32_crop_512_1024.yaml | 20 +
 ...24_poly_90k_bs32_crop_512_1024_dsconv.yaml | 24 +
 CDARTS_segmentation/train/cydas.py | 435 +
 CDARTS_segmentation/train/dataloader.py | 93 +
 CDARTS_segmentation/train/eval.py | 68 +
 CDARTS_segmentation/train/genotypes.py | 13 +
 .../train/latency_lookup_table.npy | Bin 0 -> 50122 bytes
 CDARTS_segmentation/train/launch.py | 274 +
 CDARTS_segmentation/train/layers.py | 116 +
 CDARTS_segmentation/train/loss.py | 81 +
 CDARTS_segmentation/train/operations.py | 948 ++
 CDARTS_segmentation/train/run_det2.sh | 3 +
 CDARTS_segmentation/train/seg_metrics.py | 98 +
 CDARTS_segmentation/train/seg_oprs.py | 558 +
 CDARTS_segmentation/train/slimmable_ops.py | 72 +
 CDARTS_segmentation/train/test.py | 84 +
 CDARTS_segmentation/train/test_seg.py | 191 +
 .../train/train_ade20k_cydas.py | 569 +
 CDARTS_segmentation/train/train_cydas.py | 516 +
 CDARTS_segmentation/train/vis_arch.py | 44 +
 LICENSE | 23 +
 README.md | 209 +
 benchmark201/configs/config.py | 228 +
 benchmark201/core/augment_function.py | 130 +
 benchmark201/core/pretrain_function.py | 342 +
 benchmark201/core/search_function.py | 241 +
 benchmark201/datasets/cifar.py | 103 +
 benchmark201/datasets/data_utils.py | 393 +
 benchmark201/datasets/imagenet.py | 102 +
 benchmark201/models/augment_cells.py | 49 +
 benchmark201/models/aux_head.py | 99 +
 benchmark201/models/cdarts_controller.py | 374 +
 benchmark201/models/loss.py | 36 +
 benchmark201/models/model_augment.py | 48 +
 benchmark201/models/model_test.py | 169 +
 benchmark201/models/ops.py | 184 +
 benchmark201/models/search_cells.py | 117 +
 benchmark201/run_search_cifar_1gpu.sh | 20 +
 benchmark201/search.py | 250 +
 .../search/cifar10-search/cifar10-search.log | 7157 +++++++++++
 .../search/cifar10-search/tb/readme.md | 0
 .../imagenet-search/imagenet-search.log | 0
 .../search/imagenet-search/tb/readme.md | 0
 benchmark201/utils/genotypes.py | 352 +
 benchmark201/utils/get_info.py | 41 +
 benchmark201/utils/utils.py | 134 +
 benchmark201/utils/visualize.py | 74 +
 .../cifar10-retrain/cifar10-retrain.log | 0
 .../retrain/cifar10-retrain/tb/readme.md | 0
 .../imagenet-retrain/imagenet-retrain.log | 0
 .../retrain/imagenet-retrain/tb/readme.md | 0
 .../search/cifar10-search/cifar10-search.log | 0
 .../search/cifar10-search/tb/readme.md | 0
 .../imagenet-search/imagenet-search.log | 0
 .../search/imagenet-search/tb/readme.md | 0
 lib/config.py | 229 +
 lib/core/augment_function.py | 130 +
 lib/core/pretrain_function.py | 342 +
 lib/core/search_function.py | 254 +
 lib/datasets/cifar.py | 102 +
 lib/datasets/data_utils.py | 393 +
 lib/datasets/imagenet.py | 102 +
 lib/models/augment_cells.py | 44 +
 lib/models/aux_head.py | 99 +
 lib/models/cdarts_controller.py | 807 ++
 lib/models/loss.py | 36 +
 lib/models/model_augment.py | 57 +
 lib/models/model_test.py | 167 +
 lib/models/ops.py | 272 +
 lib/models/search_cells.py | 53 +
 lib/utils/count_flops.py | 46 +
 lib/utils/genotypes.py | 159 +
 lib/utils/utils.py | 117 +
 lib/utils/visualize.py | 74 +
 requirements | 5 +
 827 files changed, 94397 insertions(+)

 create mode 100644 CDARTS/_init_paths.py
 create mode 100644 CDARTS/cells/cifar_genotype.json
 create mode 100644 CDARTS/cells/dartsv1_genotype.json
 create mode 100644 CDARTS/cells/dartsv2_genotype.json
 create mode 100644 CDARTS/cells/imagenet_genotype.json
 create mode 100644 CDARTS/cells/pcdarts_cifar_genotype.json
 create mode 100644 CDARTS/cells/pcdarts_imagenet_genotype.json
 create mode 100644 CDARTS/cells/pdarts_genotype.json
 create mode 100644 CDARTS/retrain.py
 create mode 100644 CDARTS/scripts/run_retrain_cifar_1gpu.sh
 create mode 100644 CDARTS/scripts/run_retrain_cifar_4gpus.sh
 create mode 100644 CDARTS/scripts/run_retrain_imagenet.sh
 create mode 100644 CDARTS/scripts/run_search_cifar_1gpu.sh
 create mode 100644 CDARTS/scripts/run_search_cifar_4gpus.sh
 create mode 100644 CDARTS/scripts/run_search_imagenet.sh
 create mode 100644 CDARTS/scripts/run_test_cifar.sh
 create mode 100644 CDARTS/scripts/run_test_imagenet.sh
 create mode 100644 CDARTS/search.py
 create mode 100644 CDARTS/test.py
 create mode 100644 CDARTS_detection/README.md
 create mode 100644 CDARTS_detection/compile.sh
 create mode 100644 CDARTS_detection/configs/CyDAS_retinanet_1x.py
 create mode 100644 CDARTS_detection/env.sh
 create mode 100644 CDARTS_detection/mmcv/__init__.py
 create mode 100644 CDARTS_detection/mmcv/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/__pycache__/opencv_info.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/__pycache__/version.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/_ext.cpython-36m-x86_64-linux-gnu.so
 create mode 100644 CDARTS_detection/mmcv/arraymisc/__init__.py
 create mode 100644 CDARTS_detection/mmcv/arraymisc/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/arraymisc/__pycache__/quantization.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/arraymisc/quantization.py
 create mode 100644 CDARTS_detection/mmcv/cnn/__init__.py
 create mode 100644 CDARTS_detection/mmcv/cnn/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/cnn/__pycache__/alexnet.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/cnn/__pycache__/resnet.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/cnn/__pycache__/vgg.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/cnn/__pycache__/weight_init.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/cnn/alexnet.py
 create mode 100644 CDARTS_detection/mmcv/cnn/resnet.py
 create mode 100644 CDARTS_detection/mmcv/cnn/vgg.py
 create mode 100644 CDARTS_detection/mmcv/cnn/weight_init.py
 create mode 100644 CDARTS_detection/mmcv/fileio/__init__.py
 create mode 100644 CDARTS_detection/mmcv/fileio/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/fileio/__pycache__/io.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/fileio/__pycache__/parse.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/fileio/handlers/__init__.py
 create mode 100644 CDARTS_detection/mmcv/fileio/handlers/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/fileio/handlers/__pycache__/base.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/fileio/handlers/__pycache__/json_handler.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/fileio/handlers/__pycache__/pickle_handler.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/fileio/handlers/__pycache__/yaml_handler.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/fileio/handlers/base.py
 create mode 100644 CDARTS_detection/mmcv/fileio/handlers/json_handler.py
 create mode 100644 CDARTS_detection/mmcv/fileio/handlers/pickle_handler.py
 create mode 100644 CDARTS_detection/mmcv/fileio/handlers/yaml_handler.py
 create mode 100644 CDARTS_detection/mmcv/fileio/io.py
 create mode 100644 CDARTS_detection/mmcv/fileio/parse.py
 create mode 100644 CDARTS_detection/mmcv/image/__init__.py
 create mode 100644 CDARTS_detection/mmcv/image/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/image/__pycache__/io.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/image/io.py
 create mode 100644 CDARTS_detection/mmcv/image/transforms/__init__.py
 create mode 100644 CDARTS_detection/mmcv/image/transforms/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/image/transforms/__pycache__/colorspace.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/image/transforms/__pycache__/geometry.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/image/transforms/__pycache__/normalize.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/image/transforms/__pycache__/resize.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/image/transforms/colorspace.py
 create mode 100644 CDARTS_detection/mmcv/image/transforms/geometry.py
 create mode 100644 CDARTS_detection/mmcv/image/transforms/normalize.py
 create mode 100644 CDARTS_detection/mmcv/image/transforms/resize.py
 create mode 100644 CDARTS_detection/mmcv/opencv_info.py
 create mode 100644 CDARTS_detection/mmcv/parallel/__init__.py
 create mode 100644 CDARTS_detection/mmcv/parallel/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/parallel/__pycache__/_functions.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/parallel/__pycache__/collate.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/parallel/__pycache__/data_container.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/parallel/__pycache__/data_parallel.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/parallel/__pycache__/distributed.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/parallel/__pycache__/scatter_gather.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/parallel/_functions.py
 create mode 100644 CDARTS_detection/mmcv/parallel/collate.py
 create mode 100644 CDARTS_detection/mmcv/parallel/data_container.py
 create mode 100644 CDARTS_detection/mmcv/parallel/data_parallel.py
 create mode 100644 CDARTS_detection/mmcv/parallel/distributed.py
 create mode 100644 CDARTS_detection/mmcv/parallel/scatter_gather.py
 create mode 100644 CDARTS_detection/mmcv/runner/__init__.py
 create mode 100644 CDARTS_detection/mmcv/runner/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/__pycache__/checkpoint.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/__pycache__/dist_utils.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/__pycache__/log_buffer.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/__pycache__/parallel_test.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/__pycache__/priority.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/__pycache__/runner.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/__pycache__/utils.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/checkpoint.py
 create mode 100644 CDARTS_detection/mmcv/runner/dist_utils.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/__init__.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/__pycache__/checkpoint.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/__pycache__/closure.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/__pycache__/hook.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/__pycache__/iter_timer.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/__pycache__/lr_updater.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/__pycache__/memory.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/__pycache__/optimizer.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/__pycache__/sampler_seed.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/checkpoint.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/closure.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/hook.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/iter_timer.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/logger/__init__.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/base.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/pavi.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/tensorboard.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/text.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/logger/base.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/logger/pavi.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/logger/tensorboard.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/logger/text.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/lr_updater.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/memory.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/optimizer.py
 create mode 100644 CDARTS_detection/mmcv/runner/hooks/sampler_seed.py
 create mode 100644 CDARTS_detection/mmcv/runner/log_buffer.py
 create mode 100644 CDARTS_detection/mmcv/runner/parallel_test.py
 create mode 100644 CDARTS_detection/mmcv/runner/priority.py
 create mode 100644 CDARTS_detection/mmcv/runner/runner.py
 create mode 100644 CDARTS_detection/mmcv/runner/utils.py
 create mode 100644 CDARTS_detection/mmcv/utils/__init__.py
 create mode 100644 CDARTS_detection/mmcv/utils/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/utils/__pycache__/config.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/utils/__pycache__/misc.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/utils/__pycache__/path.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/utils/__pycache__/progressbar.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/utils/__pycache__/timer.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/utils/config.py
 create mode 100644 CDARTS_detection/mmcv/utils/misc.py
 create mode 100644 CDARTS_detection/mmcv/utils/path.py
 create mode 100644 CDARTS_detection/mmcv/utils/progressbar.py
 create mode 100644 CDARTS_detection/mmcv/utils/timer.py
 create mode 100644 CDARTS_detection/mmcv/version.py
 create mode 100644 CDARTS_detection/mmcv/video/__init__.py
 create mode 100644 CDARTS_detection/mmcv/video/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/video/__pycache__/io.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/video/__pycache__/optflow.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/video/__pycache__/processing.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/video/io.py
 create mode 100644 CDARTS_detection/mmcv/video/optflow.py
 create mode 100644 CDARTS_detection/mmcv/video/optflow_warp/__init__.py
 create mode 100644 CDARTS_detection/mmcv/video/optflow_warp/flow_warp.cpp
 create mode 100644 CDARTS_detection/mmcv/video/optflow_warp/flow_warp.hpp
 create mode 100644 CDARTS_detection/mmcv/video/optflow_warp/flow_warp_module.cpp
 create mode 100644 CDARTS_detection/mmcv/video/optflow_warp/flow_warp_module.pyx
 create mode 100644 CDARTS_detection/mmcv/video/processing.py
 create mode 100644 CDARTS_detection/mmcv/visualization/__init__.py
 create mode 100644 CDARTS_detection/mmcv/visualization/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/visualization/__pycache__/color.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/visualization/__pycache__/image.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/visualization/__pycache__/optflow.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmcv/visualization/color.py
 create mode 100644 CDARTS_detection/mmcv/visualization/image.py
 create mode 100644 CDARTS_detection/mmcv/visualization/optflow.py
 create mode 100644 CDARTS_detection/mmdet.egg-info/PKG-INFO
 create mode 100644 CDARTS_detection/mmdet.egg-info/SOURCES.txt
 create mode 100644 CDARTS_detection/mmdet.egg-info/dependency_links.txt
 create mode 100644 CDARTS_detection/mmdet.egg-info/not-zip-safe
 create mode 100644 CDARTS_detection/mmdet.egg-info/requires.txt
 create mode 100644 CDARTS_detection/mmdet.egg-info/top_level.txt
 create mode 100644 CDARTS_detection/mmdet/__init__.py
 create mode 100644 CDARTS_detection/mmdet/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/__pycache__/version.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/apis/__init__.py
 create mode 100644 CDARTS_detection/mmdet/apis/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/apis/__pycache__/env.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/apis/__pycache__/inference.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/apis/__pycache__/train.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/apis/env.py
 create mode 100644 CDARTS_detection/mmdet/apis/inference.py
 create mode 100644 CDARTS_detection/mmdet/apis/train.py
 create mode 100644 CDARTS_detection/mmdet/core/__init__.py
 create mode 100644 CDARTS_detection/mmdet/core/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/anchor/__init__.py
 create mode 100644 CDARTS_detection/mmdet/core/anchor/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/anchor/__pycache__/anchor_generator.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/anchor/__pycache__/anchor_target.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/anchor/__pycache__/guided_anchor_target.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/anchor/anchor_generator.py
 create mode 100644 CDARTS_detection/mmdet/core/anchor/anchor_target.py
 create mode 100644 CDARTS_detection/mmdet/core/anchor/guided_anchor_target.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/__init__.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/__pycache__/assign_sampling.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/__pycache__/bbox_target.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/__pycache__/geometry.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/__pycache__/transforms.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/assign_sampling.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/assigners/__init__.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/assigners/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/assigners/__pycache__/approx_max_iou_assigner.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/assigners/__pycache__/assign_result.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/assigners/__pycache__/base_assigner.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/assigners/__pycache__/max_iou_assigner.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/assigners/assign_result.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/assigners/base_assigner.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/assigners/max_iou_assigner.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/bbox_target.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/geometry.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/__init__.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/__pycache__/base_sampler.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/__pycache__/combined_sampler.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/__pycache__/instance_balanced_pos_sampler.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/__pycache__/iou_balanced_neg_sampler.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/__pycache__/ohem_sampler.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/__pycache__/pseudo_sampler.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/__pycache__/random_sampler.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/__pycache__/sampling_result.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/base_sampler.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/combined_sampler.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/ohem_sampler.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/pseudo_sampler.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/random_sampler.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/samplers/sampling_result.py
 create mode 100644 CDARTS_detection/mmdet/core/bbox/transforms.py
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/__init__.py
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/__pycache__/bbox_overlaps.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/__pycache__/class_names.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/__pycache__/eval_hooks.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/__pycache__/mean_ap.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/__pycache__/recall.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/bbox_overlaps.py
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/class_names.py
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/coco_utils.py
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/eval_hooks.py
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/mean_ap.py
 create mode 100644 CDARTS_detection/mmdet/core/evaluation/recall.py
 create mode 100644 CDARTS_detection/mmdet/core/fp16/__init__.py
 create mode 100644 CDARTS_detection/mmdet/core/fp16/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/fp16/__pycache__/decorators.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/fp16/__pycache__/hooks.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/fp16/__pycache__/utils.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/fp16/decorators.py
 create mode 100644 CDARTS_detection/mmdet/core/fp16/hooks.py
 create mode 100644 CDARTS_detection/mmdet/core/fp16/utils.py
 create mode 100644 CDARTS_detection/mmdet/core/mask/__init__.py
 create mode 100644 CDARTS_detection/mmdet/core/mask/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/mask/__pycache__/mask_target.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/mask/__pycache__/utils.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/mask/mask_target.py
 create mode 100644 CDARTS_detection/mmdet/core/mask/utils.py
 create mode 100644 CDARTS_detection/mmdet/core/post_processing/__init__.py
 create mode 100644 CDARTS_detection/mmdet/core/post_processing/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/post_processing/__pycache__/bbox_nms.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/post_processing/__pycache__/merge_augs.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/post_processing/bbox_nms.py
 create mode 100644 CDARTS_detection/mmdet/core/post_processing/merge_augs.py
 create mode 100644 CDARTS_detection/mmdet/core/utils/__init__.py
 create mode 100644 CDARTS_detection/mmdet/core/utils/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/utils/__pycache__/dist_utils.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/utils/__pycache__/misc.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/core/utils/dist_utils.py
 create mode 100644 CDARTS_detection/mmdet/core/utils/misc.py
 create mode 100644 CDARTS_detection/mmdet/datasets/__init__.py
 create mode 100644 CDARTS_detection/mmdet/datasets/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/__pycache__/builder.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/__pycache__/cityscapes.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/__pycache__/coco.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/__pycache__/custom.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/__pycache__/dataset_wrappers.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/__pycache__/registry.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/__pycache__/voc.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/__pycache__/wider_face.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/__pycache__/xml_style.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/builder.py
 create mode 100644 CDARTS_detection/mmdet/datasets/cityscapes.py
 create mode 100644 CDARTS_detection/mmdet/datasets/coco.py
 create mode 100644 CDARTS_detection/mmdet/datasets/custom.py
 create mode 100644 CDARTS_detection/mmdet/datasets/dataset_wrappers.py
 create mode 100644 CDARTS_detection/mmdet/datasets/loader/__init__.py
 create mode 100644 CDARTS_detection/mmdet/datasets/loader/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/loader/__pycache__/build_loader.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/loader/__pycache__/sampler.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/loader/build_loader.py
 create mode 100644 CDARTS_detection/mmdet/datasets/loader/sampler.py
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/__init__.py
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/__pycache__/compose.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/__pycache__/formating.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/__pycache__/loading.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/__pycache__/test_aug.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/__pycache__/transforms.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/compose.py
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/formating.py
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/loading.py
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/test_aug.py
 create mode 100644 CDARTS_detection/mmdet/datasets/pipelines/transforms.py
 create mode 100644 CDARTS_detection/mmdet/datasets/registry.py
 create mode 100644 CDARTS_detection/mmdet/datasets/transforms.py
 create mode 100644 CDARTS_detection/mmdet/datasets/utils.py
 create mode 100644 CDARTS_detection/mmdet/datasets/voc.py
 create mode 100644 CDARTS_detection/mmdet/datasets/wider_face.py
 create mode 100644 CDARTS_detection/mmdet/datasets/xml_style.py
 create mode 100644 CDARTS_detection/mmdet/models/__init__.py
 create mode 100644 CDARTS_detection/mmdet/models/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/models/__pycache__/builder.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/models/__pycache__/registry.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/__init__.py
 create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/__pycache__/__init__.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/__pycache__/anchor_head.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/__pycache__/fcos_head.cpython-36.pyc
 create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/__pycache__/ga_retina_head.cpython-36.pyc
 create mode 100644
CDARTS_detection/mmdet/models/anchor_heads/__pycache__/ga_rpn_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/__pycache__/guided_anchor_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/__pycache__/retina_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/__pycache__/rpn_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/__pycache__/ssd_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/anchor_head.py create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/fcos_head.py create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/ga_retina_head.py create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/ga_rpn_head.py create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/guided_anchor_head.py create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/retina_head.py create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/rpn_head.py create mode 100644 CDARTS_detection/mmdet/models/anchor_heads/ssd_head.py create mode 100644 CDARTS_detection/mmdet/models/backbones/__init__.py create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/builder.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/detnas.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/dropblock.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/efficientnet.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/efficientnet_builder.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/fbnet.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/fbnet_arch.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/fbnet_blocks.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/feature_hooks.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/hrnet.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/mnasnet.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/mobilenetv2.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/mobilenetv3.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/resnet.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/resnext.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/ssd_vgg.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/__pycache__/utils.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/backbones/builder.py create mode 100644 CDARTS_detection/mmdet/models/backbones/detnas.py create mode 100644 CDARTS_detection/mmdet/models/backbones/dropblock.py create mode 100644 CDARTS_detection/mmdet/models/backbones/efficientnet.py create mode 100644 CDARTS_detection/mmdet/models/backbones/efficientnet_builder.py create mode 100644 CDARTS_detection/mmdet/models/backbones/fbnet.py create mode 100644 CDARTS_detection/mmdet/models/backbones/fbnet_arch.py create mode 100644 CDARTS_detection/mmdet/models/backbones/fbnet_blocks.py create mode 100644 
CDARTS_detection/mmdet/models/backbones/feature_hooks.py create mode 100644 CDARTS_detection/mmdet/models/backbones/hrnet.py create mode 100644 CDARTS_detection/mmdet/models/backbones/mnasnet.py create mode 100644 CDARTS_detection/mmdet/models/backbones/mobilenetv2.py create mode 100644 CDARTS_detection/mmdet/models/backbones/mobilenetv3.py create mode 100644 CDARTS_detection/mmdet/models/backbones/resnet.py create mode 100644 CDARTS_detection/mmdet/models/backbones/resnext.py create mode 100644 CDARTS_detection/mmdet/models/backbones/ssd_vgg.py create mode 100644 CDARTS_detection/mmdet/models/backbones/utils.py create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/__init__.py create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/__pycache__/bbox_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/__pycache__/convfc_bbox_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/__pycache__/double_bbox_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/auto_head/__init__.py create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/auto_head/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/auto_head/__pycache__/build_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/auto_head/__pycache__/mbblock_head_search.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/auto_head/__pycache__/mbblock_ops.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/auto_head/build_head.py create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_head_search.py create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_ops.py create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/bbox_head.py create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/convfc_bbox_head.py create mode 100644 CDARTS_detection/mmdet/models/bbox_heads/double_bbox_head.py create mode 100644 CDARTS_detection/mmdet/models/builder.py create mode 100644 CDARTS_detection/mmdet/models/detectors/__init__.py create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/base.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/cascade_rcnn.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/double_head_rcnn.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/fast_rcnn.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/faster_rcnn.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/fcos.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/grid_rcnn.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/htc.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/mask_rcnn.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/mask_scoring_rcnn.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/retinanet.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/rpn.cpython-36.pyc create mode 100644 
CDARTS_detection/mmdet/models/detectors/__pycache__/single_stage.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/test_mixins.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/__pycache__/two_stage.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/detectors/base.py create mode 100644 CDARTS_detection/mmdet/models/detectors/cascade_rcnn.py create mode 100644 CDARTS_detection/mmdet/models/detectors/double_head_rcnn.py create mode 100644 CDARTS_detection/mmdet/models/detectors/fast_rcnn.py create mode 100644 CDARTS_detection/mmdet/models/detectors/faster_rcnn.py create mode 100644 CDARTS_detection/mmdet/models/detectors/fcos.py create mode 100644 CDARTS_detection/mmdet/models/detectors/grid_rcnn.py create mode 100644 CDARTS_detection/mmdet/models/detectors/htc.py create mode 100644 CDARTS_detection/mmdet/models/detectors/mask_rcnn.py create mode 100644 CDARTS_detection/mmdet/models/detectors/mask_scoring_rcnn.py create mode 100644 CDARTS_detection/mmdet/models/detectors/retinanet.py create mode 100644 CDARTS_detection/mmdet/models/detectors/rpn.py create mode 100644 CDARTS_detection/mmdet/models/detectors/single_stage.py create mode 100644 CDARTS_detection/mmdet/models/detectors/test_mixins.py create mode 100644 CDARTS_detection/mmdet/models/detectors/two_stage.py create mode 100644 CDARTS_detection/mmdet/models/losses/__init__.py create mode 100644 CDARTS_detection/mmdet/models/losses/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/losses/__pycache__/accuracy.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/losses/__pycache__/balanced_l1_loss.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/losses/__pycache__/cross_entropy_loss.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/losses/__pycache__/focal_loss.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/losses/__pycache__/ghm_loss.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/losses/__pycache__/iou_loss.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/losses/__pycache__/mse_loss.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/losses/__pycache__/smooth_l1_loss.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/losses/__pycache__/utils.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/losses/accuracy.py create mode 100644 CDARTS_detection/mmdet/models/losses/balanced_l1_loss.py create mode 100644 CDARTS_detection/mmdet/models/losses/cross_entropy_loss.py create mode 100644 CDARTS_detection/mmdet/models/losses/focal_loss.py create mode 100644 CDARTS_detection/mmdet/models/losses/ghm_loss.py create mode 100644 CDARTS_detection/mmdet/models/losses/iou_loss.py create mode 100644 CDARTS_detection/mmdet/models/losses/mse_loss.py create mode 100644 CDARTS_detection/mmdet/models/losses/smooth_l1_loss.py create mode 100644 CDARTS_detection/mmdet/models/losses/utils.py create mode 100644 CDARTS_detection/mmdet/models/mask_heads/__init__.py create mode 100644 CDARTS_detection/mmdet/models/mask_heads/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/mask_heads/__pycache__/fcn_mask_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/mask_heads/__pycache__/fused_semantic_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/mask_heads/__pycache__/grid_head.cpython-36.pyc create mode 100644 
CDARTS_detection/mmdet/models/mask_heads/__pycache__/htc_mask_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/mask_heads/__pycache__/maskiou_head.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/mask_heads/fcn_mask_head.py create mode 100644 CDARTS_detection/mmdet/models/mask_heads/fused_semantic_head.py create mode 100644 CDARTS_detection/mmdet/models/mask_heads/grid_head.py create mode 100644 CDARTS_detection/mmdet/models/mask_heads/htc_mask_head.py create mode 100644 CDARTS_detection/mmdet/models/mask_heads/maskiou_head.py create mode 100644 CDARTS_detection/mmdet/models/necks/__init__.py create mode 100644 CDARTS_detection/mmdet/models/necks/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/necks/__pycache__/bfp.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/necks/__pycache__/fpn.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/necks/__pycache__/fpn_panet.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/necks/__pycache__/hrfpn.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/necks/__pycache__/nas_fpn.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/necks/__pycache__/search_pafpn.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/necks/auto_neck/__init__.py create mode 100644 CDARTS_detection/mmdet/models/necks/auto_neck/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/necks/auto_neck/__pycache__/build_neck.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/necks/auto_neck/__pycache__/hit_neck_search.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/necks/auto_neck/__pycache__/hit_ops.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/necks/auto_neck/build_neck.py create mode 100644 CDARTS_detection/mmdet/models/necks/auto_neck/hit_neck_search.py create mode 100644 CDARTS_detection/mmdet/models/necks/auto_neck/hit_ops.py create mode 100644 CDARTS_detection/mmdet/models/necks/bfp.py create mode 100644 CDARTS_detection/mmdet/models/necks/fpn.py create mode 100644 CDARTS_detection/mmdet/models/necks/fpn_panet.py create mode 100644 CDARTS_detection/mmdet/models/necks/hrfpn.py create mode 100644 CDARTS_detection/mmdet/models/necks/nas_fpn.py create mode 100644 CDARTS_detection/mmdet/models/necks/search_pafpn.py create mode 100644 CDARTS_detection/mmdet/models/plugins/__init__.py create mode 100644 CDARTS_detection/mmdet/models/plugins/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/plugins/__pycache__/generalized_attention.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/plugins/__pycache__/non_local.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/plugins/generalized_attention.py create mode 100644 CDARTS_detection/mmdet/models/plugins/non_local.py create mode 100644 CDARTS_detection/mmdet/models/registry.py create mode 100644 CDARTS_detection/mmdet/models/roi_extractors/__init__.py create mode 100644 CDARTS_detection/mmdet/models/roi_extractors/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/roi_extractors/__pycache__/single_level.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/roi_extractors/single_level.py create mode 100644 CDARTS_detection/mmdet/models/shared_heads/__init__.py create mode 100644 CDARTS_detection/mmdet/models/shared_heads/__pycache__/__init__.cpython-36.pyc create mode 100644 
CDARTS_detection/mmdet/models/shared_heads/__pycache__/res_layer.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/shared_heads/res_layer.py create mode 100644 CDARTS_detection/mmdet/models/utils/__init__.py create mode 100644 CDARTS_detection/mmdet/models/utils/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/utils/__pycache__/conv_module.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/utils/__pycache__/conv_ws.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/utils/__pycache__/norm.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/utils/__pycache__/quant_conv.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/utils/__pycache__/scale.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/utils/__pycache__/weight_init.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/models/utils/conv_module.py create mode 100644 CDARTS_detection/mmdet/models/utils/conv_ws.py create mode 100644 CDARTS_detection/mmdet/models/utils/norm.py create mode 100644 CDARTS_detection/mmdet/models/utils/quant_conv.py create mode 100644 CDARTS_detection/mmdet/models/utils/scale.py create mode 100644 CDARTS_detection/mmdet/models/utils/weight_init.py create mode 100644 CDARTS_detection/mmdet/ops/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/dcn/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/dcn/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/dcn/functions/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/dcn/functions/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/dcn/functions/__pycache__/deform_conv.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/dcn/functions/__pycache__/deform_pool.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/dcn/functions/deform_conv.py create mode 100644 CDARTS_detection/mmdet/ops/dcn/functions/deform_pool.py create mode 100644 CDARTS_detection/mmdet/ops/dcn/modules/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/dcn/modules/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/dcn/modules/__pycache__/deform_conv.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/dcn/modules/__pycache__/deform_pool.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/dcn/modules/deform_conv.py create mode 100644 CDARTS_detection/mmdet/ops/dcn/modules/deform_pool.py create mode 100644 CDARTS_detection/mmdet/ops/dcn/setup.py create mode 100644 CDARTS_detection/mmdet/ops/dcn/src/deform_conv_cuda.cpp create mode 100644 CDARTS_detection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu create mode 100644 CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda.cpp create mode 100644 CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda_kernel.cu create mode 100644 CDARTS_detection/mmdet/ops/gcb/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/gcb/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/gcb/__pycache__/context_block.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/gcb/context_block.py create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/functions/__init__.py create mode 100644 
CDARTS_detection/mmdet/ops/masked_conv/functions/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/functions/__pycache__/masked_conv.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/functions/masked_conv.py create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/modules/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/modules/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/modules/__pycache__/masked_conv.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/modules/masked_conv.py create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/setup.py create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/src/masked_conv2d_cuda.cpp create mode 100644 CDARTS_detection/mmdet/ops/masked_conv/src/masked_conv2d_kernel.cu create mode 100644 CDARTS_detection/mmdet/ops/nms/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/nms/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/nms/__pycache__/nms_wrapper.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/nms/nms_wrapper.py create mode 100644 CDARTS_detection/mmdet/ops/nms/setup.py create mode 100644 CDARTS_detection/mmdet/ops/nms/src/nms_cpu.cpp create mode 100644 CDARTS_detection/mmdet/ops/nms/src/nms_cuda.cpp create mode 100644 CDARTS_detection/mmdet/ops/nms/src/nms_kernel.cu create mode 100644 CDARTS_detection/mmdet/ops/nms/src/soft_nms_cpu.cpp create mode 100644 CDARTS_detection/mmdet/ops/nms/src/soft_nms_cpu.pyx create mode 100644 CDARTS_detection/mmdet/ops/roi_align/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/roi_align/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/roi_align/__pycache__/roi_align.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/roi_align/functions/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/roi_align/functions/roi_align.py create mode 100644 CDARTS_detection/mmdet/ops/roi_align/gradcheck.py create mode 100644 CDARTS_detection/mmdet/ops/roi_align/modules/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/roi_align/modules/roi_align.py create mode 100644 CDARTS_detection/mmdet/ops/roi_align/roi_align.py create mode 100644 CDARTS_detection/mmdet/ops/roi_align/setup.py create mode 100644 CDARTS_detection/mmdet/ops/roi_align/src/roi_align_cuda.cpp create mode 100644 CDARTS_detection/mmdet/ops/roi_align/src/roi_align_kernel.cu create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/functions/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/functions/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/functions/__pycache__/roi_pool.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/functions/roi_pool.py create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/gradcheck.py create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/modules/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/modules/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/modules/__pycache__/roi_pool.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/modules/roi_pool.py create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/setup.py create mode 100644 
CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_cuda.cpp create mode 100644 CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_kernel.cu create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/functions/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/functions/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/functions/__pycache__/sigmoid_focal_loss.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/functions/sigmoid_focal_loss.py create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/__init__.py create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/__pycache__/sigmoid_focal_loss.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/sigmoid_focal_loss.py create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/setup.py create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp create mode 100644 CDARTS_detection/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu create mode 100644 CDARTS_detection/mmdet/utils/__init__.py create mode 100644 CDARTS_detection/mmdet/utils/__pycache__/__init__.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/utils/__pycache__/collect_env.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/utils/__pycache__/flops_counter.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/utils/__pycache__/logger.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/utils/__pycache__/registry.cpython-36.pyc create mode 100644 CDARTS_detection/mmdet/utils/collect_env.py create mode 100644 CDARTS_detection/mmdet/utils/contextmanagers.py create mode 100644 CDARTS_detection/mmdet/utils/flops_counter.py create mode 100644 CDARTS_detection/mmdet/utils/logger.py create mode 100644 CDARTS_detection/mmdet/utils/profiling.py create mode 100644 CDARTS_detection/mmdet/utils/registry.py create mode 100644 CDARTS_detection/mmdet/utils/util_mixins.py create mode 100644 CDARTS_detection/mmdet/version.py create mode 100644 CDARTS_detection/scripts/train_hit_det.sh create mode 100644 CDARTS_detection/setup.py create mode 100644 CDARTS_detection/test.py create mode 100644 CDARTS_detection/tools/analyze_logs.py create mode 100644 CDARTS_detection/tools/coco_eval.py create mode 100644 CDARTS_detection/tools/convert_datasets/pascal_voc.py create mode 100644 CDARTS_detection/tools/detectron2pytorch.py create mode 100644 CDARTS_detection/tools/dist_test.sh create mode 100644 CDARTS_detection/tools/dist_train.sh create mode 100644 CDARTS_detection/tools/get_flops.py create mode 100644 CDARTS_detection/tools/publish_model.py create mode 100644 CDARTS_detection/tools/slurm_test.sh create mode 100644 CDARTS_detection/tools/slurm_train.sh create mode 100644 CDARTS_detection/tools/test.py create mode 100644 CDARTS_detection/tools/upgrade_model_version.py create mode 100644 CDARTS_detection/tools/voc_eval.py create mode 100644 CDARTS_detection/train.py create mode 100644 CDARTS_detection/train.sh create mode 100644 CDARTS_segmentation/LICENSE create mode 100644 CDARTS_segmentation/README.md create mode 100644 CDARTS_segmentation/configs/ade/cydas.yaml create mode 100644 
CDARTS_segmentation/configs/cityscapes/cydas.yaml create mode 100644 CDARTS_segmentation/dataloaders/__init__.py create mode 100644 CDARTS_segmentation/dataloaders/custom_transforms.py create mode 100644 CDARTS_segmentation/dataloaders/dataloader_utils.py create mode 100644 CDARTS_segmentation/dataloaders/datasets/__init__.py create mode 100644 CDARTS_segmentation/dataloaders/datasets/cityscapes.py create mode 100644 CDARTS_segmentation/dataloaders/datasets/coco.py create mode 100644 CDARTS_segmentation/dataloaders/datasets/combine_dbs.py create mode 100644 CDARTS_segmentation/dataloaders/datasets/kd.py create mode 100644 CDARTS_segmentation/dataloaders/datasets/pascal.py create mode 100644 CDARTS_segmentation/dataloaders/datasets/sbd.py create mode 100644 CDARTS_segmentation/dataloaders/segdatasets/__init__.py create mode 100644 CDARTS_segmentation/dataloaders/segdatasets/base_dataset.py create mode 100644 CDARTS_segmentation/dataloaders/segdatasets/cityscapes.py create mode 100644 CDARTS_segmentation/dataloaders/segdatasets/cityscapes_panoptic.py create mode 100644 CDARTS_segmentation/dataloaders/segdatasets/coco_panoptic.py create mode 100644 CDARTS_segmentation/dataloaders/segdatasets/utils.py create mode 100644 CDARTS_segmentation/dataloaders/transforms/__init__.py create mode 100644 CDARTS_segmentation/dataloaders/transforms/build.py create mode 100644 CDARTS_segmentation/dataloaders/transforms/pre_augmentation_transforms.py create mode 100644 CDARTS_segmentation/dataloaders/transforms/target_transforms.py create mode 100644 CDARTS_segmentation/dataloaders/transforms/transforms.py create mode 100644 CDARTS_segmentation/install.sh create mode 100644 CDARTS_segmentation/segmentation/__init__.py create mode 100644 CDARTS_segmentation/segmentation/config/__init__.py create mode 100644 CDARTS_segmentation/segmentation/config/default.py create mode 100644 CDARTS_segmentation/segmentation/config/hrnet_config.py create mode 100644 CDARTS_segmentation/segmentation/data/__init__.py create mode 100644 CDARTS_segmentation/segmentation/data/build.py create mode 100644 CDARTS_segmentation/segmentation/data/datasets/__init__.py create mode 100644 CDARTS_segmentation/segmentation/data/datasets/base_dataset.py create mode 100644 CDARTS_segmentation/segmentation/data/datasets/cityscapes.py create mode 100644 CDARTS_segmentation/segmentation/data/datasets/cityscapes_panoptic.py create mode 100644 CDARTS_segmentation/segmentation/data/datasets/coco_panoptic.py create mode 100644 CDARTS_segmentation/segmentation/data/datasets/utils.py create mode 100644 CDARTS_segmentation/segmentation/data/samplers/__init__.py create mode 100644 CDARTS_segmentation/segmentation/data/samplers/distributed_sampler.py create mode 100644 CDARTS_segmentation/segmentation/data/transforms/__init__.py create mode 100644 CDARTS_segmentation/segmentation/data/transforms/build.py create mode 100644 CDARTS_segmentation/segmentation/data/transforms/pre_augmentation_transforms.py create mode 100644 CDARTS_segmentation/segmentation/data/transforms/target_transforms.py create mode 100644 CDARTS_segmentation/segmentation/data/transforms/transforms.py create mode 100644 CDARTS_segmentation/segmentation/evaluation/__init__.py create mode 100644 CDARTS_segmentation/segmentation/evaluation/coco_instance.py create mode 100644 CDARTS_segmentation/segmentation/evaluation/coco_panoptic.py create mode 100644 CDARTS_segmentation/segmentation/evaluation/instance.py create mode 100644 CDARTS_segmentation/segmentation/evaluation/panoptic.py create 
mode 100644 CDARTS_segmentation/segmentation/evaluation/semantic.py create mode 100644 CDARTS_segmentation/segmentation/model/__init__.py create mode 100644 CDARTS_segmentation/segmentation/model/backbone/__init__.py create mode 100644 CDARTS_segmentation/segmentation/model/backbone/hrnet.py create mode 100644 CDARTS_segmentation/segmentation/model/backbone/mnasnet.py create mode 100644 CDARTS_segmentation/segmentation/model/backbone/mobilenet.py create mode 100644 CDARTS_segmentation/segmentation/model/backbone/resnet.py create mode 100644 CDARTS_segmentation/segmentation/model/backbone/xception.py create mode 100644 CDARTS_segmentation/segmentation/model/build.py create mode 100644 CDARTS_segmentation/segmentation/model/decoder/__init__.py create mode 100644 CDARTS_segmentation/segmentation/model/decoder/aspp.py create mode 100644 CDARTS_segmentation/segmentation/model/decoder/conv_module.py create mode 100644 CDARTS_segmentation/segmentation/model/decoder/deeplabv3.py create mode 100644 CDARTS_segmentation/segmentation/model/decoder/deeplabv3plus.py create mode 100644 CDARTS_segmentation/segmentation/model/decoder/panoptic_deeplab.py create mode 100644 CDARTS_segmentation/segmentation/model/loss/__init__.py create mode 100644 CDARTS_segmentation/segmentation/model/loss/criterion.py create mode 100644 CDARTS_segmentation/segmentation/model/meta_arch/__init__.py create mode 100644 CDARTS_segmentation/segmentation/model/meta_arch/base.py create mode 100644 CDARTS_segmentation/segmentation/model/meta_arch/deeplabv3.py create mode 100644 CDARTS_segmentation/segmentation/model/meta_arch/deeplabv3plus.py create mode 100644 CDARTS_segmentation/segmentation/model/meta_arch/panoptic_deeplab.py create mode 100644 CDARTS_segmentation/segmentation/model/post_processing/__init__.py create mode 100644 CDARTS_segmentation/segmentation/model/post_processing/evaluation_format.py create mode 100644 CDARTS_segmentation/segmentation/model/post_processing/instance_post_processing.py create mode 100644 CDARTS_segmentation/segmentation/model/post_processing/semantic_post_processing.py create mode 100644 CDARTS_segmentation/segmentation/solver/__init__.py create mode 100644 CDARTS_segmentation/segmentation/solver/build.py create mode 100644 CDARTS_segmentation/segmentation/solver/lr_scheduler.py create mode 100644 CDARTS_segmentation/segmentation/solver/utils.py create mode 100644 CDARTS_segmentation/segmentation/utils/__init__.py create mode 100644 CDARTS_segmentation/segmentation/utils/comm.py create mode 100644 CDARTS_segmentation/segmentation/utils/debug.py create mode 100644 CDARTS_segmentation/segmentation/utils/env.py create mode 100644 CDARTS_segmentation/segmentation/utils/flow_vis.py create mode 100644 CDARTS_segmentation/segmentation/utils/logger.py create mode 100644 CDARTS_segmentation/segmentation/utils/save_annotation.py create mode 100644 CDARTS_segmentation/segmentation/utils/test_utils.py create mode 100644 CDARTS_segmentation/segmentation/utils/utils.py create mode 100644 CDARTS_segmentation/tools/__init__.py create mode 100644 CDARTS_segmentation/tools/datasets/BaseDataset.py create mode 100644 CDARTS_segmentation/tools/datasets/__init__.py create mode 100644 CDARTS_segmentation/tools/datasets/bdd/__init__.py create mode 100644 CDARTS_segmentation/tools/datasets/bdd/bdd.py create mode 100644 CDARTS_segmentation/tools/datasets/camvid/__init__.py create mode 100644 CDARTS_segmentation/tools/datasets/camvid/camvid.py create mode 100644 CDARTS_segmentation/tools/datasets/cityscapes/__init__.py 
create mode 100644 CDARTS_segmentation/tools/datasets/cityscapes/cityscapes.py create mode 100644 CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_test.txt create mode 100644 CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_train_fine.txt create mode 100644 CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_val_fine.txt create mode 100644 CDARTS_segmentation/tools/datasets/coco/__init__.py create mode 100644 CDARTS_segmentation/tools/datasets/coco/coco.py create mode 100644 CDARTS_segmentation/tools/engine/__init__.py create mode 100644 CDARTS_segmentation/tools/engine/evaluator.py create mode 100644 CDARTS_segmentation/tools/engine/logger.py create mode 100644 CDARTS_segmentation/tools/engine/tester.py create mode 100644 CDARTS_segmentation/tools/seg_opr/__init__.py create mode 100644 CDARTS_segmentation/tools/seg_opr/loss_opr.py create mode 100644 CDARTS_segmentation/tools/seg_opr/metric.py create mode 100644 CDARTS_segmentation/tools/utils/__init__.py create mode 100644 CDARTS_segmentation/tools/utils/cal_model.py create mode 100644 CDARTS_segmentation/tools/utils/darts_utils.py create mode 100644 CDARTS_segmentation/tools/utils/dist_utils.py create mode 100644 CDARTS_segmentation/tools/utils/genotypes.py create mode 100644 CDARTS_segmentation/tools/utils/img_utils.py create mode 100644 CDARTS_segmentation/tools/utils/init_func.py create mode 100644 CDARTS_segmentation/tools/utils/lr_scheduler.py create mode 100644 CDARTS_segmentation/tools/utils/metrics.py create mode 100644 CDARTS_segmentation/tools/utils/pyt_utils.py create mode 100644 CDARTS_segmentation/tools/utils/visualize.py create mode 100644 CDARTS_segmentation/tools/vis/panoptic_coco_categories.json create mode 100644 CDARTS_segmentation/tools/vis/vis_cityscapes.py create mode 100644 CDARTS_segmentation/tools/vis/vis_coco.py create mode 100644 CDARTS_segmentation/train/_init_paths.py create mode 100644 CDARTS_segmentation/train/att_sa.py create mode 100644 CDARTS_segmentation/train/builder.py create mode 100644 CDARTS_segmentation/train/cal_model.py create mode 100644 CDARTS_segmentation/train/config_test.py create mode 100644 CDARTS_segmentation/train/config_train.py create mode 100644 CDARTS_segmentation/train/configs/ADE20K/512.yaml create mode 100644 CDARTS_segmentation/train/configs/ADE20K/base.yaml create mode 100644 CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml create mode 100644 CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/base.yaml create mode 100644 CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml create mode 100644 CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024_dsconv.yaml create mode 100644 CDARTS_segmentation/train/cydas.py create mode 100644 CDARTS_segmentation/train/dataloader.py create mode 100644 CDARTS_segmentation/train/eval.py create mode 100644 CDARTS_segmentation/train/genotypes.py create mode 100644 CDARTS_segmentation/train/latency_lookup_table.npy create mode 100644 CDARTS_segmentation/train/launch.py create mode 100644 CDARTS_segmentation/train/layers.py create mode 100644 CDARTS_segmentation/train/loss.py create mode 100644 CDARTS_segmentation/train/operations.py create mode 100644 CDARTS_segmentation/train/run_det2.sh create mode 100644 CDARTS_segmentation/train/seg_metrics.py create mode 100644 CDARTS_segmentation/train/seg_oprs.py create mode 
 create mode 100644 CDARTS_segmentation/train/slimmable_ops.py
 create mode 100644 CDARTS_segmentation/train/test.py
 create mode 100644 CDARTS_segmentation/train/test_seg.py
 create mode 100644 CDARTS_segmentation/train/train_ade20k_cydas.py
 create mode 100644 CDARTS_segmentation/train/train_cydas.py
 create mode 100644 CDARTS_segmentation/train/vis_arch.py
 create mode 100644 LICENSE
 create mode 100644 README.md
 create mode 100644 benchmark201/configs/config.py
 create mode 100644 benchmark201/core/augment_function.py
 create mode 100644 benchmark201/core/pretrain_function.py
 create mode 100644 benchmark201/core/search_function.py
 create mode 100644 benchmark201/datasets/cifar.py
 create mode 100644 benchmark201/datasets/data_utils.py
 create mode 100644 benchmark201/datasets/imagenet.py
 create mode 100644 benchmark201/models/augment_cells.py
 create mode 100644 benchmark201/models/aux_head.py
 create mode 100644 benchmark201/models/cdarts_controller.py
 create mode 100644 benchmark201/models/loss.py
 create mode 100644 benchmark201/models/model_augment.py
 create mode 100644 benchmark201/models/model_test.py
 create mode 100644 benchmark201/models/ops.py
 create mode 100644 benchmark201/models/search_cells.py
 create mode 100644 benchmark201/run_search_cifar_1gpu.sh
 create mode 100644 benchmark201/search.py
 create mode 100644 benchmark201/search/cifar10-search/cifar10-search.log
 create mode 100644 benchmark201/search/cifar10-search/tb/readme.md
 create mode 100644 benchmark201/search/imagenet-search/imagenet-search.log
 create mode 100644 benchmark201/search/imagenet-search/tb/readme.md
 create mode 100644 benchmark201/utils/genotypes.py
 create mode 100644 benchmark201/utils/get_info.py
 create mode 100644 benchmark201/utils/utils.py
 create mode 100644 benchmark201/utils/visualize.py
 create mode 100644 experiments/retrain/cifar10-retrain/cifar10-retrain.log
 create mode 100644 experiments/retrain/cifar10-retrain/tb/readme.md
 create mode 100644 experiments/retrain/imagenet-retrain/imagenet-retrain.log
 create mode 100644 experiments/retrain/imagenet-retrain/tb/readme.md
 create mode 100644 experiments/search/cifar10-search/cifar10-search.log
 create mode 100644 experiments/search/cifar10-search/tb/readme.md
 create mode 100644 experiments/search/imagenet-search/imagenet-search.log
 create mode 100644 experiments/search/imagenet-search/tb/readme.md
 create mode 100644 lib/config.py
 create mode 100644 lib/core/augment_function.py
 create mode 100644 lib/core/pretrain_function.py
 create mode 100644 lib/core/search_function.py
 create mode 100644 lib/datasets/cifar.py
 create mode 100644 lib/datasets/data_utils.py
 create mode 100644 lib/datasets/imagenet.py
 create mode 100644 lib/models/augment_cells.py
 create mode 100644 lib/models/aux_head.py
 create mode 100644 lib/models/cdarts_controller.py
 create mode 100644 lib/models/loss.py
 create mode 100644 lib/models/model_augment.py
 create mode 100644 lib/models/model_test.py
 create mode 100644 lib/models/ops.py
 create mode 100644 lib/models/search_cells.py
 create mode 100644 lib/utils/count_flops.py
 create mode 100644 lib/utils/genotypes.py
 create mode 100644 lib/utils/utils.py
 create mode 100644 lib/utils/visualize.py
 create mode 100644 requirements

diff --git a/CDARTS/_init_paths.py b/CDARTS/_init_paths.py
new file mode 100644
index 0000000..abeae77
--- /dev/null
+++ b/CDARTS/_init_paths.py
@@ -0,0 +1,19 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os.path as osp
+import sys
+
+
+def add_path(path):
+    if path not in sys.path:
+        sys.path.insert(0, path)
+
+
+this_dir = osp.dirname(__file__)
+lib_path = osp.join(this_dir, '..', 'lib')
+add_path(lib_path)
+
+lib_path = osp.join(this_dir, '..')
+add_path(lib_path)
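Each cells/*.json file added below maps a cell-stage index ("0", "1", "2") to the printed form of a DARTS Genotype, with the same genotype repeated once per stage. A minimal sketch of reading such a file back into Genotype objects; it assumes lib/utils/genotypes.py exposes Genotype and from_str() the way CDARTS/retrain.py (further below) uses them, and the from_str shown here is only the usual DARTS-style eval wrapper, which may differ from the actual implementation in this patch:

    import json
    from collections import namedtuple

    # The printed genotype strings reference this name, so eval() needs it in scope.
    Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')

    def from_str(s):
        # Rebuild a Genotype from its repr; only safe on trusted input (eval runs code).
        return eval(s)

    with open('CDARTS/cells/cifar_genotype.json') as f:
        raw = json.load(f)  # e.g. {"0": "Genotype(normal=[...], ...)", ...}
    genotypes = {int(stage): from_str(g) for stage, g in raw.items()}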
diff --git a/CDARTS/cells/cifar_genotype.json b/CDARTS/cells/cifar_genotype.json
new file mode 100644
index 0000000..ff406c7
--- /dev/null
+++ b/CDARTS/cells/cifar_genotype.json
@@ -0,0 +1 @@
+{"0": "Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 2)], [('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('skip_connect', 0), ('skip_connect', 2)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('max_pool_3x3', 0)], [('dil_conv_5x5', 2), ('skip_connect', 0)], [('sep_conv_3x3', 0), ('sep_conv_5x5', 1)], [('sep_conv_3x3', 0), ('dil_conv_5x5', 2)]], reduce_concat=range(2, 6))", "1": "Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 2)], [('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('skip_connect', 0), ('skip_connect', 2)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('max_pool_3x3', 0)], [('dil_conv_5x5', 2), ('skip_connect', 0)], [('sep_conv_3x3', 0), ('sep_conv_5x5', 1)], [('sep_conv_3x3', 0), ('dil_conv_5x5', 2)]], reduce_concat=range(2, 6))", "2": "Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 2)], [('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('skip_connect', 0), ('skip_connect', 2)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('max_pool_3x3', 0)], [('dil_conv_5x5', 2), ('skip_connect', 0)], [('sep_conv_3x3', 0), ('sep_conv_5x5', 1)], [('sep_conv_3x3', 0), ('dil_conv_5x5', 2)]], reduce_concat=range(2, 6))"}
\ No newline at end of file
diff --git a/CDARTS/cells/dartsv1_genotype.json b/CDARTS/cells/dartsv1_genotype.json
new file mode 100644
index 0000000..9b1a020
--- /dev/null
+++ b/CDARTS/cells/dartsv1_genotype.json
@@ -0,0 +1 @@
+{"0": "Genotype(normal=[[('sep_conv_3x3', 1), ('sep_conv_3x3', 0)], [('skip_connect', 0), ('sep_conv_3x3', 1)], [('skip_connect', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 0), ('skip_connect', 2)]], normal_concat=[2, 3, 4, 5], reduce=[[('max_pool_3x3', 0), ('max_pool_3x3', 1)], [('skip_connect', 2), ('max_pool_3x3', 0)], [('max_pool_3x3', 0), ('skip_connect', 2)], [('skip_connect', 2), ('avg_pool_3x3', 0)]], reduce_concat=[2, 3, 4, 5])", "1": "Genotype(normal=[[('sep_conv_3x3', 1), ('sep_conv_3x3', 0)], [('skip_connect', 0), ('sep_conv_3x3', 1)], [('skip_connect', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 0), ('skip_connect', 2)]], normal_concat=[2, 3, 4, 5], reduce=[[('max_pool_3x3', 0), ('max_pool_3x3', 1)], [('skip_connect', 2), ('max_pool_3x3', 0)], [('max_pool_3x3', 0), ('skip_connect', 2)], [('skip_connect', 2), ('avg_pool_3x3', 0)]], reduce_concat=[2, 3, 4, 5])", "2": "Genotype(normal=[[('sep_conv_3x3', 1), ('sep_conv_3x3', 0)], [('skip_connect', 0), ('sep_conv_3x3', 1)], [('skip_connect', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 0), ('skip_connect', 2)]], normal_concat=[2, 3, 4, 5], reduce=[[('max_pool_3x3', 0), ('max_pool_3x3', 1)], [('skip_connect', 2), ('max_pool_3x3', 0)], [('max_pool_3x3', 0), ('skip_connect', 2)], [('skip_connect', 2), ('avg_pool_3x3', 0)]], reduce_concat=[2, 3, 4, 5])"}
\ No newline at end of file
diff --git a/CDARTS/cells/dartsv2_genotype.json b/CDARTS/cells/dartsv2_genotype.json
new file mode 100644
index 0000000..a51a1f3
--- /dev/null
+++ b/CDARTS/cells/dartsv2_genotype.json
@@ -0,0 +1 @@
+{"0": "Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('skip_connect', 0)], [('skip_connect', 0), ('dil_conv_3x3', 2)]], normal_concat=[2, 3, 4, 5], reduce=[[('max_pool_3x3', 0), ('max_pool_3x3', 1)], [('skip_connect', 2), ('max_pool_3x3', 1)], [('max_pool_3x3', 0), ('skip_connect', 2)], [('skip_connect', 2), ('max_pool_3x3', 1)]], reduce_concat=[2, 3, 4, 5])", "1": "Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('skip_connect', 0)], [('skip_connect', 0), ('dil_conv_3x3', 2)]], normal_concat=[2, 3, 4, 5], reduce=[[('max_pool_3x3', 0), ('max_pool_3x3', 1)], [('skip_connect', 2), ('max_pool_3x3', 1)], [('max_pool_3x3', 0), ('skip_connect', 2)], [('skip_connect', 2), ('max_pool_3x3', 1)]], reduce_concat=[2, 3, 4, 5])", "2": "Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('skip_connect', 0)], [('skip_connect', 0), ('dil_conv_3x3', 2)]], normal_concat=[2, 3, 4, 5], reduce=[[('max_pool_3x3', 0), ('max_pool_3x3', 1)], [('skip_connect', 2), ('max_pool_3x3', 1)], [('max_pool_3x3', 0), ('skip_connect', 2)], [('skip_connect', 2), ('max_pool_3x3', 1)]], reduce_concat=[2, 3, 4, 5])"}
\ No newline at end of file
diff --git a/CDARTS/cells/imagenet_genotype.json b/CDARTS/cells/imagenet_genotype.json
new file mode 100644
index 0000000..f952122
--- /dev/null
+++ b/CDARTS/cells/imagenet_genotype.json
@@ -0,0 +1 @@
+{"0": "Genotype(normal=[[('sep_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_3x3', 0), ('sep_conv_5x5', 2)], [('sep_conv_5x5', 2), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('sep_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('avg_pool_3x3', 0)], [('max_pool_3x3', 1), ('dil_conv_5x5', 2)], [('sep_conv_5x5', 1), ('dil_conv_5x5', 2)], [('sep_conv_3x3', 3), ('dil_conv_3x3', 4)]], reduce_concat=range(2, 6))", "1": "Genotype(normal=[[('sep_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_3x3', 0), ('sep_conv_5x5', 2)], [('sep_conv_5x5', 2), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('sep_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('avg_pool_3x3', 0)], [('max_pool_3x3', 1), ('dil_conv_5x5', 2)], [('sep_conv_5x5', 1), ('dil_conv_5x5', 2)], [('sep_conv_3x3', 3), ('dil_conv_3x3', 4)]], reduce_concat=range(2, 6))", "2": "Genotype(normal=[[('sep_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_3x3', 0), ('sep_conv_5x5', 2)], [('sep_conv_5x5', 2), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('sep_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('avg_pool_3x3', 0)], [('max_pool_3x3', 1), ('dil_conv_5x5', 2)], [('sep_conv_5x5', 1), ('dil_conv_5x5', 2)], [('sep_conv_3x3', 3), ('dil_conv_3x3', 4)]], reduce_concat=range(2, 6))"}
\ No newline at end of file
diff --git a/CDARTS/cells/pcdarts_cifar_genotype.json b/CDARTS/cells/pcdarts_cifar_genotype.json
new file mode 100644
index 0000000..e9cf741
--- /dev/null
+++ b/CDARTS/cells/pcdarts_cifar_genotype.json
@@ -0,0 +1 @@
+{"0": "Genotype(normal=[[('sep_conv_3x3', 1), ('skip_connect', 0)], [('sep_conv_3x3', 0), ('dil_conv_3x3', 1)], [('sep_conv_5x5', 0), ('sep_conv_3x3', 1)], [('avg_pool_3x3', 0), ('dil_conv_3x3', 1)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('max_pool_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 2)], [('sep_conv_3x3', 0), ('sep_conv_3x3', 3)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 2)]], reduce_concat=range(2, 6))", "1": "Genotype(normal=[[('sep_conv_3x3', 1), ('skip_connect', 0)], [('sep_conv_3x3', 0), ('dil_conv_3x3', 1)], [('sep_conv_5x5', 0), ('sep_conv_3x3', 1)], [('avg_pool_3x3', 0), ('dil_conv_3x3', 1)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('max_pool_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 2)], [('sep_conv_3x3', 0), ('sep_conv_3x3', 3)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 2)]], reduce_concat=range(2, 6))", "2": "Genotype(normal=[[('sep_conv_3x3', 1), ('skip_connect', 0)], [('sep_conv_3x3', 0), ('dil_conv_3x3', 1)], [('sep_conv_5x5', 0), ('sep_conv_3x3', 1)], [('avg_pool_3x3', 0), ('dil_conv_3x3', 1)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('max_pool_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 2)], [('sep_conv_3x3', 0), ('sep_conv_3x3', 3)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 2)]], reduce_concat=range(2, 6))"}
\ No newline at end of file
diff --git a/CDARTS/cells/pcdarts_imagenet_genotype.json b/CDARTS/cells/pcdarts_imagenet_genotype.json
new file mode 100644
index 0000000..1d51b5e
--- /dev/null
+++ b/CDARTS/cells/pcdarts_imagenet_genotype.json
@@ -0,0 +1 @@
+{"0": "Genotype(normal=[[('skip_connect', 1), ('sep_conv_3x3', 0)], [('sep_conv_3x3', 0), ('skip_connect', 1)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 3)], [('sep_conv_3x3', 1), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('sep_conv_3x3', 0), ('skip_connect', 1)], [('dil_conv_5x5', 2), ('max_pool_3x3', 1)], [('sep_conv_3x3', 2), ('sep_conv_3x3', 1)], [('sep_conv_5x5', 0), ('sep_conv_3x3', 3)]], reduce_concat=range(2, 6))", "1": "Genotype(normal=[[('skip_connect', 1), ('sep_conv_3x3', 0)], [('sep_conv_3x3', 0), ('skip_connect', 1)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 3)], [('sep_conv_3x3', 1), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('sep_conv_3x3', 0), ('skip_connect', 1)], [('dil_conv_5x5', 2), ('max_pool_3x3', 1)], [('sep_conv_3x3', 2), ('sep_conv_3x3', 1)], [('sep_conv_5x5', 0), ('sep_conv_3x3', 3)]], reduce_concat=range(2, 6))", "2": "Genotype(normal=[[('skip_connect', 1), ('sep_conv_3x3', 0)], [('sep_conv_3x3', 0), ('skip_connect', 1)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 3)], [('sep_conv_3x3', 1), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('sep_conv_3x3', 0), ('skip_connect', 1)], [('dil_conv_5x5', 2), ('max_pool_3x3', 1)], [('sep_conv_3x3', 2), ('sep_conv_3x3', 1)], [('sep_conv_5x5', 0), ('sep_conv_3x3', 3)]], reduce_concat=range(2, 6))"}
\ No newline at end of file
diff --git a/CDARTS/cells/pdarts_genotype.json b/CDARTS/cells/pdarts_genotype.json
new file mode 100644
index 0000000..763a847
--- /dev/null
+++ b/CDARTS/cells/pdarts_genotype.json
@@ -0,0 +1 @@
+{"0": "Genotype(normal=[[('skip_connect', 0), ('dil_conv_3x3', 1)], [('skip_connect', 0),('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 3)], [('sep_conv_3x3',0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('avg_pool_3x3', 0), ('sep_conv_5x5', 1)], [('sep_conv_3x3', 0), ('dil_conv_5x5', 2)], [('max_pool_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 1), ('dil_conv_5x5', 3)]], reduce_concat=range(2, 6))", "1": "Genotype(normal=[[('skip_connect', 0), ('dil_conv_3x3', 1)], [('skip_connect', 0),('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 3)], [('sep_conv_3x3',0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('avg_pool_3x3', 0), ('sep_conv_5x5', 1)], [('sep_conv_3x3', 0), ('dil_conv_5x5', 2)], [('max_pool_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 1), ('dil_conv_5x5', 3)]], reduce_concat=range(2, 6))", "2": "Genotype(normal=[[('skip_connect', 0), ('dil_conv_3x3', 1)], [('skip_connect', 0),('sep_conv_3x3', 1)], [('sep_conv_3x3', 1), ('sep_conv_3x3', 3)], [('sep_conv_3x3',0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('avg_pool_3x3', 0), ('sep_conv_5x5', 1)], [('sep_conv_3x3', 0), ('dil_conv_5x5', 2)], [('max_pool_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 1), ('dil_conv_5x5', 3)]], reduce_concat=range(2, 6))"}
\ No newline at end of file
diff --git a/CDARTS/retrain.py b/CDARTS/retrain.py
new file mode 100644
index 0000000..749acac
--- /dev/null
+++ b/CDARTS/retrain.py
@@ -0,0 +1,206 @@
+""" Retrain cell """
+import _init_paths
+import os
+import torch
+import json
+import torch.nn as nn
+import numpy as np
+import lib.utils.genotypes as gt
+
+from tensorboardX import SummaryWriter
+from lib.models.cdarts_controller import CDARTSController
+from lib.utils import utils
+from lib.config import AugmentConfig
+from lib.core.augment_function import train, validate
+
+# config
+config = AugmentConfig()
+
+# make apex optional
+if config.distributed:
+    # DDP = torch.nn.parallel.DistributedDataParallel
+    try:
+        import apex
+        from apex.parallel import DistributedDataParallel as DDP
+        from apex import amp, optimizers
+        from apex.fp16_utils import *
+    except ImportError:
+        raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
+
+
+# tensorboard
+writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
+writer.add_text('config', config.as_markdown(), 0)
+
+logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
+if config.local_rank == 0:
+    config.print_params(logger.info)
+
+if 'cifar' in config.dataset:
+    from lib.datasets.cifar import get_augment_datasets
+elif 'imagenet' in config.dataset:
+    from lib.datasets.imagenet import get_augment_datasets
+else:
+    raise Exception("Unsupported dataset!")
+
+def main():
+    logger.info("Logger is set - training start")
+
+    # set seed
+    np.random.seed(config.seed)
+    torch.manual_seed(config.seed)
+    torch.cuda.manual_seed_all(config.seed)
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = True
+
+    if config.distributed:
+        config.gpu = config.local_rank % torch.cuda.device_count()
+        torch.cuda.set_device(config.gpu)
+        # distributed init
+        torch.distributed.init_process_group(backend='nccl', init_method=config.dist_url,
+                                             world_size=config.world_size, rank=config.local_rank)
+
+        config.world_size = torch.distributed.get_world_size()
+
+        config.total_batch_size = config.world_size * config.batch_size
+    else:
+        config.total_batch_size = config.batch_size
+
+
+    loaders, samplers = get_augment_datasets(config)
+    train_loader, valid_loader = loaders
+    train_sampler, valid_sampler = samplers
+
+    net_crit = nn.CrossEntropyLoss().cuda()
+    controller = CDARTSController(config, net_crit, n_nodes=4, stem_multiplier=config.stem_multiplier)
+
+    file = open(config.cell_file, 'r')
+    js = file.read()
+    r_dict = json.loads(js)
+    if config.local_rank == 0:
+        logger.info(r_dict)
+    file.close()
+    genotypes_dict = {}
+    for layer_idx, genotype in r_dict.items():
+        genotypes_dict[int(layer_idx)] = gt.from_str(genotype)
+
+    controller.build_augment_model(controller.init_channel, genotypes_dict)
+    resume_state = None
+    if config.resume:
+        resume_state = torch.load(config.resume_path, map_location='cpu')
+        controller.model_main.load_state_dict(resume_state['model_main'])
+
+    controller.model_main = controller.model_main.cuda()
+    param_size = utils.param_size(controller.model_main)
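+    # param_size is the model's weight footprint in MB; it is logged next and, on
+    # CIFAR, selects the regularization below: cells under 3.0 MB keep drop_path_prob
+    # at 0.2, larger cells use 0.3, and cells above 3.5 MB also get the stronger
+    # 5e-4 weight decay.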
diff --git a/CDARTS/retrain.py b/CDARTS/retrain.py
new file mode 100644
index 0000000..749acac
--- /dev/null
+++ b/CDARTS/retrain.py
@@ -0,0 +1,206 @@
+""" Retrain cell """
+import _init_paths
+import os
+import torch
+import json
+import torch.nn as nn
+import numpy as np
+import lib.utils.genotypes as gt
+
+from tensorboardX import SummaryWriter
+from lib.models.cdarts_controller import CDARTSController
+from lib.utils import utils
+from lib.config import AugmentConfig
+from lib.core.augment_function import train, validate
+
+# config
+config = AugmentConfig()
+
+# make apex optional
+if config.distributed:
+    # DDP = torch.nn.parallel.DistributedDataParallel
+    try:
+        import apex
+        from apex.parallel import DistributedDataParallel as DDP
+        from apex import amp, optimizers
+        from apex.fp16_utils import *
+    except ImportError:
+        raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
+
+
+# tensorboard
+writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
+writer.add_text('config', config.as_markdown(), 0)
+
+logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
+if config.local_rank == 0:
+    config.print_params(logger.info)
+
+if 'cifar' in config.dataset:
+    from lib.datasets.cifar import get_augment_datasets
+elif 'imagenet' in config.dataset:
+    from lib.datasets.imagenet import get_augment_datasets
+else:
+    raise Exception("Unsupported dataset!")
+
+
+def main():
+    logger.info("Logger is set - training start")
+
+    # set seed
+    np.random.seed(config.seed)
+    torch.manual_seed(config.seed)
+    torch.cuda.manual_seed_all(config.seed)
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = True
+
+    if config.distributed:
+        config.gpu = config.local_rank % torch.cuda.device_count()
+        torch.cuda.set_device(config.gpu)
+        # distributed init
+        torch.distributed.init_process_group(backend='nccl', init_method=config.dist_url,
+                                             world_size=config.world_size, rank=config.local_rank)
+
+        config.world_size = torch.distributed.get_world_size()
+        config.total_batch_size = config.world_size * config.batch_size
+    else:
+        config.total_batch_size = config.batch_size
+
+    loaders, samplers = get_augment_datasets(config)
+    train_loader, valid_loader = loaders
+    train_sampler, valid_sampler = samplers
+
+    net_crit = nn.CrossEntropyLoss().cuda()
+    controller = CDARTSController(config, net_crit, n_nodes=4, stem_multiplier=config.stem_multiplier)
+
+    file = open(config.cell_file, 'r')
+    js = file.read()
+    r_dict = json.loads(js)
+    if config.local_rank == 0:
+        logger.info(r_dict)
+    file.close()
+    genotypes_dict = {}
+    for layer_idx, genotype in r_dict.items():
+        genotypes_dict[int(layer_idx)] = gt.from_str(genotype)
+
+    controller.build_augment_model(controller.init_channel, genotypes_dict)
+    resume_state = None
+    if config.resume:
+        resume_state = torch.load(config.resume_path, map_location='cpu')
+        controller.model_main.load_state_dict(resume_state['model_main'])
+
+    controller.model_main = controller.model_main.cuda()
+    param_size = utils.param_size(controller.model_main)
+    logger.info("param size = %fMB", param_size)
+
+    # change training hyper parameters according to cell type
+    if 'cifar' in config.dataset:
+        if param_size < 3.0:
+            config.weight_decay = 3e-4
+            config.drop_path_prob = 0.2
+        elif param_size > 3.0 and param_size < 3.5:
+            config.weight_decay = 3e-4
+            config.drop_path_prob = 0.3
+        else:
+            config.weight_decay = 5e-4
+            config.drop_path_prob = 0.3
+
+    if config.local_rank == 0:
+        logger.info("Current weight decay: {}".format(config.weight_decay))
+        logger.info("Current drop path prob: {}".format(config.drop_path_prob))
+
+    if config.distributed:  # apex is only imported in distributed mode
+        controller.model_main = apex.parallel.convert_syncbn_model(controller.model_main)
+    # weights optimizer
+    optimizer = torch.optim.SGD(controller.model_main.parameters(), lr=config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
+    # optimizer = torch.optim.SGD(controller.model_main.parameters(), lr=config.lr, momentum=config.momentum, weight_decay=config.weight_decay, nesterov=True)
+    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.epochs)
+
+    if config.use_amp:
+        controller.model_main, optimizer = amp.initialize(controller.model_main, optimizer, opt_level=config.opt_level)
+
+    if config.distributed:
+        controller.model_main = DDP(controller.model_main, delay_allreduce=True)
+
+    best_top1 = 0.
+    best_top5 = 0.
+    sta_epoch = 0
+    # training loop
+    if config.resume:
+        optimizer.load_state_dict(resume_state['optimizer'])
+        lr_scheduler.load_state_dict(resume_state['lr_scheduler'])
+        best_top1 = resume_state['best_top1']
+        best_top5 = resume_state['best_top5']
+        sta_epoch = resume_state['sta_epoch']
+
+    epoch_pool = [220, 230, 235, 240, 245]
+    for epoch in range(sta_epoch, config.epochs):
+        # reset iterators
+        train_sampler.set_epoch(epoch)
+        valid_sampler.set_epoch(epoch)
+        current_lr = lr_scheduler.get_lr()[0]
+        # current_lr = utils.adjust_lr(optimizer, epoch, config)
+
+        if config.local_rank == 0:
+            logger.info('Epoch: %d lr %e', epoch, current_lr)
+        if epoch < config.warmup_epochs and config.total_batch_size > 256:
+            for param_group in optimizer.param_groups:
+                param_group['lr'] = current_lr * (epoch + 1) / 5.0
+            if config.local_rank == 0:
+                logger.info('Warming-up Epoch: %d, LR: %e', epoch, current_lr * (epoch + 1) / 5.0)
+
+        drop_prob = config.drop_path_prob * epoch / config.epochs
+        controller.model_main.module.drop_path_prob(drop_prob)
+
+        # training
+        train(train_loader, controller.model_main, optimizer, epoch, writer, logger, config)
+
+        # validation
+        cur_step = (epoch+1) * len(train_loader)
+        top1, top5 = validate(valid_loader, controller.model_main, epoch, cur_step, writer, logger, config)
+
+        if 'cifar' in config.dataset:
+            lr_scheduler.step()
+        elif 'imagenet' in config.dataset:
+            lr_scheduler.step()
+            # current_lr = utils.adjust_lr(optimizer, epoch, config)
+        else:
+            raise Exception('Lr error!')
+
+        # track the best
+        if best_top1 < top1:
+            best_top1 = top1
+            best_top5 = top5
+            is_best = True
+        else:
+            is_best = False
+
+        # save
+        if config.local_rank == 0:
+            if ('imagenet' in config.dataset) and ((epoch+1) in epoch_pool) and (not config.resume):
+                torch.save({
+                    "model_main": controller.model_main.module.state_dict(),
+                    "optimizer": optimizer.state_dict(),
+                    "lr_scheduler": lr_scheduler.state_dict(),
+                    "best_top1": best_top1,
+                    "best_top5": best_top5,
+                    "sta_epoch": epoch + 1
+                }, os.path.join(config.path, "epoch_{}.pth.tar".format(epoch+1)))
+                utils.save_checkpoint(controller.model_main.module.state_dict(), config.path, is_best)
+
+            torch.save({
+                "model_main": controller.model_main.module.state_dict(),
+                "optimizer": optimizer.state_dict(),
+                "lr_scheduler": lr_scheduler.state_dict(),
+                "best_top1": best_top1,
+                "best_top5": best_top5,
+                "sta_epoch": epoch + 1
+            }, os.path.join(config.path, "retrain_resume.pth.tar"))
+            utils.save_checkpoint(controller.model_main.module.state_dict(), config.path, is_best)
+
+    if config.local_rank == 0:
+        logger.info("Final best Prec@1 = {:.4%}, Prec@5 = {:.4%}".format(best_top1, best_top5))
+
+
+if __name__ == "__main__":
+    main()
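For reference, the resume file written at the end of each epoch above bundles weights and training state under fixed keys. A minimal sketch of inspecting it; the keys mirror the `torch.save` call in `retrain.py`:

```python
# Sketch: inspect (or hand-resume from) the checkpoint written by retrain.py.
import torch

state = torch.load('retrain_resume.pth.tar', map_location='cpu')
print(sorted(state.keys()))
# -> ['best_top1', 'best_top5', 'lr_scheduler', 'model_main', 'optimizer', 'sta_epoch']
# model weights live in state['model_main']; training restarts at state['sta_epoch']
```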
diff --git a/CDARTS/scripts/run_retrain_cifar_1gpu.sh b/CDARTS/scripts/run_retrain_cifar_1gpu.sh
new file mode 100644
index 0000000..4641a13
--- /dev/null
+++ b/CDARTS/scripts/run_retrain_cifar_1gpu.sh
@@ -0,0 +1,14 @@
+NGPUS=1
+SGPU=0
+EGPU=$[NGPUS+SGPU-1]
+GPU_ID=`seq -s , $SGPU $EGPU`
+CUDA_VISIBLE_DEVICES=$GPU_ID python -m torch.distributed.launch --nproc_per_node=$NGPUS ./CDARTS/retrain.py \
+    --name cifar10-retrain --dataset cifar10 --model_type cifar \
+    --n_classes 10 --init_channels 36 --stem_multiplier 3 \
+    --cell_file './genotypes.json' \
+    --batch_size 128 --workers 1 --print_freq 100 \
+    --world_size $NGPUS --weight_decay 5e-4 \
+    --distributed --dist_url 'tcp://127.0.0.1:26443' \
+    --lr 0.025 --warmup_epochs 0 --epochs 600 \
+    --cutout_length 16 --aux_weight 0.4 --drop_path_prob 0.3 \
+    --label_smooth 0.0 --mixup_alpha 0
\ No newline at end of file
diff --git a/CDARTS/scripts/run_retrain_cifar_4gpus.sh b/CDARTS/scripts/run_retrain_cifar_4gpus.sh
new file mode 100644
index 0000000..ec00a09
--- /dev/null
+++ b/CDARTS/scripts/run_retrain_cifar_4gpus.sh
@@ -0,0 +1,15 @@
+NGPUS=4
+SGPU=0
+EGPU=$[NGPUS+SGPU-1]
+GPU_ID=`seq -s , $SGPU $EGPU`
+CUDA_VISIBLE_DEVICES=$GPU_ID python -m torch.distributed.launch --nproc_per_node=$NGPUS ./CDARTS/retrain.py \
+    --name cifar10-retrain --dataset cifar10 --model_type cifar \
+    --n_classes 10 --init_channels 36 --stem_multiplier 3 \
+    --cell_file './genotypes.json' \
+    --batch_size 128 --workers 1 --print_freq 100 \
+    --world_size $NGPUS --weight_decay 5e-4 \
+    --distributed --dist_url 'tcp://127.0.0.1:26443' \
+    --lr 0.1 --warmup_epochs 0 --epochs 600 \
+    --cutout_length 16 --aux_weight 0.4 --drop_path_prob 0.3 \
+    --label_smooth 0.0 --mixup_alpha 0
+
diff --git a/CDARTS/scripts/run_retrain_imagenet.sh b/CDARTS/scripts/run_retrain_imagenet.sh
new file mode 100644
index 0000000..2bb0844
--- /dev/null
+++ b/CDARTS/scripts/run_retrain_imagenet.sh
@@ -0,0 +1,15 @@
+NGPUS=8
+SGPU=0
+EGPU=$[NGPUS+SGPU-1]
+GPU_ID=`seq -s , $SGPU $EGPU`
+CUDA_VISIBLE_DEVICES=$GPU_ID python -m torch.distributed.launch --nproc_per_node=$NGPUS ./CDARTS/retrain.py \
+    --name imagenet-retrain --dataset imagenet --model_type imagenet \
+    --n_classes 1000 --init_channels 48 --stem_multiplier 1 \
+    --batch_size 128 --workers 4 --print_freq 100 \
+    --cell_file './genotypes.json' \
+    --world_size $NGPUS --weight_decay 3e-5 \
+    --distributed --dist_url 'tcp://127.0.0.1:24443' \
+    --lr 0.5 --warmup_epochs 5 --epochs 250 \
+    --cutout_length 0 --aux_weight 0.4 --drop_path_prob 0.0 \
+    --label_smooth 0.1 --mixup_alpha 0 \
+    --resume_name "retrain_resume.pth.tar"
\ No newline at end of file
diff --git a/CDARTS/scripts/run_search_cifar_1gpu.sh b/CDARTS/scripts/run_search_cifar_1gpu.sh
new file mode 100644
index 0000000..2db2bb7
--- /dev/null
+++ b/CDARTS/scripts/run_search_cifar_1gpu.sh
@@ -0,0 +1,20 @@
+NGPUS=1
+SGPU=0
+EGPU=$[NGPUS+SGPU-1]
+GPU_ID=`seq -s , $SGPU $EGPU`
+CUDA_VISIBLE_DEVICES=$GPU_ID python -m torch.distributed.launch --nproc_per_node=$NGPUS ./CDARTS/search.py \
+    --name cifar10-search --dataset cifar10 --model_type cifar \
+    --n_classes 10 --init_channels 16 --layer_num 3 --stem_multiplier 3 \
+    --batch_size 64 --sample_ratio 1.0 \
+    --workers 1 --print_freq 10 \
+    --distributed --world_size $NGPUS --dist_url 'tcp://127.0.0.1:23343' \
+    --use_apex --sync_param \
+    --regular --regular_ratio 0.667 --regular_coeff 5 \
+    --clean_arch --loss_alpha 1 \
+    --ensemble_param \
+    --w_lr 0.08 --alpha_lr 3e-4 --nasnet_lr 0.08 \
+    --w_weight_decay 3e-4 --alpha_weight_decay 0. \
+    --one_stage --repeat_cell --fix_head \
+    --interactive_type 3 \
+    --pretrain_epochs 2 --pretrain_decay 0 \
+    --search_iter 32 --search_iter_epochs 1 --nasnet_warmup 1
\ No newline at end of file
diff --git a/CDARTS/scripts/run_search_cifar_4gpus.sh b/CDARTS/scripts/run_search_cifar_4gpus.sh
new file mode 100644
index 0000000..2a617e8
--- /dev/null
+++ b/CDARTS/scripts/run_search_cifar_4gpus.sh
@@ -0,0 +1,20 @@
+NGPUS=4
+SGPU=0
+EGPU=$[NGPUS+SGPU-1]
+GPU_ID=`seq -s , $SGPU $EGPU`
+CUDA_VISIBLE_DEVICES=$GPU_ID python -m torch.distributed.launch --nproc_per_node=$NGPUS ./CDARTS/search.py \
+    --name cifar10-search --dataset cifar10 --model_type cifar \
+    --n_classes 10 --init_channels 16 --layer_num 3 --stem_multiplier 3 \
+    --batch_size 64 --sample_ratio 1.0 \
+    --workers 1 --print_freq 10 \
+    --distributed --world_size $NGPUS --dist_url 'tcp://127.0.0.1:23343' \
+    --use_apex --sync_param \
+    --regular --regular_ratio 0.667 --regular_coeff 5 \
+    --clean_arch --loss_alpha 1 \
+    --ensemble_param \
+    --w_lr 0.2 --alpha_lr 3e-4 --nasnet_lr 0.2 \
+    --w_weight_decay 3e-4 --alpha_weight_decay 0. \
+    --one_stage --repeat_cell --fix_head \
+    --interactive_type 3 \
+    --pretrain_epochs 2 --pretrain_decay 0 \
+    --search_iter 32 --search_iter_epochs 1 --nasnet_warmup 1
\ No newline at end of file
diff --git a/CDARTS/scripts/run_search_imagenet.sh b/CDARTS/scripts/run_search_imagenet.sh
new file mode 100644
index 0000000..8be6066
--- /dev/null
+++ b/CDARTS/scripts/run_search_imagenet.sh
@@ -0,0 +1,20 @@
+NGPUS=8
+SGPU=0
+EGPU=$[NGPUS+SGPU-1]
+GPU_ID=`seq -s , $SGPU $EGPU`
+CUDA_VISIBLE_DEVICES=$GPU_ID python -m torch.distributed.launch --nproc_per_node=$NGPUS ./CDARTS/search.py \
+    --name imagenet-search --dataset imagenet --model_type imagenet \
+    --n_classes 1000 --init_channels 16 --stem_multiplier 1 \
+    --distributed --world_size $NGPUS --dist_url 'tcp://127.0.0.1:23343' \
+    --batch_size 128 --sample_ratio 0.2 \
+    --workers 4 --print_freq 10 \
+    --use_apex --sync_param \
+    --regular --regular_ratio 0.5 --regular_coeff 5 \
+    --clean_arch --loss_alpha 1 \
+    --ensemble_param \
+    --w_lr 0.8 --alpha_lr 3e-4 --nasnet_lr 0.8 \
+    --w_weight_decay 1e-5 --alpha_weight_decay 1e-5 \
+    --one_stage --repeat_cell \
+    --interactive_type 0 \
+    --pretrain_epochs 10 --pretrain_decay 3 \
+    --search_iter 40 --search_iter_epochs 1 --nasnet_warmup 3
\ No newline at end of file
diff --git a/CDARTS/scripts/run_test_cifar.sh b/CDARTS/scripts/run_test_cifar.sh
new file mode 100644
index 0000000..ce0c94f
--- /dev/null
+++ b/CDARTS/scripts/run_test_cifar.sh
@@ -0,0 +1,11 @@
+NGPUS=4
+SGPU=0
+EGPU=$[NGPUS+SGPU-1]
+GPU_ID=`seq -s , $SGPU $EGPU`
+CUDA_VISIBLE_DEVICES=$GPU_ID python -m torch.distributed.launch --nproc_per_node=$NGPUS ./CDARTS/test.py \
+    --name cifar10-retrain --dataset cifar10 --model_type cifar \
+    --n_classes 10 --init_channels 36 --stem_multiplier 3 \
+    --batch_size 128 --workers 1 --print_freq 100 \
+    --cell_file 'cells/cifar_genotype.json' \
+    --world_size $NGPUS --distributed --dist_url 'tcp://127.0.0.1:24443' \
+    --resume --resume_name 'c10.pth.tar'
diff --git a/CDARTS/scripts/run_test_imagenet.sh b/CDARTS/scripts/run_test_imagenet.sh
new file mode 100644
index 0000000..adfc597
--- /dev/null
+++ b/CDARTS/scripts/run_test_imagenet.sh
@@ -0,0 +1,11 @@
+NGPUS=4
+SGPU=0
+EGPU=$[NGPUS+SGPU-1]
+GPU_ID=`seq -s , $SGPU $EGPU`
+CUDA_VISIBLE_DEVICES=$GPU_ID python -m torch.distributed.launch --nproc_per_node=$NGPUS ./CDARTS/test.py \
+    --name imagenet-retrain --dataset imagenet --model_type imagenet \
+    --n_classes 1000 --init_channels 48 --stem_multiplier 1 \
+    --batch_size 128 --workers 4 --print_freq 10 \
+    --cell_file 'cells/imagenet_genotype.json' \
+    --world_size $NGPUS --distributed --dist_url 'tcp://127.0.0.1:24443' \
+    --resume --resume_name 'imagenet.pth.tar'
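All of these scripts go through `torch.distributed.launch`, which spawns `--nproc_per_node` processes and passes each one a `--local_rank` argument. A hedged sketch of the per-process wiring that the repo's `SearchConfig`/`AugmentConfig` classes are assumed to perform with it:

```python
# Sketch: handling the --local_rank argument injected by torch.distributed.launch
# (an assumption about what the config classes do; they are not shown here).
import argparse
import torch

parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=0)  # injected by the launcher
args, _ = parser.parse_known_args()

# Same device selection as in search.py/retrain.py: one GPU per process.
torch.cuda.set_device(args.local_rank % torch.cuda.device_count())
```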
diff --git a/CDARTS/search.py b/CDARTS/search.py
new file mode 100644
index 0000000..02575a8
--- /dev/null
+++ b/CDARTS/search.py
@@ -0,0 +1,403 @@
+""" Search cell """
+import _init_paths
+import os
+import copy
+import json
+import torch
+import time
+import math
+import torch.nn as nn
+import numpy as np
+
+from tensorboardX import SummaryWriter
+from lib.models.cdarts_controller import CDARTSController
+from lib.utils.visualize import plot
+from lib.utils import utils
+from lib.core.search_function import search, retrain_warmup
+
+from lib.config import SearchConfig
+config = SearchConfig()
+
+if 'cifar' in config.dataset:
+    from lib.datasets.cifar import get_search_datasets
+elif 'imagenet' in config.dataset:
+    from lib.datasets.imagenet import get_search_datasets
+
+# tensorboard
+writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
+writer.add_text('config', config.as_markdown(), 0)
+
+logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
+if config.local_rank == 0:
+    config.print_params(logger.info)
+
+try:
+    os.makedirs(config.retrain_path)
+except:
+    pass
+
+if config.use_apex:
+    import apex
+    from apex.parallel import DistributedDataParallel as DDP
+else:
+    DDP = torch.nn.parallel.DistributedDataParallel
+
+
+def main():
+    logger.info("Logger is set - training start")
+
+    # set seed
+    np.random.seed(config.seed)
+    torch.manual_seed(config.seed)
+    torch.cuda.manual_seed_all(config.seed)
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = True
+
+    if config.distributed:
+        config.gpu = config.local_rank % torch.cuda.device_count()
+        torch.cuda.set_device(config.gpu)
+        # distributed init
+        torch.distributed.init_process_group(backend='nccl', init_method=config.dist_url,
+                                             world_size=config.world_size, rank=config.local_rank)
+
+        config.world_size = torch.distributed.get_world_size()
+        config.total_batch_size = config.world_size * config.batch_size
+    else:
+        config.total_batch_size = config.batch_size
+
+    loaders, samplers = get_search_datasets(config)
+    train_loader, valid_loader = loaders
+    train_sampler, valid_sampler = samplers
+
+    net_crit = nn.CrossEntropyLoss().cuda()
+    controller = CDARTSController(config, net_crit, n_nodes=4, stem_multiplier=config.stem_multiplier)
+    if config.param_pool_path is not None:
+        param_pool = torch.load(config.param_pool_path, map_location='cpu')
+        controller.load_state_dict(param_pool, strict=False)
+
+    resume_state = None
+    if config.resume:
+        resume_state = torch.load(config.resume_path, map_location='cpu')
+
+    sta_layer_idx = 0
+    if config.resume:
+        controller.load_state_dict(resume_state['controller'])
+        sta_layer_idx = resume_state['sta_layer_idx']
+
+    controller = controller.cuda()
+    if config.sync_bn:
+        if config.use_apex:
+            controller = apex.parallel.convert_syncbn_model(controller)
+        else:
+            controller = torch.nn.SyncBatchNorm.convert_sync_batchnorm(controller)
+
+    if config.use_apex:
+        controller = DDP(controller, delay_allreduce=True)
+    else:
+        controller = DDP(controller, device_ids=[config.gpu])
+
+    # warm up model_search
+    layer_idx = 0
+    if config.ensemble_param:
+        w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
+                                    {"params": controller.module.super_layers[layer_idx].parameters(), 'lr':config.w_lr},
+                                    {"params": controller.module.super_layers[layer_idx+1:].parameters()},
+                                    {"params": controller.module.fc_super.parameters()},
+                                    {"params": controller.module.distill_aux_head1.parameters()},
+                                    {"params": controller.module.distill_aux_head2.parameters()},
+                                    {"params": controller.module.ensemble_param},
+                                    {"params": controller.module.nas_layers[:layer_idx].parameters()}],
+                                    lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
+    else:
+        w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
+                                    {"params": controller.module.super_layers[layer_idx].parameters(), 'lr':config.w_lr},
+                                    {"params": controller.module.super_layers[layer_idx+1:].parameters()},
+                                    {"params": controller.module.fc_super.parameters()},
+                                    {"params": controller.module.distill_aux_head1.parameters()},
+                                    {"params": controller.module.distill_aux_head2.parameters()},
+                                    {"params": controller.module.nas_layers[:layer_idx].parameters()}],
+                                    lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
+
+    for layer_idx in range(sta_layer_idx, config.layer_num):
+        if config.one_stage:
+            if layer_idx > 0:
+                break
+
+        # clean arch params in model_search
+        if config.clean_arch:
+            controller.module.init_arch_params(layer_idx)
+
+        # search training loop
+        best_top1 = 0.
+        best_genotypes = []
+        best_connects = []
+        sta_search_iter, sta_search_epoch = 0, 0
+        is_best = True
+        if (layer_idx == sta_layer_idx) and (resume_state is not None):
+            sta_search_iter = resume_state['sta_search_iter']
+            sta_search_epoch = resume_state['sta_search_epoch']
+            best_top1 = resume_state['best_top1']
+            best_genotypes = resume_state['best_genotypes']
+            best_connects = resume_state['best_connects']
+        else:
+            # init model main
+            if config.gumbel_sample:
+                genotype, connect = controller.module.generate_genotype_gumbel(0)
+            else:
+                genotype, connect = controller.module.generate_genotype(0)
+            for i in range(config.layer_num):
+                best_genotypes.append(genotype)
+                best_connects.append(connect)
+
+        for i in range(config.layer_num):
+            controller.module.genotypes[i] = best_genotypes[i]
+            controller.module.connects[i] = best_connects[i]
+
+        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
+            w_optim, config.search_iter * config.search_iter_epochs, eta_min=config.w_lr_min)
+        lr_scheduler_retrain = nn.ModuleList()
+        alpha_optim = nn.ModuleList()
+        optimizer = nn.ModuleList()
+        sub_epoch = 0
+
+        for search_iter in range(sta_search_iter, config.search_iter):
+            if search_iter < config.pretrain_epochs:
+                if config.local_rank == 0:
+                    logger.info("####### Super model warmup #######")
+                train_sampler.set_epoch(search_iter)
+                retrain_warmup(train_loader, controller, w_optim, layer_idx, search_iter, writer, logger, True, config.pretrain_epochs, config)
+                #lr_scheduler.step()
+            else:
+                # build new controller
+                for i, genotype in enumerate(best_genotypes):
+                    controller.module.build_nas_layers(i, genotype, config.same_structure)
+
+                controller_b = copy.deepcopy(controller.module)
+                del controller
+                controller = controller_b.cuda()
+                controller.fix_pre_layers(layer_idx)
+
+                #if search_iter > config.regular_ratio * config.search_iter:
+                #    config.regular = False
+
+                # sync params from super layer pool
+                for i in range(layer_idx, config.layer_num):
+                    controller.copy_params_from_super_layer(i)
+
+                if config.sync_bn:
+                    if config.use_apex:
+                        controller = apex.parallel.convert_syncbn_model(controller)
+                    else:
+                        controller = torch.nn.SyncBatchNorm.convert_sync_batchnorm(controller)
+
+                if config.use_apex:
+                    controller = DDP(controller, delay_allreduce=True)
+                else:
+                    controller = DDP(controller, device_ids=[config.gpu])
+
+                # weights optimizer
+                if config.ensemble_param:
+                    w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
+                                                {"params": controller.module.super_layers[layer_idx].parameters(), 'lr':config.w_lr},
+                                                {"params": controller.module.super_layers[layer_idx+1:].parameters()},
+                                                {"params": controller.module.fc_super.parameters()},
+                                                {"params": controller.module.distill_aux_head1.parameters()},
+                                                {"params": controller.module.distill_aux_head2.parameters()},
+                                                {"params": controller.module.ensemble_param},
+                                                {"params": controller.module.nas_layers[:layer_idx].parameters()}],
+                                                lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
+                else:
+                    w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
+                                                {"params": controller.module.super_layers[layer_idx].parameters(), 'lr':config.w_lr},
+                                                {"params": controller.module.super_layers[layer_idx+1:].parameters()},
+                                                {"params": controller.module.fc_super.parameters()},
+                                                {"params": controller.module.distill_aux_head1.parameters()},
+                                                {"params": controller.module.distill_aux_head2.parameters()},
+                                                {"params": controller.module.nas_layers[:layer_idx].parameters()}],
+                                                lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
+                # arch_params optimizer
+                if config.repeat_cell:
+                    alpha_optim = torch.optim.Adam(controller.module.super_layers_arch[0].parameters(), config.alpha_lr, betas=(0.5, 0.999),
+                                                   weight_decay=config.alpha_weight_decay)
+                else:
+                    alpha_optim = torch.optim.Adam(controller.module.super_layers_arch[layer_idx:].parameters(), config.alpha_lr, betas=(0.5, 0.999),
+                                                   weight_decay=config.alpha_weight_decay)
+
+                if config.ensemble_param:
+                    optimizer = torch.optim.SGD([{"params": controller.module.feature_extractor.parameters()},
+                                                 {"params": controller.module.nas_layers.parameters(), 'lr':config.nasnet_lr*0.1 if config.param_pool_path else config.nasnet_lr},
+                                                 {"params": controller.module.ensemble_param},
+                                                 {"params": controller.module.distill_aux_head1.parameters()},
+                                                 {"params": controller.module.distill_aux_head2.parameters()},
+                                                 {"params": controller.module.fc_nas.parameters()}],
+                                                 lr=config.nasnet_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
+                else:
+                    optimizer = torch.optim.SGD([{"params": controller.module.feature_extractor.parameters()},
+                                                 {"params": controller.module.nas_layers.parameters(), 'lr':config.nasnet_lr*0.1 if config.param_pool_path else config.nasnet_lr},
+                                                 {"params": controller.module.distill_aux_head1.parameters()},
+                                                 {"params": controller.module.distill_aux_head2.parameters()},
+                                                 {"params": controller.module.fc_nas.parameters()}],
+                                                 lr=config.nasnet_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
+
+                lr_scheduler_retrain = torch.optim.lr_scheduler.CosineAnnealingLR(
+                    optimizer, config.search_iter_epochs, eta_min=config.w_lr_min)
+                lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
+                    w_optim, config.search_iter * config.search_iter_epochs, eta_min=config.w_lr_min)
+
+                if (layer_idx == sta_layer_idx) and (resume_state is not None) and (resume_state['sta_search_epoch'] > config.pretrain_epochs):
+                    w_optim.load_state_dict(resume_state['w_optim'])
+                    alpha_optim.load_state_dict(resume_state['alpha_optim'])
+                    lr_scheduler.load_state_dict(resume_state['lr_scheduler'])
+                    lr_scheduler_retrain.load_state_dict(resume_state['lr_scheduler_retrain'])
+                else:
+                    # lr_scheduler
+                    pass
+                    #for i in range(search_iter * config.search_iter_epochs):
+                    #    lr_scheduler.step()
+
+                # warmup model main
+                if config.local_rank == 0:
+                    logger.info("####### Sub model warmup #######")
+                for warmup_epoch in range(config.nasnet_warmup):
+                    valid_sampler.set_epoch(warmup_epoch)
+                    retrain_warmup(valid_loader, controller, optimizer, layer_idx, warmup_epoch, writer, logger, False, config.nasnet_warmup, config)
+
+                best_top1 = 0.
+                sub_epoch = 0
+
+                for sub_epoch in range(sta_search_epoch, config.search_iter_epochs):
+
+                    lr_search = lr_scheduler.get_lr()[0]
+                    lr_main = lr_scheduler_retrain.get_lr()[0]
+
+                    search_epoch = search_iter * config.search_iter_epochs + sub_epoch
+
+                    # reset iterators
+                    train_sampler.set_epoch(search_epoch)
+                    valid_sampler.set_epoch(search_epoch)
+
+                    # training
+                    search(train_loader, valid_loader, controller, optimizer, w_optim, alpha_optim, layer_idx, search_epoch, writer, logger, config)
+
+                    # validation
+                    step_num = len(valid_loader)
+                    cur_step = (search_epoch+1) * step_num
+                    top1 = 1.
+
+                    genotypes = []
+                    connects = []
+
+                    if config.gumbel_sample:
+                        genotype, connect = controller.module.generate_genotype_gumbel(0)
+                    else:
+                        genotype, connect = controller.module.generate_genotype(0)
+
+                    for i in range(config.layer_num):
+                        genotypes.append(genotype)
+                        connects.append(connect)
+
+                    if config.local_rank == 0:
+                        # for i in range(config.layer_num - layer_idx):
+                        #     logger.info ("Stage: {} Layer: {}".format(layer_idx, i+layer_idx+1))
+                        logger.info ("Genotypes: ")
+                        # controller.module.print_arch_params(logger, i+layer_idx)
+                        controller.module.print_arch_params(logger, 0)
+
+                    for i in range(config.layer_num - layer_idx):
+                        if config.local_rank == 0:
+                            # genotype
+                            genotype = genotypes[i]
+                            logger.info("Stage: {} Layer: {} genotype = {}".format(layer_idx, i+layer_idx+1, genotype))
+                            # genotype as a image
+                            plot_path = os.path.join(config.plot_path, "Stage_{}_Layer_{}_EP_{:02d}".format(layer_idx, layer_idx+i+1, search_epoch+1))
+                            caption = "Stage_{}_Layer_{}_Epoch_{}".format(layer_idx, layer_idx+i+1, search_epoch+1)
+                            plot(genotype.normal, plot_path + "-normal", caption)
+                            plot(genotype.reduce, plot_path + "-reduce", caption)
+
+                    # sync params to super layer pool
+                    for i in range(layer_idx, config.layer_num):
+                        controller.module.copy_params_from_nas_layer(i)
+
+                    # save
+                    best_top1 = top1
+                    best_genotypes = genotypes
+                    best_connects = connects
+
+                    for i in range(config.layer_num):
+                        controller.module.genotypes[i] = best_genotypes[i]
+                        controller.module.connects[i] = best_connects[i]
+
+                    #lr_scheduler.step()
+                    #lr_scheduler_retrain.step()
+
+                    if config.local_rank == 0:
+                        utils.save_checkpoint(controller.module, config.path, is_best)
+                        torch.save({
+                            'controller': controller.module.state_dict(),
+                            'sta_layer_idx': layer_idx,
+                            'w_optim': w_optim.state_dict(),
+                            'alpha_optim': alpha_optim.state_dict(),
+                            'lr_scheduler': lr_scheduler.state_dict(),
+                            'sta_search_iter': search_iter,
+                            'sta_search_epoch': sub_epoch + 1,
+                            'best_top1': best_top1,
+                            'best_genotypes': best_genotypes,
+                            'best_connects': best_connects,
+                            'lr_scheduler_retrain': lr_scheduler_retrain.state_dict(),
+                            'optimizer': optimizer.state_dict()
+                        }, os.path.join(config.path, 'search_resume.pth.tar'))
+
+            torch.cuda.empty_cache()
+            sta_search_epoch = 0
+
+        # clean
+        del w_optim
+        del alpha_optim
+        del optimizer
+        torch.cuda.empty_cache()
+        config.pretrain_epochs = max(config.pretrain_epochs - config.pretrain_decay, 0)
+
+    # genotype as a image
+    for i in range(config.layer_num):
+        genotype, connect = controller.module.generate_genotype(i)
+        controller.module.genotypes[i] = genotype
+        controller.module.connects[i] = connect
+
+    if config.local_rank == 0:
+        for layer_idx, genotype in controller.module.genotypes.items():
+            logger.info("layer_idx : {}".format(layer_idx+1))
+            logger.info("genotype = {}".format(genotype))
+
+            plot_path = os.path.join(config.plot_path, "Final_Layer_{}_genotype".format(layer_idx+1))
+            caption = "Layer_{}".format(layer_idx+1)
+            plot(genotype.normal, plot_path + "-normal", caption)
+            plot(genotype.reduce, plot_path + "-reduce", caption)
+
+    # save dict as json
+    if config.local_rank == 0:
+        for layer_idx, genotype in controller.module.genotypes.items():
+            controller.module.genotypes[layer_idx] = str(genotype)
+
+        js = json.dumps(controller.module.genotypes)
+        file = open('genotypes.json', 'w')
+        file.write(js)
+        file.close()
+
+
+if __name__ == "__main__":
+    sta_time = time.time()
+    main()
+    search_time = time.time() - sta_time
+    search_hour = math.floor(search_time / 3600)
+    search_min = math.floor(search_time / 60 - search_hour * 60)
+    if config.local_rank == 0:
+        logger.info("Search time: hour: {} minute: {}".format(search_hour, search_min))
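`search()` itself lives in `lib.core.search_function` and is not part of this patch; as called above, it alternates weight updates (train split, `w_optim`) with architecture-parameter updates (validation split, `alpha_optim`). A hedged sketch of one first-order DARTS-style step of that alternation, not the repo's actual implementation:

```python
# Sketch (assumption: first-order bilevel update, as in DARTS) of the alternation
# that search() is invoked with above.
def search_step(model, trn_batch, val_batch, w_optim, alpha_optim, criterion):
    trn_X, trn_y = trn_batch
    val_X, val_y = val_batch

    # 1) architecture parameters (alphas) step on held-out data
    alpha_optim.zero_grad()
    criterion(model(val_X), val_y).backward()
    alpha_optim.step()

    # 2) network weights step on training data
    w_optim.zero_grad()
    criterion(model(trn_X), trn_y).backward()
    w_optim.step()
```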
diff --git a/CDARTS/test.py b/CDARTS/test.py
new file mode 100644
index 0000000..e7e1771
--- /dev/null
+++ b/CDARTS/test.py
@@ -0,0 +1,96 @@
+""" Test cell """
+import _init_paths
+import os
+import torch
+import json
+import numpy as np
+import lib.utils.genotypes as gt
+
+from tensorboardX import SummaryWriter
+from lib.models.model_test import ModelTest
+from lib.utils import utils
+from lib.config import AugmentConfig
+from lib.core.augment_function import validate
+
+# config
+config = AugmentConfig()
+
+# make apex optional
+if config.distributed:
+    # DDP = torch.nn.parallel.DistributedDataParallel
+    try:
+        import apex
+        from apex.parallel import DistributedDataParallel as DDP
+        from apex import amp, optimizers
+        from apex.fp16_utils import *
+    except ImportError:
+        raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
+
+
+# tensorboard
+writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
+writer.add_text('config', config.as_markdown(), 0)
+
+logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
+if config.local_rank == 0:
+    config.print_params(logger.info)
+
+if 'cifar' in config.dataset:
+    from lib.datasets.cifar import get_augment_datasets
+elif 'imagenet' in config.dataset:
+    from lib.datasets.imagenet import get_augment_datasets
+else:
+    raise Exception("Unsupported dataset!")
+
+
+def main():
+    logger.info("Logger is set - training start")
+
+    # set seed
+    np.random.seed(config.seed)
+    torch.manual_seed(config.seed)
+    torch.cuda.manual_seed_all(config.seed)
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = True
+
+    if config.distributed:
+        config.gpu = config.local_rank % torch.cuda.device_count()
+        torch.cuda.set_device(config.gpu)
+        # distributed init
+        torch.distributed.init_process_group(backend='nccl', init_method=config.dist_url,
+                                             world_size=config.world_size, rank=config.local_rank)
+
+        config.world_size = torch.distributed.get_world_size()
+        config.total_batch_size = config.world_size * config.batch_size
+    else:
+        config.total_batch_size = config.batch_size
+
+    loaders, samplers = get_augment_datasets(config)
+    train_loader, valid_loader = loaders
+    train_sampler, valid_sampler = samplers
+
+    file = open(config.cell_file, 'r')
+    js = file.read()
+    r_dict = json.loads(js)
+    if config.local_rank == 0:
+        logger.info(r_dict)
+    file.close()
+    genotypes_dict = {}
+    for layer_idx, genotype in r_dict.items():
+        genotypes_dict[int(layer_idx)] = gt.from_str(genotype)
+
+    model_main = ModelTest(genotypes_dict, config.model_type, config.res_stem, init_channel=config.init_channels, \
+                           stem_multiplier=config.stem_multiplier, n_nodes=4, num_classes=config.n_classes)
+    resume_state = torch.load(config.resume_path, map_location='cpu')
+    model_main.load_state_dict(resume_state, strict=False)
+    model_main = model_main.cuda()
+
+    if config.distributed:
+        model_main = DDP(model_main, delay_allreduce=True)
+
+    top1, top5 = validate(valid_loader, model_main, 0, 0, writer, logger, config)
+    if config.local_rank == 0:
+        print("Final best Prec@1 = {:.4%}, Prec@5 = {:.4%}".format(top1, top5))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/CDARTS_detection/README.md b/CDARTS_detection/README.md
new file mode 100644
index 0000000..9b16a2d
--- /dev/null
+++ b/CDARTS_detection/README.md
@@ -0,0 +1,58 @@
+# CyDAS Detection Code Base
+
+### Environments
+- Python 3.7
+- PyTorch >= 1.8.2
+- Torchvision == 0.9.2
+
+You can directly run ```sh env.sh``` and ```sh compile.sh``` to set up the running environment.
+We use 8 GPUs (24GB RTX 3090) to train our detector; you can adjust the batch size in the configs yourself.
+
+### Data Preparation
+
+Your directory tree should look like this:
+
+````bash
+$HitDet.pytorch/data
+├── coco
+│   ├── annotations
+│   ├── train2017
+│   └── val2017
+│
+├── VOCdevkit
+│   ├── VOC2007
+│   │   ├── Annotations
+│   │   ├── ImageSets
+│   │   ├── JPEGImages
+│   │   ├── SegmentationClass
+│   │   └── SegmentationObject
+│   └── VOC2012
+│       ├── Annotations
+│       ├── ImageSets
+│       ├── JPEGImages
+│       ├── SegmentationClass
+│       └── SegmentationObject
+````
+
+### Getting Started
+
+Our pretrained backbone parameters can be found on [Google Drive](https://drive.google.com/drive/folders/1CkFp24bEDq0wUp504BQ68jn5Vs069qox)
+
+Installation
+* Clone this repo:
+```bash
+cd CyDAS_detection
+```
+* Install dependencies:
+```bash
+bash env.sh
+bash compile.sh
+```
+
+Train:
+```
+sh train.sh
+```
+
+## Acknowledgement
+Our code is based on the open source project [MMDetection](https://github.com/open-mmlab/mmdetection).
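A small sanity check, a sketch only, that the COCO part of the tree above is in place before kicking off training (the `data/coco` root matches the layout shown):

```python
# Sketch: verify the expected COCO directory layout before running train.sh.
import os

root = 'data/coco'
for sub in ('annotations', 'train2017', 'val2017'):
    path = os.path.join(root, sub)
    assert os.path.isdir(path), 'missing {}'.format(path)
```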
diff --git a/CDARTS_detection/compile.sh b/CDARTS_detection/compile.sh
new file mode 100644
index 0000000..babfdb9
--- /dev/null
+++ b/CDARTS_detection/compile.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+echo "Building roi align op..."
+cd mmdet/ops/roi_align
+if [ -d "build" ]; then
+    rm -r build
+fi
+python setup.py build_ext --inplace
+
+echo "Building roi pool op..."
+cd ../roi_pool
+if [ -d "build" ]; then
+    rm -r build
+fi
+python setup.py build_ext --inplace
+
+echo "Building nms op..."
+cd ../nms
+if [ -d "build" ]; then
+    rm -r build
+fi
+python setup.py build_ext --inplace
+
+echo "Building dcn..."
+cd ../dcn
+if [ -d "build" ]; then
+    rm -r build
+fi
+python setup.py build_ext --inplace
+
+echo "Building sigmoid focal loss op..."
+cd ../sigmoid_focal_loss
+if [ -d "build" ]; then
+    rm -r build
+fi
+python setup.py build_ext --inplace
+
+echo "Building masked conv op..."
+cd ../masked_conv
+if [ -d "build" ]; then
+    rm -r build
+fi
+python setup.py build_ext --inplace
diff --git a/CDARTS_detection/configs/CyDAS_retinanet_1x.py b/CDARTS_detection/configs/CyDAS_retinanet_1x.py
new file mode 100644
index 0000000..5ac2f1c
--- /dev/null
+++ b/CDARTS_detection/configs/CyDAS_retinanet_1x.py
@@ -0,0 +1,129 @@
+# model settings
+input_size = 300
+model = dict(
+    type='RetinaNet',
+    pretrained='/home2/hongyuan/cydas/spos/mmdetection/390.pth.tar',
+    backbone=dict(
+        type='SSDMobilenetV3',
+        input_size=input_size,
+        activation_type='relu6',
+        single_scale=True
+    ),
+    neck=dict(
+        type='FPN',
+        in_channels=[24, 40, 96, 960],
+        out_channels=256,
+        start_level=1,
+        add_extra_convs=True,
+        num_outs=5),
+    bbox_head=dict(
+        type='RetinaHead',
+        num_classes=81,
+        in_channels=256,
+        stacked_convs=4,
+        feat_channels=256,
+        octave_base_scale=4,
+        scales_per_octave=3,
+        anchor_ratios=[0.5, 1.0, 2.0],
+        anchor_strides=[8, 16, 32, 64, 128],
+        target_means=[.0, .0, .0, .0],
+        target_stds=[1.0, 1.0, 1.0, 1.0],
+        loss_cls=dict(
+            type='FocalLoss',
+            use_sigmoid=True,
+            gamma=2.0,
+            alpha=0.25,
+            loss_weight=1.0),
+        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
+# training and testing settings
+train_cfg = dict(
+    assigner=dict(
+        type='MaxIoUAssigner',
+        pos_iou_thr=0.5,
+        neg_iou_thr=0.4,
+        min_pos_iou=0,
+        ignore_iof_thr=-1),
+    allowed_border=-1,
+    pos_weight=-1,
+    debug=False)
+test_cfg = dict(
+    nms_pre=1000,
+    min_bbox_size=0,
+    score_thr=0.05,
+    nms=dict(type='nms', iou_thr=0.5),
+    max_per_img=100)
+# dataset settings
+dataset_type = 'CocoDataset'
+data_root = '/home2/hongyuan/data/coco/'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='LoadAnnotations', with_bbox=True),
+    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+    dict(type='RandomFlip', flip_ratio=0.5),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='Pad', size_divisor=32),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='MultiScaleFlipAug',
+        img_scale=(1333, 800),
+        flip=False,
+        transforms=[
+            dict(type='Resize', keep_ratio=True),
+            dict(type='RandomFlip'),
+            dict(type='Normalize', **img_norm_cfg),
+            dict(type='Pad', size_divisor=32),
+            dict(type='ImageToTensor', keys=['img']),
+            dict(type='Collect', keys=['img']),
+        ])
+]
+data = dict(
+    imgs_per_gpu=2,
+    workers_per_gpu=2,
+    train=dict(
+        type=dataset_type,
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=train_pipeline),
+    val=dict(
+        type=dataset_type,
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline),
+    test=dict(
+        type=dataset_type,
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=test_pipeline))
+# optimizer
+optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
+# learning policy
+lr_config = dict(
+    policy='step',
+    warmup='linear',
+    warmup_iters=500,
+    warmup_ratio=1.0 / 3,
+    step=[8, 11])
+checkpoint_config = dict(interval=1)
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        # dict(type='TensorboardLoggerHook')
+    ])
+# yapf:enable
+# runtime settings
+total_epochs = 12
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+work_dir = './work_dirs/CyDAS_390'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
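For readers tuning the head: under mmdetection's convention, `octave_base_scale=4` with `scales_per_octave=3` expands to anchor sizes of `stride * 4 * 2**(i/3)` per pyramid level. A sketch of what the settings above generate, a convention check rather than code from this repo:

```python
# Sketch: anchor sizes implied by the RetinaHead settings above
# (mmdetection convention: size = stride * octave_base_scale * 2**(i / scales_per_octave)).
octave_base_scale, scales_per_octave = 4, 3
for stride in [8, 16, 32, 64, 128]:
    sizes = [stride * octave_base_scale * 2 ** (i / scales_per_octave)
             for i in range(scales_per_octave)]
    print(stride, [round(s, 1) for s in sizes])
# e.g. stride 8 -> [32.0, 40.3, 50.8]; each size is further tiled at ratios 0.5/1.0/2.0
```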
diff --git a/CDARTS_detection/env.sh b/CDARTS_detection/env.sh
new file mode 100644
index 0000000..dcf5f52
--- /dev/null
+++ b/CDARTS_detection/env.sh
@@ -0,0 +1,14 @@
+# environment
+pip install Cython -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install pycocotools -i https://pypi.tuna.tsinghua.edu.cn/simple
+# pip install torchvision==0.3.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install terminaltables==3.1.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install addict -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install pytest-runner -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install imgaug==0.2.5 -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install opencv-python==4.1.1.26 -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install albumentations==0.4.3 -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install Pillow==6.2.1 -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install numpy==1.17.4 -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install imagecorruptions -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install requests -i https://pypi.tuna.tsinghua.edu.cn/simple
diff --git a/CDARTS_detection/mmcv/__init__.py b/CDARTS_detection/mmcv/__init__.py
new file mode 100644
index 0000000..796b1de
--- /dev/null
+++ b/CDARTS_detection/mmcv/__init__.py
@@ -0,0 +1,13 @@
+# flake8: noqa
+from .arraymisc import *
+from .utils import *
+from .fileio import *
+from .opencv_info import *
+from .image import *
+from .video import *
+from .visualization import *
+from .version import __version__
+# The following modules are not imported to this level, so mmcv may be used
+# without PyTorch.
+# - runner
+# - parallel
diff --git a/CDARTS_detection/mmcv/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmcv/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..471a09c58e86c9cd084c9a3f6ef2740f6c461cde
Binary files /dev/null and b/CDARTS_detection/mmcv/__pycache__/__init__.cpython-36.pyc differ
z8htNvj9*b-0om)j0I4qgruq)ZK3^?1ci{u-XCV81FVWMVDhuO(z<00^$U)@+@|kar z2grA-6_7)|02agW&ngMX;j|f4V7Wp-@Km%0P7HfoeASAlqE*Dat^vTpDy;|hrD4Ag zG?liT#yYOi;1#H}3w^*+TxG!UO6gg~bjVc=46l@aVeivilY!xt(%5`p>8`23>Q&me zoQN5&S-|j0X)Q;!gNr?DP*%b|_7H!P5rdeEOa&gBNz!989b!i;6lOM$%>?AJ&y&vw z<&#;!V{_7aY$h^~T`e#pFrhcv zo1ql!%qiZ<#W?AnfqWMTua)WQeChHuB1u=Z3YdbUsdf-Rm!&ga~2>Fho=dW=wI>SD` zjR{{bNS3zZerY({*Pi+K0Wm38r=y2NL4ogghR`EAYv9Q@EC~P0)kynZ=Y)7hjPcmx zAm*_-aeDDQyKQa2B<$T_;}O@JeLB2(64=@83l?F)&TfDH2~u3J2@67Y`wMTE;-Y7y zSpOd>HpCxmSv(-j!m{?^5i|L;}ce|1_UwWw(E3k`&J_l;XK_rFi~EDPDL=iWfhUVxJpP z)3?2?x=QI&g;KUqQ&IL_kFo2REyqARJb^T6Wp^651&r)YIZq>BWOo|$AqrOnd#esl zE+)To#5a&d#qZ>Iu9RW!Jh`0^XAG8NR)rL^XQ8nAbcvgZz-XJLy0~sGu&iGYGd&tC zVBAStJ#a9-i;K4*b`KoZ?&2ap;CmOrXXqhcS1dlNtBXhQ!oKTU#gO*C09`kZ~CFI+n`HOum6c?yxWk|VMzbZV*nl_;39~1%l{4z=|2dmty=-91IFNEK6+?* zQ;{{GfoBGchMy>_H3<0k8aODHmoHSIAq71#8t4q8LBPM)aEoZL@q`2#CN^m}6Qg4^ z2>ACJCQ-w{En0(V|E#9k0{(q%hK*(A%ti(88pU{FGSKffa{lmw`lXB1CrIT7atu%X z=6J9QbYCG)teG&9_~=@yX{ns)7y<9kQkR(|iI21m>q4>uZn{5AmV6l9T|fv>HfLcA zNR5*m$mAn+ja@yVp!-BQ8_R^GS=2Jf%8A2-^e&TX%u;{iAt_qkgJKg?mo_Ii5rgIZ zuTXO`LVz!pBe;!}VCW|Su#$+P@64Y$H;sZ+gGe#k>AoK!nsXyo9q&9bQX#b^@dFCH zC<3D8BDk&lo1&#J1;m2_HE)n=b7s(Ezle#YcZ+2z^G8^xRpqRqg^i+wt3Z>5hJP)Fs{2e)!sS8p#fqHSBT@BX zsmdjzIqOk`Y4=^?bS@)$S1qyGNcRlR!)BoS4;%_EFWR!Oa7JNwl}&4n?3G zTfCeNNYvgA(j%^OB4y>gHV`d)WqNYiYie4e*%~~T$@3=K%;m7DeZ%G#QMx}$!?{cz zSv%+FFxbCJn|z+4y(o|vDz3yEO&~dE^gu&y5#<`&)RL8RAJ#zclZtPnvSKVk=pUQT z`8*E-&nbSPh%2(LD*`!hv2LdHkE?Vu3pM{RtG=$7-ne`>RW&K(1$y~y#RSfE{Yd`c zw&QHh?SFxjkLE!sN5Gc_0@Q_0DHIYUD-A67n<2D zN(a~FfIWh2?j!!fu4KktNjCQvnk|v5P3py|Ss{&c;~|YBAI8HF?>Vmdu$mhYLmrZV zyJPQ%{NyNcZ=$WGLhG29uqJwaiUrx+vov+a8ZsBpHN6M1Fj}q zW%Gzp4|$PLZ6|gETu1r@`xfdso^Xvlhz0}xFP|ZceoO9`>HwI>so`W$a#}6ceJu%TZ8vd z(JidX-IFE;CP6%~7$0ui-5Z(O9dHlDxVgjbO;~H%&Qf0Yd?eE(OhX?}G$`9GU6O}8 z%I7Wy1Lue^iB${9&jH^b=(1|*9t%#$$K!aam7C_*_#P<&lHeBC`MUN3k|;>6+}tU@ zh}B!&r(+olsn()g9#E+y_iU&$CM1gqw$DLARzck=Tt*ucpwy}bAUDQdE67*gD&XFN z9(!yug3n?#Q*GRoZW~aJdp3sfIv~C;O29~Q6SjQkBPdizbn^!IMAn=ONtNn8oi*|J zz&#h^6&VEKGIk~1O(DxSHyucZn~aceARa%cOi>f|)fWS4D@c|v7>*&?J{ya_YA2jr z-~7=)!s3epUr$=wUc5BI_cdZgbr58v@B4Tl9R(TXt4B=Yt6lVLG0eS*=FW$CsSJ&jDgbbC6zwZ$eU)#7~Dn_cvi$Kph+ZZ`QmB z$xn?IVas=85Xcxo;(UJI@sx@b)m#kewjos$?}k$o5#91cK~0S39giiq{06aoJb`Ht zJfn_B1$A8f8&puECdIcx?kc2$N5f!D=Dr!o<8(`HJkMF;oK_-aR9$=;#M^hruCUGq4;|b#!9H{E}c+OATcPi$pni_vDIQ)7QTS%atoADQooo%czEq)wGEm~!? 
z%tiFR2G|~xgy(Xt=rPOw&I34saGX62$tfdId(t%o(i#5kkWXNxZ}2C2lP%X!)*r}s z-FXKcG%+!gbySDJ`0~#2MERI@*wiRwe`f4`>2M?3QG;9y$tnT0q^J%-LM{2xsYS$$V04`Az0B5l1Q!Hy z3;DPhFfA;Ue9$f^8a`!5bR}!}#xRhEp!$Nfr{U|e4M9exm6sxet5vrh0w!J(tK)*) z;1@}3O%h%{z7;=N^jdivQlV?Mu2LMWGFw-9iE%ndcvhafC6EDvxK}}NM4ZYAo(f>( z$^H9TYEbY~xMSqWxt7Wel5gc#SU84WLok8gTiXaohRAK@t>k<OhM6#Br*JH$!-uDnBZm%ydN@P*!Y<#5KRVvM<3pE2qR?2^^SjG}Fb5A|L;@3o80 z3N*oz2@eRpxE)j!4aTadvNEWyI2&*gPqC;T8IQrqR&imB(ldkSjjZCSF-mp@b9q70 zTB_Fw_-DoVgEL@Ll&=x+uZ{5wo7dL}_>af2hw_?6A;YXp23J0*RnZC+m^ zG)H(do7dF{ZOzAjFtOCR@@6)F@q9dpIZw{J6cqI=L?cq(6u5XCDsy6AEmaEg7g}nosD-cR7~Ty{yd8#DkdI#(xGja> zVkyXnPj`w4dNTY4Yz=S>PF+Q-c={d)&$C;@r?v*CzvI3ioWe7hH2Z5#$=m2!uLJyK z0&sr-fn%pkSp*t!fYShqqw>~vN4+&Ve|5sp48uQ&CP6YvBd3~ypJ{^g@js<4wN@%w zc^~FL=pwCQTuj47S_4mKfT^?B%U&S6<-Naq6(kF@I}738nbCH4%kiorsr7`w*6_J7 zS-qU%ek@kwO&9PSMi9+!WqaIhEO<|4*bw_ zTo&jTb8HQK8-w9P4;@svmJz1mQIT}O*Lz3J^r5`iJE}kNZQi0(gqHe98Y#LZ1o&eQ z*Q%m2EI{fL4;Qeab=-!1s_EAl*6RRjBV|ekt*&|0Lr_V zuF4Tgfu7x|8qu;Ep~gx|KgK-R!P^FH6cnXktW>Aw(i)-kSuyGE&82%bm(~cSH&R+` z8!Q4=@eMJ`P(#@jqf{EoGciiFp}ZZVOf!_PVw4$%VquJQTMG>(DMndrDE&y0?yVAv zNp%{5y^$FIlIDDkfWI)t-`Je55%4dL@h@%8*9iD`#rT&s=W7J~&&WTy1KfYP@QYO2 zCU_~Qs-bu1+}q^kOo|TJ_U0Nj0%ihttQ=NZfPphaI!AG34S&{#rVYtz4QP7nar<)MWw}rNS1qNy9HOhBu2IYxu96oEL@MKd=Q! z8W)9{Qou!WcCb)VfQBUnXt4NI)G69BlG=1aDgHD@xk6J4H2+GCS$S2xAiYLz5>=oG zU&EFOe_oQ6x1M<EDfvK5YS$106_eE?uzO8zt~Ru4G5XDGTf4WtM4f>^-? z^dSxN1iN}z!&PyB|E1yOlxY+=AHQGHC=KRMx71@A!ka<$xU9kX3&7v4mt$EvpO9L` z`LH~{;Y2J5?`k=$ml}$Ol`VZY6GbEF{_tH6Z0ZpT~OQ4dutU>P4|YmOi}~ED*~IlNnA04(k@m=Ab|-4P&2%Yj5(k zdK8vshdXR`R6DK!lbwQmv1`1;hSBsn*i5MhB%sxQ(RAUb9M1=3b6)rkvZ#6}S?Jri zYCNpzyzfi$uqiD#YGfzSS_elxh+Vfz2#$K2t4WJs z{@fY(`I*N=pWcvZSo{5PJ!52Vvs>CPve@Pm0vEpqL-VI`bM~oNj$x@q{HgDSjV_Y) zqh@jEL_>#nqeSbF`WE|QJqWF3cIa8AVVPxyJQ8w{XKK)$IXP7HJ?5E85?b-O;1}H! 
z4?<^6m`B2wbYC~w5Wp(g{0$Jc36^|8;Q8V!DW18i6jeIxT6=)Ch+-9wV)SaOy4B!rEhnAoqXQu*D}b1(v5lPi7)KtU`XTK|LuOH{F9@GK+3zn%*P(HJ!NjYIp(1 z@@Wm<&)9e-lkF5`vd7QL?QC&A39?kbrZ;@Wm1FV$5|w^nM* z8pHckdDjEeN<3Gp8^0M#oi9;}mZmT?7 z;pa4vn<7a-UqG#jY*PbcDc55#C2|6|oW@qgEW?JeavdOhAlMDUUTWBBhW!WG#b9qF zyL=~_OJZ{!LAsx$%AKZ0XHz4>UvxMGR>fn61DDEG!-@fLlaa)#o-^EeZ-ZHNufa|s zW>xSF6)*EUs@6A{7m+Zk4no!Ho&Y8^V)PCeu>&v*cba!Xs+9YL1(2#CG4d9$M}S2> z2Jj<+Zva%_ImJW{|1@kicrDTep0TuD10~0$quVXqYj4!#YmV~+R}~eP8gW#Yi?1vq zL%`b!%A}hNce1K-QDN-`hHw@-RSwa~s#5ixtjc}veODO`b*~ti3m^l&&%N|&%@Vn& z1(Q|87c&R$Dl)a`yt+DrY>ui}XY7WeilAY3Hx0fGJE{i{g}DO_Cu(k8RFtt9C00fQ zu)V1-;rVZL_b;5k#%nd>wVL5t&1kLTV#h0H=nm6Y67!Db)Vxb3y4r(@pcCy~lBRSz zdML9<($3u?q*K#90vF$m3VOmlAcSaAe^@Xx`!s^KrZABoF#Z1!al%V&xp+L{=K-*O zLjOvi1W>X9P|Dk4 z;5s17@!}XQL0Ow*bYG7ExDBFYoB@A#G4!3FM}@8O6;SvI750ObYp75TIva({QIMqA^kNu+bZx*yAA@SqM=c60bax8(D5Rlb$#AVzyLRMl~j;T$`z> z8uACI=}2y58aOG~3{DChVUkrlvY8L=gci1OCVVs5&>w;iUNrPY;8wcO)7q%5oP8EX zUuIy*HQE@=K)4Z84~izMiTF)Et9F5Dm=$_NE9|@auNAD?twwYYM5l@<(*j$z_sZlx z9IF++}hs&*TRcWHFqG+?S)wjC;1hG`>Ppz2h)9okhG zV-{Tx$EgW=pYbx{R@G?usiY?OSZs}>^bqSFm&#?MpePRcLrrhK*Em5CT5weYc`C@D=V^pJ{-_%o zy(o3vAQVH{0eLZVkqknSys=p?OuZf7AapBlejA9Nrbp^B){^%yr={|3O`$wd*G#QV z!$k`@BBx8G{Q0wSQ()j3ylK&}m3IdgU-i7UXLZ(-rCVL{Dy-Hgwp1rD>XRp?N!40N{io~PWh)0W`MRom@ z>Q1R*Y`9ySh!Or40jus86_}yDx&XtE&h&!`xa8joqf}caPsBb!9IDdjtRa|3)5NJt zzL|7J7`QmrCHRFff0{VgCGzBKVJJFqP%J6Rm12z zK<9VAO0NSDk6BnlZsZ|gYk)=e0C<_ezX8-CXGPuv@H~Mp08H!w-F(m-)twy&-IG(T zDyX~{^ot-+dJ};A30wu>Jp%k1h_@$fDX&L`jgYQB40RPD(^0Z`bt5MpnZ=sGRQwJ` znb%IXGCj3{t@H&w5Avm*0W1PgLvADw*iFEyJ~k@3UU6S+RD1@s&NjUN0)cPiHtbsN zK~v1R+l{jhu&KqVm2}o=puf+i768cZ1z;+GnFOi<+zX(F@{x0Z@f=`e1Az8Ou92$% z%mYv!i0A18P;Lk1eo$)4K#4pA>>w~m@VjjTDDeVl9s96Dlz0o+L||-#_fe{V#bxPfKVbdMr^WyK3Qg3G^?Cp4GhJt58?rmH-X8yRdk z*lHA-&T&_FHfm8NTCHs`^bYXDLz?b$Uw5N!h8qR$R&bB8DYlJ?@OGq@ck;Y?quwXF zo@^}mfqez`IsB->4x{cVV>FwNP2ve$S>88F4$$L?X(`BCrDuZvBzjYN5`X~^s3Eud zTwsw)@o^nUk!t{a319-V(@#cAnHO5_NWHuUm6@Topk2vOhY2u4RStqWib;mr2~09n z2e?@>)I;L$@~x0~3R)u1p_!h8F;`kal-FSN7EWog%>0z{fKC_`IgAcwhKS$;-H7Z& z)$yoW`HbkqF5hq?kASlf9H?Dpcq2q@6@Kp<6UeGM*iXxj2f6k^lY5T?QVh-X;Bp!v zBLgp6b^LM_c~0qS!{?`$LZW;hJk$-0>2h)=sSQugv={>Xyj0Xl7W@+66Q-ig zHb!T_1~1qq0rg6kfzBT{EL{X(3jk}@b|V(MceS?C0$w!@s5(6}6ZPZTaMiSi0&hSb zRq=vP1}<%lHA;4}t%NqGM%&s20ck6khuX8!*dnf2TYF7-gO>La9H$P7kJO`3AU@N$ z_Sr%0v%JYjMe6Dxavk2cMFp>J=G?1|U=poarv+P2G8)%46I^Em@8od3r3HsX1>b5W z_?8x&y}bY_uICPTIqeJ*;e2(0urw?JaLJf_QNL&IMO zZ@L_((s(hPI$h3jY5X4Gsc(>2ktLWO$7jM1Q3sH{9GP9nE`-TM_pLzw_xdepz{kybt?@s z*39~?MhubC2;Dj=2T2s(4B_c=bS1Y9AJsnz8_Nq?VCAZXQ*H!T<%=Vx7eK!%5O@H5 zHGu;)-&lMA6(ARnI;Fipp8$c<4gekkAQnd@GoluMt}X73DN}c@QFAnlXRw{U@G`H0 zmS|^W8);)EYDyc=0FXBLxI(0Kffu{MdGt!%<|7^7%p=wW#_&O7!B#dfq|F9)L3vZ8+#x&OMimEX#gg0l-3t$L*xkB6~XVIRNM1_ zVH~BubzdNIc*P#uG;S3f9~Hd~FKj3|XnVSx#~Ur<@-$ku%K4V8EXHXN&X?rWrW!vm z)6IeebLA2wRQ|phFWQ3%z6^T}wNidIM7;np{d#Dn%oUtV!hPk+HawV=&ofGAOM>CC zs#OyCMRN4Wz%m2Q5xO;dVPB}dRmAxdnY>0JL-0~t-k%zslebDc*~P%AUst#Y=yZPL zQr#xn{!@aE@_fXLQxK-oWOsR|G=qnyvJiE=0tu+l-6`z@+Goci`bntfZyQbI0j#K4 zfe1r%PH;QamHtZJXsk^~3Pil9S_V!e8NgKlpfe1ex&`2ZNefl{sZU&3a;bVNGNXdfc>zigGEhGXn}_wRoAHt3A(57CPZf zu%b?w(@f>1&FG&PIt{!>=x|O=toODVc&dA@saoBBLuc-ZS~z7Rf{s7!z^k`Qmukxy z(_!Ftuz2HAdMAKtY)(tR1aK{Yv4;U%fa%QZ_ya(!<6p(6`}n8vIrO+k@Y%v$zduaA zbxp;orh)qe$Iow1!xF2|3-^Zg%O~>He^g?Jkx*sM76_HD1W#c*T6!{o3IhCn%_RV= zs_w=B&NgcTkB3D~TF;QLbq-L&&7g0FeBE6}yx|?)eWYD$B9FQ?8b)@s?U9aWI%=*5 z??tv$JKflS2CDV|3*K}8S5nEMTcId^G@+x1sW^fU}F^4PVn1enYaittl7`6cCa<7!52ZeI_ zVT(S^fP->ZIegHfUlL#K@F7dl*+o!#jXv9e6=Q3&^92z(0QIsoOC zA5T=EKJpzXUxG5}5K=TRPOD`otL`_($PE(L5MI1NT7Y_#;!P`;1xC*@e9CQ7z^GXZ 
zL9GfxxPLM|TwvH`XkWeL%;gxchaHYHJse4ZJv^NNd-x@Q8p=x#xgV4s&LzMe-a~*r zbWaAr9u5Iez5+GUpk8`-HYk(aSbKQIdbr3KBRyPg5caSP^(e)e@s}Pp{%sGd)|zm+ z1UhOzG04?GZZ!(&tV(f~zI_)T*2t~qL^!GH+hz)?IvSr-Tir&3+yn%s`P?h6)4jn~ zE1i3y28niet66vgD{sR!L7xCPNl%5mGYcR&LY|Pjb0{fipENb49#!wm&lDLmq9<*h0PGDL*bD=}@FptWgOYdA)0ld~j41vWE! zxFFE?eJ|)+p!FrqMqWW$(daOorPmOT?Vi}Y_Ciy83{Rm3Ofc;4sNJZx_%B1iy$jX@ z2$s_S5U2(41A#gKu6#^*?Cg$v85GaY{JdstB8b0p1;jPZ0W!8NK2{=Rc}2PaIduxM zPd@;@4F}G=<`Vmoy{eu+KIH~lKU(OG%M86*=#9%FQvsA<;fpMwz%&5-t>aA2*wp}@ zVPNy8?Vq2Eq;fTY>YMp-A3n=IIOpSsvES z2fc2aaR+zM2ru`By~`M>vNTWy)Ha~vbyWL?Q7bFY7NZsqljKq^?~17EyF~TNN2>np z%-F2bswXfNe_+J+(Y^%lc66-tzo74jk{S{te*hb>0Ey($p?JcMHJsN0il5H1GZ9dhNF?nq$sLtHd*9SdPvy9f#K z9PG1r>F2yPWvC!cp2_OOq{+JBrYaXvRQ0((+M(;oXWciZpL|+sG#fkBI4!q=cKW^E zdB)DWp_lA@BjGb4M zU(R2C9uIcpH?ZeGbxk-4!YvA-PzoSi6PRF*HLaFGL}hvx(7k2wiICV=q^A;|=D zijh1-$6vInor(nkMx(BlLf2aK#BP0h~_8KI1;}LT*hVl?P za~sq|o`OdnM?WTI8)Ni1A`g9R+|!5_K~#Ow`oxN%c!Xw#sE-%~-;GZh!>~I8yu(mc z+5jM9DS+t!<^rfZvPxtvIM;y_*$Ch*0$Tz6PT*Dm?N5b8KbdymK=+3I$W-M#|HG{f ztearWfXNtVo{llT z+-Q+8e#jsnq0ib`9AQ49*5?bF$yR+_t(%u2%&gT2hp;^i`D^9cUyTSOqCkr@kBCdB zXbm#H_ZZ};jA@CM}eE{SJF|$lJ)PRcWO0Nr(q>fc0ao7P zT@q^(5_K0J+2&qok~f@DPQXF6tGspSro7>NhAr?7XYvXFQvpmSz&D)R0n|`l-f(^e zOx|z~UI~D2I4=iKUIzYmutDB%z7I;x3fABo&Xlt(T;)4~JM=2waE=A7<^U!5hVycf zsN4&XSBft5+s@~~t9;&YlA;_wKsW@>r1uPm(TtJeRTUi$-a}XwxUrI}i!QADXv62j z(bKb`Io@#F8*bzWa3+H@se|EAGpe8m0T}ifYfpg8a>&$>5}9tWNe@S~64t;NqeeaW zX^SDlpX<7pHMX+GDqvr*jh~y2h`j1eoK>WFvq5@<}!!#_Nu>a-}(1;8Jq@VBdl z^)u>@Sq*1QGzvNy6DOSvE9OFMBe;`JGhFR%&xn$pY3;7Kn9U@upY z!d?o6y}TZj3B37AF&b9TRga~tqN~0JeGjXA4B$5aHRMKq0v2A&2?cBx1S4^{+o}d- z(m11q9$^sF@%aX?XW!8Sr5_JSeAGpod|mToH7!2?ox7m3`j!Idd$U?<%NU;ta01V? zZZK{Sz!R#vWb~040}9S70V5$z2UsaiUEw_LU5}tG+ZvoR$YU^P5~C3Y*u+V z4Gx00s>7(&bo$yeRcl-iHcnp;ITilo>Fdj&w}wPHeI0)x0G__iC&1I!{{}FDr>|c> zK`Yov1-G{Sbtx3^!sAv$Pc4E!VXWlS9|;^%&{)h8vBX+IU0Yq3fq0 z%IS6Fq3g3|YgMtks{hTQ>-kxxzX5dAIHxM!bV2qroeUD&&#b%A*lZ$jtkH0Q9bT6) zG#Y_j(32VxB?2EaQcV%~0{9Yv#Tx)H0tu9_NXO>%G*B3U50P>Jl)GS_L|u!E(R#%; zqb+Pqd5LUB-AG{{GHjn=A0nGkH%r(*8Fp90_Om%g-8!->3#Ms$u3<~m-2qm`1jC6& zT@@=b>Rtx7YNp}ps4Fp8H0orUmj^N|>m_9SBciU08Q*%a5p_FHH&OQk^wy9lQP=Yl z0F1in1Q>O11CXfu1Gjp-)>FZ)Z5L7jqpozLiMovdDtgV=QWzAuLJ*Uvdj{M|g@(&8 zV1B9XW(bVBp%6V<)Lmm_{%=uNV*0yUv`TucDm9uU>gF2cZ=()B>L6DmJG?IACiqnH z-a+)FhD3?Fca2n2)E#>%y3D+{i2$RHKzRW9>);unFzP-6rQ&W=wWV&y8bB`dUfat+ zsrZ*+hYh=eY)0TjVgF{>(Fj~fHY4y1VSCJ6?PzLmAe#}mmF&uHh8+!1iNM`pRTLOb zG@riCij2Tdz^xi%xH6rKW1qVNg=j6(i$utXv6jd(Ru!L4oIIui;Qg*QV$a^JTA$`66N60CZD$;5RE z0&Rh*$3WW-&d~zx10(Z)3$*)8m%bLQ64zCmj3x=R`weomKr0Wo#T#mPLB3mJ)xBYq zneURgPq920tZ0n2(|y-rSho2noWF+TkzXeH&V)_On&w|7u_`wkc69#g@4Vb@sL^?A zRYsGo)7+#*H0emrUvu-3CcSQw<ima^oB5%;dZ0y`zcyav#qtKN|OSJ0d>6hyOq$$t#t!*$fi>OZaVjo z!cC_D+;nDNiD})*D2Pt@`_P_jt=EA&sl;$~cVu#)&fz-UXw%QQCS6{1%aGPAx%S!O1mjbYt zOG#lb1z<0?fFfJ#IYvW0UG-SXdvq1I){n9Zx7ObSs3BLj)}gDAFxCUxO&htj9tX;# zZAMEp*6LOoLUe1bA9*gPk#*4~qg(5~AeJ{mXYyGHm=_D6uf+sP#&$q@Yh7pDeh8v! 
zy0_!`u#j8plVCv&i4wKnPSjPKmKg54WQlQY1;8bSK>5xvUYnf_3YVDEwqYC^OjTQJ zkQ;AVB>oGoRk7c&(+vAQm@A7!%C%$%j??t$2){};7pNh^4jXossr?<*TlILq5c!T!9&gi&=D?k0}dBFKhu39nflnm@Pee~@NR$XHpRRQ_MPS?w=<@dn)ma1>fdtAM;F6bd zZ!ZzJ7nF)trfMtWt5k9sLn$|cQqkA2!-kzpHe)DX*eeX%W7reO<|b#VuvZ&)N5fu1 zHa9t|$*$aM*aHk(HaRzeRq>wTWE#%HtjGv_4&17*3|B|sK7&Oga2e~A??X@f{xt%t z=4g!pn1l%YWi|ZC4bCCxtszk&uyh9iM&K#}jKE(3$OflBp3U>Br-EDCK0*bIz*}wt zz?-}O0w`Y(?liC@>So;x`P!MLBBN{#SVxPpoCeV?2b#eD-+gO#{wbzUFNs!plvLGV zHYyTj7a8PlqwHd%p^=?hmyvp&iLwh|c@2pYWkEAfnxZW07IcwOb_xMT8G-UZC#0aY zpfJzv2BqRL(|}j^&sx3)Tt-=sTS2LK->}1meKpyPvI=2$ovY~{!+wNpM%fZ!_cCnQ zDc9R%Gs>hU2$+-*pCv9ED&Gj}lSA%a+23n_`UA&{yeQDSWkFp0IB2{6_=+-_oRoe7yn zD!8?62^BEb9)^I#THGDz#cM{Yh3Umx>_tzAO$QfyZB+h>BqqQ$B-K7)YA_U&p!8^= z7%?&Pe@l3KO_%FLD_*nhg_o*+H|}{bnWs|KI&bAL*{DuI{d`uBtxf4N;~6-c6J&sD&>fV~b_$0{UVMM87!Etdp?6kQ!Lt|O)=p_UQzwLc)l>0QX;9aT3R z$lU(;=X2sqpR#XM8%XUZq0wxG58WxT+Nw8J%2ZfJKkiZ}SUsp?C5kNja3P|M-(4UP zG*%mt z7pP=xIRmvG3y{BYN!LO{;vg;la4%1PMD>yr)XV7vp5Gi7bsR*9nrr6^dOfKriQ@$y>9HEs%P;dP*5 zEnEkx5(2Uo{uVjb!Vf4xTDbgx(!xbb85RV6m!|DjeOkj@x#GXdST)i^tYaC3UZf+dI0AQ^~R6`?s>HJ52_O{HRs{2?`?H|AvP z+`AduR!UVkbu*lgmr8Vk{G)w`>F1b}j@8HSQC)AWBNLbcU{=1S%7q(UlrR-%>DQxE z^mWITKVKjjDx0A3trqi^ZXrl5cbt|BYQ9Mm0LNAIO-cHwuUe>^H1rJ(6SIkm-FaHO zS?mZEd%Jfrx(rih-VlDzfiItc9?ggmgLcarv@(xVrs&e!6B3OOr1-I72_|(azFhT? zGG!Z8Eyq#d8!q7JyJhdj>-yl!;B**YJWz?5i<)P){FMxE!)P>n6Tf`+OTVmO(op#q zS9=~roX4-%l@zJZA^%oZxWuC8k^gW(IDhyByxd^~rkmvr+wecr$_P_4B(Nue1#Zl)k%D zwD4S9GQI_TUZVlM+V-Q4NDX|d+)2n^W24_6TlE@j~a(Ck~QMq zT#Oj0)D`#VV#LI7ri5EM@WO!Q&*z6}ZN5Ezs*p$R2B6kA0ABbtDY8>;)QyJ#-vC43DLos>j z!%VeO`Y{5tTjL}~v&0S=Ou}pjF6A90nvhtA#Hv3jPT}z@Kh^lbd<<4S?DsytN&#ME z75Qxk0%v|0Y%J<9$`m}6tydBzp0!Mhox7V($#>OwH8kP+51f(?T%T8yWGEVNU$_BU z*@;HkNty!BMc}LYoMLM5{h-OpmUKYamgj9h+zENmdZ7*SC?Ozwr1GF-8;RE94r6dA zT1!5H1Vw9*54a5wiq^7@V$Of5Du7Mk&jq&vOACTGfz_N#73UEOeIcPKTKk^S zn-n@hp>H8Hg<~V%rP4bTI!>XVAT$MY3kdC^JLB}3F8^p8tyKW!%utw73iC4+q-gC@ zV2d3JOX>xsRXK{*(8WXO;`IlN1Y z;$?*jK;hd5AU#$1w!XL4{hEtG#jDj^AsMZ@TxG&RN$*9$ZC8*;2|wqr<; zwzVL^+P0!yGVl|FuF>tpfVAy%5U{q*{t)f&P*i6~Vr??WSljA=U3r(nlD5%AEZeLA zq-}X1J(ad)ULtvgZQQTXw))36$x=5mSGibdQ+mr9xKm|*Q3Kyl9Ow;&(7;dbKtIXQ z{RTZLAs}mD#>KLphz9-xc-Fx4jw3;ajuJ&*qQp--03i*$3=rp8s%VVlpk)!34BY`> zHRpJR9<9(-geDFAFQW~GHZ`!F(4>K*Kcdneg&w6!-$7{7z(s_1wJG#Og=P)(0Oh<{ zVd50#Qz}RrxEa{uyA)Px;FT(8Y9JfeqUkg6Rnj|9ik78aC>2?jKGr6=`auGlw**vd z^Nxe6gn+DtBTgVe=VkLLL0b3_60C(wFO>{-5QDDKF~oqhaO}rQ3)dpy+@PpTEqq`n z_>j%p3GB*i6;}FSXkkDBNDJQr>8Z5vxT5*rwD4Wk-%)6%Qu^YX6erfg$5iH2T3B=l zovcPlxk@~f>3GSgP#vAD;qqvikO3IhLv64L7dG0@fuI z+LW?v=5Pfy=eXuFjdYJS^L!(FGfioIrOZ1uVG=DnQ1An)4Q3Fu=`LVtc31ulH9IFM&bsO<-DhCS zav7tiE3{eqX+o1_d4|#D3T;||zYv-%%W*=x%2jj&AIu*%9k#!t+MvRiHfJognM}uI zV2j%nmQ6>oN9D+Lpo=?Z;nqc0j~I#6davqd`E0hq^wWiGgY&?!gn%r_try9bL*PDD(tE(HQNV0vN7?=st4w$@_l`sNC=Ls$!&jg6CS=MQ zcsqJhLO|BQF`H!TA{zK#;8_E+zeIvGkP<~5DA8v(Af$me0OI^!RS+XNXgRxpB@H|T ztmd>R&It;=hR~#ev0o87O`&ZHeFdRO0}~mYq0nO$dKaNd1095RRV(yFg=P(G0Lr;h zVd50#b1Fz0coVS2n-x}SpjYKg4df0L9YRlQAT5ppoMJOpWw+sa`$f1F2(EAP2!(6ICP<`?hxb$d)D5iG-)krEFmCk(zVF3CLN>% zY0?GXDouJ!MJrztgRaq~#DMHsHwf6CE&C3$;ctrSOx63J;fGkOUI2FGPf-yM8RIhg zek6TJ%f^D^skE$61#kb8mKASlk(^nh_Nq)+K2f%-BmvG(4$>MWMj9mc6)rRI7$KRd zNT@fI2gn4TG7?%9k_^HP=%F*kHs!pjvBjT6Wi;MnH8-hqRVyrqY|rn3)M=0PQ^=L~ zSWo{K612xUof5Ri>Y@bgv93qL)td?LvEBs?@39_3V$}`nWCdYKC)_0gz4lmp?WlG@ zBRW)&QVwBS4%<>5K-Dh*F22VNk)257K7oGX66p2LatG zN%=PtbhBhN5+#J?>nfedId`ivQb(KWoOCM+A2ALQs|7bDPVd_~CNVu;Jlpqlhghj9_QxytpLF5w4s{g7^g=PO` zwCv}?0?W^GlJ=xVnK^VGq;|x%W3Q@er&$%Z;Gf9JHvLygJ6V;BgI|NRCr~+(_8}0k zv~53-Ujde-or;_@U6q-k8t@8w%F+%1%hKMBgyy>Gm!;hs6_IvtOb==Qf%wwLX;*?f 
zTjsSUYB)*z3y^SM<9|egq)iYhZ9^40m9%eFojSF&7lox=c0^4HIvo20YK3Wt9FFm6 z!;U*uqsmSMWQyMT^KVIOmG4xNFEI;h>*&2ac(YkC&d-$P<8!b6dBbt<_d#q&Z2^lu z@)z(XntA*r?E`RamL2>{N)`j~jES)T>>PES73tRohtmr)?i`2A-Td20^eQ6YEjIZ> zbIm9xe`xM>{LtL`RAAyHW?LB3229R$VC-@AXo;Bh5X$fo+J$lQ=Y(u#Yd-s z@Rqp6KPTbuuDA?7>dNMV89}%XOKu$fhC>Gb(qx`Y^Dj;2%k&naDUkI1OOu5XemB93 zWSW0z(jn7N5ZuY>C6E6OjHiqTU{xHwcWp^60r+k5O-!L)$|N(uUlGQykH*Uk@VmqK z=4iak;MEiI7Yc4*9LSG`$sda*ml@!{591$?#>))wBOqAW+$W;(GDFpISN28YWCo*> zR*Dg_KE^-3ZMhAM&JTt(>l2*M&?W%4uP+P!ntn*{=k$^bsX}_OzV-JE(3V^ihP)~v zvX9myGKXIRAuI7H|9G0^JrFPAUrl|Kf7Wc#-_Bqp)kAMaeQX4sOw#Yt%9NJ2VGsyj zm!-=dL23Reg4d&dR`KAPA3zHTuUTWCr7SEQ}Y? zc$ooSL(gUTKGArY0X`v&?;nkq8Q>R&@dKjqG85L5fzddbfu5wNjzssjj}T`*YKnh* zl=!!E7v*oF$TW*zCgpFUbTUZ>i&Ujqq%ytaFX6`B&xC4{$P60)V;KKHG+t)Hjo%xM zlNmHVuo%YPvTZOaSi)rh;FAU`NgvB?w;jUK{tW6~Fuv$=36*xKHol5&&of-|e7FT^ ze-5yzTTVuGoMK0BAGf?d_=?Bi^C(?D7f7+?(8lsiP8{3lyhV}DkyU63pe-+A?3WwM zey^}E{TYmap}0{ReD#YwbMKbr=)jVGZl@$hOu^cPKJ)`Xa6+r-y94xj3p`Ms-=NP- z@M<3f$n{ZGxKtJVqd%6J9F*lLSA`lZGw5ymv+?@}s!YQB;D1#V18N_?HBc3HhW(u8 zoTH@JUp7a30F}uf((v)GLJs;Fh~?kGfCM*7lD#B8OMKDe5Fe%UPRoFBej|~dP8jPR zl$!_Vk}*#OpR#?5#Z1*?z90BNgAw#XJ-1R@e$ zZvt&Ox#GVoRMO1fc$DU5B#gVP( z7ZU%N!EfwS;&_l)6EN~}yb?v)A)9g$k)vr@!szI!(UUlG9 z&2iB^O!|zJf8?NC1s*41To(lnMZmv_0$&vY-xmeGBLe;?3jBlu z&(c<%d$nvJ{rU-Y1?Cv9-GL@pX0bIRTRa0K%exn2y>^Wx_LIQQi*J7#BTer>e;UJ) zI}(Y}eUK1HT#Q5sk>yWB&XI(F{sNF=J`$JpMS@=3ooPeDiNs5kFpyYxngzEfP;Bc{ zDCGylrh^ed92-@x{#B(Xn?NI7fwuDLZL(!5kG0m#s;z@1Li;muyc#?jz|qwk>$n-1 zH-K45UnHOpak_{SWwRAz>-+d}o~)Cse7*v22e{Z!nO(?~wW-WQ$fQ0kS;=n`)14pl z$MeX8l|XfhFTa_pffR|+bt*$nVbv~`Ax6}mUt)qd8ebndA7>b-O-A@>>@ike86_8* z+RJH;^k>z+g(zWJ9RJ)ONbix(y9>x$ucfwN;JT2>dl>(`ev^dcJ&BB=GWpLVv+NJ} z$8i+@d;`%rP9X6@3=&@=aYH|gw(JMO>sUja9}DmhN}P^F0umYhkvNwi12EtdzZ@?5L$jd^(a`d^PoIIl(JzeM2ZG!#B;KOL4kZ3diR+LU8)w0VB2})R zY`C@u1sw;0T@0+|{7*0R9|-*((6xki^-&TqOFKRV>S{u1%V{AwK;iw=s`|LkpN04+ z?N|)te?jLcLLzw(5@kqyhD6bqz-Jr)jTew72Sd+p-lp+~#?B5_?B;OU z$D+_nO!V<^&2K9-xu$9pz3U#yQY$<6Hkl&yr6ziP7`-eCz1u`jQfM-1c>DTAQ8aIu zG)KenZBgITV=$`Psv8wuc-Q4$W+R~m`3rE(K?!L`I_NKn>;D3D(*XPsh9!3`5{@CT zT!eKrA@?kD%f}({x+EBkuvnnw>ofe{-i2A zM-4+C)W|z$cL2*fXYV4Rx!zTSH&)R_?NjzfMRv}5V|whI{Xl%>XVqzHK>qQ5l*aMQ z(LA(yaQF3+u$;tla76ACfSo-YlGua9Y9vYs<9GqN2a$8Uj)Z*#L_&jN7T!UHpBcfD zuLF_?KrB}AWhCh*2BvXkZKAS1Rs zzOg)!69+bypT~)V8y$<0_!CMw97qfrO+$jj1xOU_1)uX?L_2+eYz0JfR^K2w4wiqk zQbJ!0^dpR3uh8)dy`Iq9fY#1N=_2|~javwGH$Zs+l_X>#_dDd;i2&a(NB&8opf5%~ zK@^*a;!WhfkQ957|C}g325fu+D2@?DH*zbHTUoMQ)@}M2&1p(uxN6iS$kEaF5xhze z^e(!>OrQSp4^F;;c^DYC;7YQ=?$%<=)Xo8Ke-Zv~2540YKS z_tHVA3%y6W5{&sB!w*n`-Z4Dx9Egp`_#MM5kmGj@KZ->0SvL|x`L^d^mdfXqe?lz8$|FI87-*cwwvkR(6Gg?iNZqN2%~NP9n5#sP@CC{Uy}y>Wy`L1Weil z%zWqGPOG)glVk5O8xnxw5-q0m&EmbbFT%i0S(V>L*gb%G$OH|5^Ion9|>3Q zvW}a9u}p;O-i^dKN<4_f1xPrXRORDUJ!ev^{(E3sfpuM^un7w5cpaEKfPutsQ@GIz z=c1Cwfq#KY?o`+_%#vRK^C_2f?Nhj+ChjNTVke>0GYVr0$r1Y~?!N%D(p;;^vEYKm z<|=~T9=yu*0CW%rq8KswUd3K>nuRASNE{YMj;A2|BC7OguZ}og0j7Z}{Z3)PPu|nO zU2_F8SzSP^{(=HG6D#dzS;Ht*k>l^cJWZ@dDGXTk*>V%VA;f9IOgY>`ynaN9kBHYf zszg{8{ou50lff$`!b`?UtBMrl6rI`P$*+U;y^esh08?!(C z0?j`a&2UWRaeq`zIB+Dvnx1hC$hQl{p7R3e6(Rc1sGaEIkVCGtovE zy;h;8q4c$khH3GJ(M4BFJ}>2B;bE3GnWJAZ;)7k1g1le9a)BGsQANHsi(I2Pa*_8V zMXY+cR<L-HSRh;=1KT&UP; zWeHm)Em{6)CfZa<{3@N&^GuX3Vl*^%;n4&xq}1+lMMG_>Wg6Lx*A<*Mn!bu6x;Bc) zT+Z;Je5fK7U;WuNMPSR|=b1?Eres5TGG6rmcl`1y(hdLyUL&@j4ifs|@WgWn-2Xl_ z=or{GYN57afX;mfnb0F~aCfH@LFhSz3rT~w02f*jNlt^QA2yB&zJ-MwK9!Tto`&B8 z8o8|m~cR0{i%vXhhB&qVSb zPHO2$-pk2O)RuA|C+{DGQX-B3CFoxi^q>hn8*R6|ub>A^=wD!$EgvZ8<0f?Sxqu#1(7g(paoKc0KV+!? 
zatxH^xRqKTBf89T*YpMSBL&@MLXXV@^n^7LvgsH8o$TBF&!BsjkFE4`#rTS^BJNLF z@s_&lVbwBTq|W?g9SBg|*Kb07=p=odNpPCxzotJ3F7Go0pcqm9rj(uE76a&J6Kb~P zI%;)(-_AKiTd1HJ4-#!Z+a$CvL&IQOhRgJ8&}GXAnZ9~5(r0km{|r>hawexS!Avf; z^8g(wDKf|>CD_VlbDPu@T%SoD9?M^TjuG0svZWcDp-7eq{9WP%z%S|)QkM$9!ubpb?`_;y%S{yWhD%M@ECKry23aSRPUaifv-zetRy+6+e3Mx*)@ zmypRt*ZiBtf%IJ4l}uW0l2#FymL#C2+3sdkVKj=l>@&Ia)%@@G18Jh|5R+z^r1uh+ z?=b}|Nw$A5>iiz4^I-%m(`~kQs0Z_UpfWKxEy=bqjG7UR;<{%8&ewzbY5s3W&MCHB zCY@xGPC%AP-6m;_=5M5dnqg~V(h(+U5wiZ9pl6nuwyliPd!UYv1ZtLT52L<+ObMDx zjhheDY}-LbeQKhpZvUEvK+Umz#Hgb^P>*5^EOTu?G3sD6id!6iSamK|^Zzgdr1NZ( z0g4fio1_9+|2mpy=h-qDwJRFMTuMzYr)&PM;UJxF^D*g7CaE1+CcVTY?XUS?NC9c8 z?G`58Y?6jd(g#e^IL#kVW0Yolfl2))>Fp-zdnV~%&A*)lzrgk-lNOq!ub8BLkEkAx z)clL)fb@LZNPzHbl_u%8Ch2sObON*v?!vOrwwOsZlQaQY?$%0^be!h@$p+GN+d3va z@TeNY9Fz1ilXSl3zl-d^BHL|Dy2&JMG)W&cNylpbr^(1>*j{4Ng(m4vll0Ff>A9Le zZwyE?ZQV?&nWTR(Nl!bfBrrzvSHy!f%Qgz27;)ec)z*K3)c=$nsKvIqjM@~9;vVOj zT+Y$_6Uodiu~jnZLX$KNS^rg}&`XtlnXc@MzleN;T|pLLVUe zSEitwP3QvhV5<~#lL>9dPM^h4(Df$t0_t(Og8EJ9yxD-d6?C-;eU^Grp`hg^^d0ib zl?qy7LKh7Ov`Rq>P3UH_1s(-ms-PJLA}~uWLo-&SA-zVXYbm`}rq@%tPNpxXbiGX9 zKxr?h{dZH^$7y(Lll#L2^-Joe5lF9-X?|;eK&JDS0v_bF|9kTJt+s2>fk;r@pR#-Z zq$Von(e)(xX9w*7C`Kfh{aZ2=&_jdPpc+iyBT|R^Yg2IyW|keT`KL_*)yISW%w>1sGw>|D@H6~77^=|PJQmX>8~%0Erk0Bm zk@g^Z`nk2)jpProV#I|%tsCnAx(^c^tmxC)@*c*Ko|I)R_W9UA!!u-fOEMxldS(r5 z-7^Y50f#3=v$Q-mQq$;3akL&qoJ&vq)7F-BMC0_#&TU-{m8WOmP;2YGn6C7sd|_#! zpIfG9R_rEAd^U>VX}J-R3q2{zTfab?=oxsfwN=2l(KB$AwPhc~L(iPWts~CCH{b9K z9A<5u*hkaoiT8Zs#h77&f^0|iB65*L(qg3lV7;Ads z|7~sUgwD|u-&b#GLm%mxW3>1%()7$(+d^*>re}U;tJQ`j1fKXEeOm8Fpia-g$<~%_ z{jf!hCv`^ab@1f$#D6PVAB6RyC;l;O%RP`8J+mjZ?8?B_DxNvFwR}Df`}cU}>}Yv& zJYJfCXV{FEm%)mj;WJy##Q4)QVtC7Hw2YoPSuMYXD$q0g{FbMOVEYMAO0b2#FiFqs zKSKO$DihOJm7z>OWSEdLvC-wI{)HoMTyzE%K0Q*#iXQfll%ni_C@fk$aAvpyE3$FO zswyZuD1wDe7-dP0r3L77ra#7yN` zfSserOr5TWCv7q(`_lYPK|!Xa*3F2towN`eE||!e)o@VZ#-QV~!*dsnN##j92WqU% zCFNyq42O9S6Jm@d;dv1d*$WAr9|od3CNMS3jSIuTCCa9Pxim8ilq~=Nhv90jb7mo~ zLTTwHy`)~GW-k+Ds5#1+Or_*3=d6gFahIu|lqAMyNi3_=(r{Cw?43Pgln`;C3=IX! z(zxZCHYcwitP5@}Y5A1E*WG54wSmFLiwux}l#9rk(5N^FLUzg-u_Z2< z#APiF;Hc%~ENUd1LqrZVLu(rL;c?MWT#SnQ4@$39HNuPQxO7 zQU<%6+Xp^wwwP#og(-Py*oi}BMEcZ~s)+n4>xphNZYT6Cuj;9Z3^$coE!%YcqBE%1 zwPq1J+xW0ISrbtUDzr9Q2sk077Q<0WR@6>!Z$4_^e5NMQnE9hxkAANsu_ec|Ef0kC z1F9vvuQ&u{CEFH_pn5`z1$0|ZwT1{!31~!2q%}pzNGH}u6OlV?o~bQ~_YNS%*`SgL zdoEN-l($Jm<5#7e)Ph$Dd25|CQaP+J$8R_k2= zZurDlH%*Z#F~%}v*(}kVoxG&Jk5$xmz?zQGOORZ>56J{;k;T4e`Td^#nzfJBw$COu zSSQ2F_OWgkF`b~Q=mOOk>m&=(D-w~OR9&z*9~oyGGKq(ga%5AgnoFt2#h5+?iODspGwF7PuF?uThZPL5xufE5&kLmTh=+8I!N7y=T`Yh{|p+^WJ>aFRP zzv-7EZLw7|8sQQUI`Ol5=pW8|L{s8N;tcCK7SVjdI(C?KJS2PqM%k*RVSxSznJ2CG z)q9fa-P0O^o#p>9tK2h^Jl@7&Qem^x?+&b;mXbIlF(s+4wj#;j99r%3&Pbe{ z6s!$-rup5KYuz=TV3N0?-rt;5=?i$0YQ2?p4OJcq(vlj20WMinS(&tc?rdZB%xQJC z-iGzlYP=0evrKA9XBIt)uA(*+>_O5ab28sxGsu5^&&+zmy{qrZzjEr-q**;dCpD32 zl1^%9l7v@zDjI5fa`?X&@T?C-slQS0t7@q8nA5^A>Kp1pweCQ`-E6r1wTY`S@LDib zRpqHhTEK)f`#nJjDNw5vs_|8<@l+~&y$1?Y6%DEK1S4Ra2;!x-F*7VM! zDFKh4+D}br@YnS$fo4aSkiv_GR5y4lqw|$@?qDz);BO9w+#%{2DyR;)>w8o{O_V6i zmTE7s$U)Rzf3s2PtFQNYk;jN&CeaX?A_b(9<*s4ngyE;cwKX7(fzu-eK@w|<$~s@A zyUxQzO_fxbk|&u$f53y`uJwAV440ot@ndu4CRvMvz8Tj<1)Jrbz3bfa4 zDqV!Tzqziq+EdwFX*P?x(qp8d9Ki;Eq!RKe#}buK`O6%Oh$|8E^}*ID@#1qWQS59z zEZ)`hYH>@e-Yq`UQpHWJ2gRqF-XXRD^-ln{wU*#-qR3Ykd@#&*pK6;Je^^}Br%S|U z>qBOX^{JUD;`oIJ*-_Z^M4Pyyk6yh)r0bnq!~nP^{bKQai#5dp#_RON(kI)Or-|X! 
zTifTSt=?l#TfJk2eg;_Guu}hCPfM~U4irzd>LGEq9{N(mbc?}oWY$UXqQ8Dnw6E6_ z^rq5jY2xbEYW)FG6VhM(G-*!h*B{Ln{Yyl@PJL&Z_^(!?r_E1GO1lKDwd|=OSEHZ3 z=WYFxx5X0|wu!4mw)pO%P;Pt6io8~Fg(xXa3)YDKZP&GncQ5KJ&dJHO=a$aT(R1_$ z@kAdTUnGVLYqN>3TlR^#M17`sUQ4{>yahFviGKRNEh6r)b!@!eF5)}45T)4C+Qxss z&_d$+)$p^x;7LD5INDfAPRm`49J*dW*EQRa+p@w-;N zD`S7|$2U|qGzz<(DCTC*PqU|OpK)GJT8(%VqZF4Y#_iJ&h=*EFta>L&H0XoHy~{#+ znm9bUq*UK8wppy_T0Rn&w|0x_HnE`N_AL#!-=3DH|1K>}d|0fPh|8v8s2lXecxtPy z!zPBbiT7Kw#qE}C@nOqBaXm&WEhzrc0<8e(Q=qn4^vi&;tL`L-M){pBSi;#Xo++qnqbkMLy{lqul{7e5x|A+8*fAfuq z?H231zxwyrVng?LB8Y+R7F#U(iJ>3r!}MI}Ol<0c^Uyl67Q#=nCoL}x{|BhGbVZKX z-`XZ#f|@?jnkp`V%*5*!{Ieh=?r81KwRJ*Q)c8T?!+o7YK=!v{yEXeh0;~qC>Q| zo`8i&)Z_CP=WHp>Z7mQTVDo{N$UPt{VXd~^pSL9EQW5A{p7YgwQM@nrlHzS^{y3+! zy*-!kwiC9O_smZdzlUAf-P+wQUenr^=WzLuZA*?g(bBcs0i$x0*w87aSTiWHwb(+o zy?VSz)3eXZ$vHZ|Y%T*6ZGaW)gY{Bz0%Me;SL^YY+l$-VOLN7aE-Kk;ZwRK{kG6M- zd#xSTbB6u3biNpd8tdBfE-!5tX&rUU=CDmX(%Sh|nz+=Wr;3*@!aq+7y;FS9(kVXL z2raqaS|W~Y)c1*dtcS&mn!XPeX0?f1E!Odt2b~4;TmJQ)_~F8Rfiw}>V`q*`U6|1b{EMm$*80wH-1Jvo+`ZLSK)V8^~xwpe;iWuuei*;iBe9V~h zL}uHQ*KHAJ9nKZ5?)}q5V&@F8YM)q~E&7!}A4I>@()np?mNy*Pvh=zg=cVP?8}^H| z6S<-c9Uc?PEfRONhN4N68}>-bG=Nk2NV?e^va$ zfE1SCnU^R!N5d?=lBNEEkOy2P!c zO2qA=TD)jvi~R|yIXNiw=Zo5E^f9;XIU=@-gE{>9_+#-$1DbJUrgg$F@otNCibYg_ z%jiV$+890YEAiKh^pJJ>Adv+2T^EGr!-~E$wq%?5u^e@_pPeY~ad%pihCn+);=e5= zq$QpD$J5gEG5XKq=jLv7u{&o=PHB!lTR-!Vz9MIZc>nC|JzMpoFO#e7EbrJUVmrl4 z8xD)#_YH|>HiYJH5zqE55w9h5pg;}`QHi+O=-eu9ncT$=dP2PDTGl2W#{_r-a@>m& zUyDO1@pM#)UBw(tPnmuXr#WW~m zLxZ@-K-pUpLf45OW3#t3Y|SavpAZ*^I>h@1M7Rc?M|`A_vb;IjI>90`t#d76Kl-zK zLzg&){_N!be1QHu5Y?Z@s6RhUk`kCWR6LFnNia*|p2^^fK&3<6+^Qe^7goFI&?~^k zbXv!rDIOS=eLD@go+JKNaS$@xC-yBviN_~f3oZXLg+XTVpT*tcm4q%)s&_(9_mqm_ z>^(=VbD&gANw&!ZXT$292I}}qif0HDmUaVUY=I+GDohM7;QbP zJ;5@G=N@;q>KzYqtu)&XlSrmfH^!4#z7_k#AnVi~tv-I?iI*|B9}7&8a0MNws(^$Z ziU=M2X(mBX>!^a%Hq>#RMf_ECs0jOK5wJQfN6=_&>?P`LM@50&X@wnGr?(LaI+aM> zEFd`zBBdZ<-a|vZ7qR;ZeT-~ZGPi4nh05tQ))~Vw$%r2WTA-gGhRnPia*G;`;}>@A z&zUd27U-{aDk|-y-a(zCTk>QhC zYQ4moIE2P9Q7nTjw{Fd`mm1(=Yr5z!6SyGlRikfFo2O*}jm6;n>^_gTv=;<>>cYzr?IFV%O5-+It;>p5qL zW{!=-t7mqEDZXNgU6Nvi*pMhPJGNTK4deFe`)q|<%vyv89X>LxO*FPOTr1wFFA-gy zgW}q0m}_AOtYV^GtdCu)pRaEfqpkBT`d!wEBSb^?zYsoPI)UR-hQ0_(Z)WbBqK zYt%ofd2mkIyZZQ7#ro_m>%~MwS>lT!q*64hyrg3bkP2-_XhlW{vq~xbVF;*`bn_rd ziVOnHxFMvStp4})v0|-xG9zh9s3f`INYPi+Tm_ISNj)fj_L3RF+$VvEyF8d%-<&}a z#t$AV4swp3X@!RR4`X&_?-vgZ)erM*{=*t*<->4Uui%8&vaHcMIZph0$iZKlXa~bH zITaR7Z@ZqGL^HW7+!db5m-Xp2MNe50p2sW+@5==byE?>{q!Kwr#f@v>!9E>Awjl{~ z^{9Ad4HWT)q0sq=)um-L9PJ%_(A%T5f)2O7haEb8VMlnvVdhWeS=(#AU0ql$K2B;= z43r&!TQcXQczU(9$|6n-%@((<38}x;_>1UjO(*__O2w^fN|4YK+x3_KMr-i9*U*BM zSHrhKxh-GcjzMJzhyEd(=zmb`8e&bt8e~nkUZcMZ<*U|*oY%NoZ^+U2=zGPb!$_NA z^?f^XE=FAW@F$;$Y6PhJu)-on;?r8alM_YcArB0rf5{y;b&IcR^^iUsq4H510CzQo z#O0n)4nm`M#wPwo+>QKY9$Et0yE{n*pM-+H|UE1Z$ld&J*^ zdcs!yW%S4@?$|&*vx-OKO8D<9K3a2H#ba@_w{E31kTn;}z|z|RynRLIQ}fPW^RYdx z<_0}pFNJ9C7}_a@lOK3Cu5-&4@k$JAgHt~Yqmy$V%38#=Yfp&(@NC_J*@?KEpf>9{ zgE1~}3{3NMDvUfGiwWU8k0ik2GaE_$-e$OviT#;}eo(TS7$>f*Bo%&ZEP0YU zn=s|B^mL2AjA8<@Pz|0JG+fCK}%5eEqdJ z+_zi*MvlSV`Vd_%*E?-u`v$$_BSfkoe6KGbYlwbk8=HvlloR5nAtmCzM$Fuod~MccIf5sA#EF0=wp_r>04F`tcpZDG;oK%`)kn=2O02^CD%GW9#iLXzn%>t;eS0> zT-6tKe-Vpj>8r7k$F1HR(z_sp%_Ic85`{X_Hu8k{oEL>tMqnA=Deko##5}AK-+Qrc zdoT{(Avfo~y`|!f7#Jqfo)yp@ak#Mqj5e*P28-V&p;%*T!?k4qeO!np&eJ;qe?L** z2cVdBSbXK7dOuqOcMfB>f$aHuC?<%0e~#X{Q+ykMgSxywnzdWsE`AJjiuV4g_?s%; z#R_>?n_l{{bs)}mu=`)@HH_3MUqeNmXMvXIUcby;*Wl5N6?1@duQv>PRhG+ePN)C1 zMYT9@!S=1)8}bDFzB+7cYlcy|-fiv_dkoL|N{>HO>+?plsEx2NRwU0m1)IX0s@mGx 
zs;a^&Ew~yd0tpS?U~LUHq!X||uPw%5-f}wDTkWZ>Ssg;1+_Z4BWg9}P1HLBBS6v6wP5WA4{x$7U}Zx!}NBO|?~_)!LH$;)IN;Qxj$-&P<%1kTN}a z_Vn4)=Os+54>fo_X*C`%?XV;34pgpA!^zOJ*)tQS)g(+Sn2|88x+&oHH{x%hJp=#Z zSfO(5G=IPs!V!rtFb&(*INykFW|Z8cyOavSEYN=O@H8oMK0s%uJrBQ{e1Ww%b*=JH zr5I+kGzcm4K`f06s}u9bNYCgY+Nd!$2apNrR3?G{L00qoa2#0M=rL&gHE)Gc7hGLi zjWKs-7>n(>Sw?O_Mvgsy2?KJomSyG6G&0gNmS!1FJHEMO7=CwEviy@G|ICnoX39Ua z@TahOyfJ1fKG)ba~U zjf|z~4lUSFA;&nVt;H!rO;9t`#3^hx@_k;9GZbj33~79VLWdHn)~rBau_siy+U|AI zp^)YdHsino!*1aC#a##e32?` z*Lt*igN}~0!sbOdl#sM8I_)8Clqg8j?X68UoHb-pOoLTOi4 zo|ZftByPVSM@MFv`Z`m<7(oNJ$g8A4ZFWKTsmj7~NJa;K7kt2v!Xf-&^T1_@ogO#%gzPwGoOEh!Jwv z;II}ZB%lS?8MR)29Zt{;oO60Ga2{wJownn+iZsR?+Ds3fMM_yNb!4Sy8ifvffy3@9 zMIV=DWftestPA;#Di3QlirU%024D*UQk`WN{LK{&)zzMW!G?sDO!NA@q^?pcGMCw% zG_CCUiwhWn>T+pVF^8Arr{_8~+z1GGad6F3rvY|hMhbc;Man8?RA9aYnvF(JAV}T@ zb_Z7md<~e$9)s+pA2TUbZ*0I^)IveST@8+;q*?{!<*lltiKgaJgLkdh*W@*-c(#xf zATopRAsGJka?Y@&NjB`#)IbzjY;GU}K2Bk;q)Gs?E}q{|Aey0MiCM=K zMPW0kX_o1W^V~2wo}g)TAWlfvU8hyE#nW(KgU)m-q3^2vNm`5Hr4#5X)?1ofi;A6@ z>7`JStc$PCTL+r}yu96DjtpJz zF%VP8Ia*Rsn3Zp2I7$m$P(C?pj3u6sJ>;oZ4R^R}gRp-!e2{PVqSo3f7)Q2os0fGJ zG(>BgeBQcdnC(i~FjKAAnu500Tjf~~{<&~VJ!a-wL(X{vGYg^$$2?U=$mcUCY%$<* z(6f?7&RIro;p~}4!Q#cUXQF9juXB?PKDQ>HX3og~U2F z?Fh;FQfx0Eqh>Eyrdat|QRh_3OD4ul&eA_GW{xSCu3}_hs01QsE5p1rnfCg714Dx8zpoQC5+r;BU*Ay^I`QRQsAQu*7 zPXi|kL8p7A+OcR^1+wy&8usM5m_`j=C~s8?4Hxu>9Smuwb16+5Q;VFg;>C+KmbCIsK zVDUg}0|P+|VoU=OCsr%y5)3pvxd9`b2MxtNo)Ch510i9BY8YD=IU9(OLNhEnFx?DT z8B%ySqL2?)UjlA~lh8Nz1HKA5Gr}gz=?SI7F-Yx(TX$EHIZ}0C(li7y9U{6EF?6aF zw1e-pXzqH%bf$H5E_JAAkFuN|1h;W4FP4vpX;t)`WmQVnWtcI0YOyLnA4nmU z?T57vc*gH@C5-uT@z-NwCft(C;}G> z*Qo?n7__1>9O?N>l%*xzT7>BoiOn)#Yuy2|1R3&Tl`Zd9Msj%NXJEks;lUmAf&=X=wA;yLXF2k-T(nX`2%!YI)_}U#U=5MN zrWckM>$6NXqs*De5l5i8TG|xyG5$~hAswPz*li9CO)Hchs9}|?t>?fX;HhTo$Zj*@ znoS85LIQ=&`3+=$v8W3b1junOr=_ri17;daUby8Jwbgu^mB-INF3c;&p9Qut^gzADUxMOKQYgT5n=?!i#3QlVuAi9OYna%PWn>l?nzXnc z=c{YL(iKjQlvqs@7&$5vX}!5B54S#gsT+W4B?YSWu4|};9dCgCVC~JNWt3;?jTCfv zMG^6v7&erl02^-<;NUXQ!%LIC7C7xjMlROv)MsfQNdO3^DEw$p5ldL$;qabAdtAUI zZDkXloDA26dx2Q8^D>0wP6n12YfxqmpZSU+S5>$a%h1B~C0VGM6=kiV=A{8|MgxOL zlAHu9O>W2P*IAgAVP9-VKuC8<&4p)y(+GO~2#7+kbm-6W0!JpB37!}T zP(n~mTuyWcG#OJ=QIJ4>4lVVduZY30?4vjfdq%WMp#kQ?a(Rf!j}W%1mafywosL|4 zAuonybbx$K?C74dhxQVJDH`qesZbqb#}3zny^K?@a=0P1euX7oj;I8G$iTuLlP$wkXB*U1 z8KkI!6~k#bvI=uq!8z|j={)J({RRbMo^=g~VWHfpJ?O^X6i2#b?TX2$sOHP)2R4Zc*aAfh$OB4=P2^9nMvMh1au0(&qgPB`}3$&)nK*(2*Kmh!Em`p5n3NkbrXs+M09`|{b9PM5T`#D0c48Vf0 zHV}iwc5Fsa-9$UV0(cS1@j7GvR>9gL5;3RkbzH3V2wQ zuECDC@_@V?SzPO>t1?tqcFC@AeM(lsEL+Q)Oa{P?6zRPf~Pl6GMaWg;Mj9t3!fAn!A6DYvKcfG z)tH86i0L6eR?mTW6E4M@gGB1^D>e%vD}9Z^yYN@qe$ z+~of;bFj=@OKuWLw$+IAAl1l56}Ex1*7IT}+^p%c- z*ML|&BS^31&GIVEREU$+4D&XK-CdX%UJ|EJpr@Acygy3n5EZV&gF$D>-|~o>`verB z@!B0iVuUo$xzkmJkxe#4n;ugdxhYzp5(b2pKOq@5tj02rMIx;($wacEIASR+G--bHh=buOaxro^cgIQg{G5#|xNR zz)FDC9K4hqBHoZ^LE*4P?b(s+q=PHYx0gikF(a&Ir2qk2uOhDu;EO4Y}+R0Ek5Ji%QiP{r0 zioLvl?5V=QDmnF7K=CFpebogkBAR~KJ)`dj$?CwKw-ew&X>X4mDOJhm21aJOE1foE zph~Q%kRcu8!3x0)Vz%&pe8BXckiH*qC>rdmL+z=vQkpxXasnWBG8fzUtISHYdH4I8 zNXdL}*otjZ2@0F3yQ38V@S(#*wNFjP?^hdZxy4%R&5~2i+)^vZ z$iiN&JXYe}d0uB|O&qcET!BrYYS1f+t4dsUbMqKm#*uA<^q@N2QRjdS^~}Tc%upGQ ziJ_+)FGxiqJ1>K6D8%~e0QOB#O|e`*V#f;}mTTvoQ9|{Y=Q8pFXPToNal%4mn9*lZ zu^o{(@68g=$d;9gw`43zL6UNTyoM;o9+#YobcCig*5ZU1OPESb%<6DUL%zBynq>G} z2h-1y8=ApxJ9@UL<7AqS6g%?zA2B|!PPwP~E3zDjV_D4DAHcdQI#`EvX@5a(Pm>7X z=xE~9`&E&(25+eFcapJf7Wq!H3|O#r$F-!R$E1spCao~#qAEYv$i+4g!2@)Vf%A$Q z!w08D`!ZZCBR9uzW#mwYqSYH6MclfYJu|k6WaG)PM0?M$KW5qR$YJrqOUUFIm9gdg|&L-Pmo%+Cd&Ps=XBe$X_p0 zXM)s4#Ka|bxi3n#iieUVnODT;n>+|~z&ICuA@gKT4l3+Kpokj>g$|yOXcApOh*lHfQ9x9tVLpB`ll 
z7{&OVfnQt@^5QG{a?S#5F`$#!<6kQm`4o5uvAx0E)T&RbsHWW-w3s3yq*aJ#8h6@X zMV)dohq=>kuZFo1#P`CZML@@lG$=0KYrr`1jQ}1=$lYHNkd_ga36sfi78fESK-dHA z;JXIUX*(pQVka62UdkCVyulD-%RudaM0N~iOJyu)9v(z*s{C)8++^`dMaWiB17s}a zT$-oRX$149gERBc3}%Wqrj=7iP#y9_Tsy)=m-!7@^iewLLh=vNhi4*u@gOLJucC_! za&67Qraa@8$G|k3)%kP)nJ2d1=NZ9NJg;7t~Pf9W`j0U$Zaz}7~Xucx^(&rP`%vcXVs8C z*-?N4Z?i5`Naifb?LkE>JAKxk;6|O zkjNzA_?O*S(5S&0o_qmaJ1|cd)IMpC^FT6Q=8=byxx|r<>4}DrwSoMc5lLW=mR?$c zwgx=W?^CFDW0#e#2WTlNJzSlW!ndqC>>O14&AkMvuRYcx6#<{Simn7`hy>^o1a~TG zDNU!#bYN@lukvm!iJG)Q`Etn@*UPF{mIfA6Sk`FOD2lMsABissgBKKdJR}_MDb4?7 z>AA)7E|z2jRe?%j1ueJTmKI=Igs*Ew&6wWHFid@~jFRX;7jlM=C#Sxwz>c#b?pQ4@ zN(PXkWzG^@lJbU+M$`qC6w*r>=PN~lKP6i(#}gMVvGYS+kEkD?zoVO`{_%N}jJ7y# zHIFG|+=^>nl0DkUhfeY}2AGl5vqC$0#$S1wS)Vl*w^?W}EQp;~zj;>;fulz*kRXsC zHeaM_^TM32iH&W|Koo5%BxH>!OGT$_Oyyn@ZQnzJVe`6-PG95_kvgMp zzeHL6Fw~w~hD$>Re*hoBsSVX07v%&tB&E7z57x_2+fRa6hJ=gga9 zTq17~79;s)TuotO3M9fPHgw^wcsi@66(Cz9Tn8e-X&kD_s(XeGbOpf-O3-h*Iis#A zp&8f=rHi5z89KFqkFFE4GLvsvVum@ObEGdfuqmCB1;aslUrg&^N~oJui*Yhvu$*pE zMGkqP-Gx5hh>H@pyv-6-(vfb*B@l}4`D{Y2p~&~;&VqfV!$ozzja$fc?NGK)%^W+< zhnC1&$8r*_YjD@G8=+Cdc4?+v9)*}8EY57`4HtO-OLXgTzJL>}tjtI+a3{sw00tvI z4CDK`_)PnUeN?rJ61(#i;@n#l}~7{?f)5X&@FO z=&#F#Rgn~0wW01ING(z0NyXWHGb8E9(=##xFb+ybxDM!^Z&kls79bi00Czv58B~Wp~D8B%F(Gk?Z5v@M__5O2wX9iJy=_zavB3nJqn9|?X0K_AG2YN zg^P)5t6!ejLqE`m${=kr8#1l@gJ>cFt51%CMG zErT~Y;I#{T-aKgZ&`hF9qvpT5%S;eDs>l8T*1&7k9=V!>zqqJCV`vJ=wL86VF3Jdp zZ{A9*TG^W$B|HzfCnw$S>9G{1uOqm5zf@g3qZ2aB$;hEAbeJwnQw%4M2P0dw^bMzE zBQxqNPEc$wukmH9s=lJ6_QUAQ1!j!kgw=2da=H4-JbY#~K4g$*Hmhit8E zB1_Yp2U4}zqvJ~`;hB%vj;&bu3rD6ULESQ@-1h42kj*tyDc@`(ZE{ttC;0;g9|rH$V#gn%poS;8g_VP9evb_BDKgkVVC z-a>$Mzv*eXZavh?64zh*jr&P#NxYmk?G z+^8WrBR+(d6VGlx8jYzZD9rYfOdq7ifHQLSpV^VUh@hRG;?i zAe!4Sftta;!4tv33{B}gow=A?KJYxWU)Z}3Rk}Bf@Kk7OuRa{op+|Z&PORnfPv}b6 z&u|kc5x;IUFS8{OMi9SK7F!o$TT_Y{0jp+=#*0a1(7g>FJmUk%XnH+3EW<{dFQ9kJuuhTg8YEzFDDPYJZa#izM!fd26ayv zczNo;OMRLy+dm>!2UX2|elDo4miK04Cebc4QE6?RGGC$#lgoAA!(GbDIJ!UJ7rk!i zmRVF!yYB7CY-6>|YGiY?DiP@SKit<;wP(qw5|)_NnKEN^-9)~Vi##E3v*w?XwJec$ zBjov{JcYnunKD z#Z&S$8d}MDUVR*>BB#%sGfyUBMS;!tqok^90dI4+zTc+N-YW|+ck0D zIoD9(frCCIa(;A4R9Z-Tg*&aHP30C$xpTq3BkS`qx1lBP=3Z9)V#qzR%cdjtN zgk3F_BZlE6xY&&DEllZ)GD0B|iw@bh#@;G#xX(s`Wv2NPyFSUo}VQO}xby3ByBVQdd`pRgj* zH_-i~J`7i)mMG+=|879$0OTTovtK{fF)YuKIhdyQ+>>jauek|;Q_Y|8Rc(EykY>rv zL>nv4&KYt5XTB_cB^iZvdw$L+v)rZaD`Y5^Q-N;!B2Vj;r9SdpZT;~>z~pf6fcsK$bPN?L~fwbamI1c{Z#LU&?7W%{7wTO@M^DiC-7z1 zT-qVyr$2}>;IW|J50e;Z(B^ihV9ssRBRG{c)do3$CizOg1F;I5jH)oYlM7QlxvfIq z zo1e(>T#cyQ3+LiaeO!KycBS^kZQ9x0NvuoSS4&T7ZIO3yePGOymnRpvVe*n{yIX|I z>-H;jqK4kAnR>waPTeYz4H|WdJ}T2=0Qv@`Y>8j4yW;Kulx*h6VRW~H;+2nO__)I(#hx5Ij2u@NB`$Y|D2g|39icz`M<42#{Q-Lv5>p+vh=)S zUv;V;q?Xr1vBxMrt)A1~k$W)#UUmzUx&B7M7$8@~pfo&h3@~$BwqA$*ltCIsR%!#XEI$RAi;cA!!S7rViy6v2Kv!t=*3B1h9 zoS?F$@v9YB)BzyUzcv6=n4a)kj9Y9r_LZbXXlP)OYUFN?#{4i!|3Z4)2GT2I-Q-! 
zQ&c=b!!DbfLM;skAasz_)62T8raTd7(4;e;D!NUMKeZtMuP#Kn=?l6C9hby7!6wyq2HLaeqQeaDWscwfyDxBi!Fol*6(M z3F_}9>EDR#%60v`pr~EF)-Rv*au&o>mhkX3Fb*@S+=TS2^2Ht~w=;C^0?fcTxVNs8tr6#%srM5edmh6*D5VidGSIP&sO z0X<34yvzwwv=v0ABtJUq&4h06(KSa**l!>C_xNLmW&6EWulq8GIME>je|q#7saDgQ z9X@zX)}UtNwW)m@3`ugC33Tnz*_D~Nx=4}B2AIGKjJ;!|TFsTkCJd~}8Usw&%Z1U9 zB$t`MRyb~mt?=gT@T4J*A8JErW!n}Lx34Xfg|+3AK-`CNRMwsKWUuSz^;dzHE=rJQ zPxof8@`Z-raW2^8(TfIUf_FhF|D52cL>ZMsW;AUU{ILpUo~;kr^iCmTGTakAx;nsD zs|H-%fGWL8q*tnG2)!TMwL?r#gqS$_wvHQfc_LZjG~ zhGk12G88p{%sQX<4q57lw&gA7CTETBcEkQ#W^BI zc9t^BrXY0gL^&*LAozWtMrMB)*3i$Z*AMqhgwS#$uX_ToT7s;tylM-0)t2#E7qsJfUS)o@j1ZounbMN)!Wfg&ld_Yd|jCOAN$f*A)Dx-UpsK#&TkUls&) z$Z%M;!1`qlcw85F)e|JI{6vMg$g$pXYyhVZ1dbJgtnD~rPC{Jd$fymBC0X>w8lHUpEk1<=Zc6yr##!ipM2TMjYH(08|?}` zKOhDV?2#wZr=<;Bj;pc?G)AUjtiqMhKA9#-$QVsEP%axresvOp5Rq;RksH`VaTS!Tpr>D5SRi!)4a=70kjGLj0vD<0K}X zVp-aPDb|%>!;HQyc9ynaip4njRia&%*_lEVWg9)qSf6$Bw03%tzh^m3z^vrGVRq6O zK>1Nk{Tq?pa_72XxEO;{=N*mxyjqYa^|j)^6ww$&Gz1X!hGo?v0S$=&A&iVYMD{`L ztTupw{rn>KwFT_w56-^MvLBvZo)z)0OZY3L{Y&*luy(t)z6E~k&YlWcXX*p{Uz}Me-t?_ zuxj}?fS-;if;4B(?6Pd~5$m$UUmAVli~F2-cxccQL(8{J2Ka`E8X#FNDEyCTg1u$G zNj4BqylfqrAKuSKZCuG2d1P=nr^WxRd zBB5uN`!`|VK=%8ET1^DWP}w*1^D44u$BO(D_O)a`Hn49dNQTP3wt#&y3ZDq}4Jfq> z+zR-)9FB}91)22(X91;Ey8u6(!$ng(M3L9z*b{y`aI)h~@CyMopWv4RszQ*u4$J0K z)x2JWt3mzw1eXL|`pMn9g zRuY+(Bp+WiuprGc1h+x51#a1Gq*qJYn&3kL)kLrk&#`?1<<+C3ShtDbRXcl>C*;*8 zV25Q*1fL#2$g5pP2aW{44V34|t0lVxLV`B{B2Ktf+D z{F{2PfgT(iJXlZg2%xkD>Ay$dRcS$juZK(3h=zV%O$m;!Zvc1^O66zW>?_k%zX(39 zC-_p%rw#qQq9a9})O+=*pOVg#dZd;f85vBSPjL5KOx5=D>aa*i@c03QygD}$5~N?% z{@UPWwqijlNfoZ6!e0$dxR79;eO+L`EVRu+f@}}fqb_M3$(QT|Y$8v{tJn?;39>yr zA+H$F!jT|b$rJL50WE|C=@k%$sP+IIL(?_N2>&g3riqPqP-u%LHqV@hB3K(xjdbO< zqF7~GmnhC!uQ+R{;;g08v5l1(&3q=;ISKzRm{}nhEw4{4uczgdTdYIUY4(O3CJLD) z5!@{}tV(&uMik{$B8qNZ7*G|0Z0~|`h~k){q@xgIn-9zC&P1Fu0SSsATd|;raR7;o zl14Za4)}0zcpdW{1&A!$bZM;xX|3deW?HKut(81#X00}8s4YwQy+P|na+;A#&1Gt{ zdY)BJU{xXbmYhVwQDS9#Bu)dX5!vc>{k(Wr5P2cNZLlvmc=j9^#ICX9Nr4?FhH4Tk zuO#A%dgA^Vw5Pt_#Ebt3r_CWFz>2b=JjKg2mO5 z6~V1Qxn~P^V*YqPN{6%tIixkoA+1p!X_TQZ(TmNg7n`k6*%r4Vb*DV>VzO+%3HIF* zO(&Q${hoC+{oZK$J?pF%V$eP7tR4<&HStJe`a7Y+$D=Nc*I}_mO{I*HNJ~a#FI;=E zC6em`x+{lB*Sjq`hJCIFP?dtH{VV$C-oc;-`er0rtKi>B|Xw)o6{jwPfRTw5{fF zrXtq`g9iYk;eikp>z*;pvW)Du9nrQLYvq)|XNF0EBZ2AhKKNg3iq1f`f9jZlt?H|@D zb?<t+2gU(;P4x$iplo_sNCn(FSU>=+@s!ti!r;L(`3mI|4MOHNgsid0H*q^d*vg|#f zjdLqO=PDHO-4H|-f|M$XSdm)O$}CugQ6QU7s}a5-3M6<{h@`NNreKKyg1-n<6@t5m z`Pjl+0lq(?sOa0mXb=|Fk4IFQvu=f+sypEiN8JfV-J6+fsIbft2>u$nD>K6X6YU|$ zfL1cXhXjL~X(ML_B7in>ULYuL97G$Ne-3jwKI!bm{4pM)JwNRy(`~^=h$&%yrJI52i}zv07lPMQd254 z<+cz6l@9`ZC87xa8K_;d5et4)V_Vc+ACNR_J(Y_cqMp6*GTaGJ=jO*6Wj=t@65dV z!)pVfZPuGm;XeYeg*9V5(Rh@@vx)sRdwJY>C}nQ70AO20?XtJ0Iud2U4D9wjJ;~iD zB|r%NCs3oa-Z7)PM~^zZpO|mr&TI6}sCNdd>bziF5?A!y!7xrOAzTdxSC=O~ zsM2#IAX#e?jsmM~kll?^$q0Wt=v5`ic2F|!hWPL9P=fmD=t+27PJhBD2Hd26!lwq@ zfQircYTG(2|c`dwf+JCZyAA^6+ilUjlqI=)a5+%KSN33dil{j~sTxKyNH zX@a0W4clJ~KB(Ur&A3l!&RT-U1XP9KM4*PtyLyCUXRJ>YAp5V1LxTDb>A+ zx<_$MskmhQcNtRS)geVDO-zmMMAVMRR&C5q06nvlr<&_ZXRUFk#go1mYdbBiqMg4nip_E`kT{?{CS2GJ*#I z<>zg@nt4DV9Grc_$&hq9N_oX)SU)eO1vQ#F0bZV`y{lyW0X&+2@TFkP#3 z^zAN)4#hDxu7v@L`ygN%r#QkiRAmrmTq&Kko`G@;%#sJo-)|G5FF?j4D2Xqt&p;}p zYX*NtqhwBky|}fLyhU}!>-w+2B{>%4*ozZp4P?f^c5%sz$SN7LhiWIg!UsZAFC=(( zKvf8SF`yO_WJAkNYC-5)w_0)9bd(o4HCs+aEj$Oh#A@?dEq0XoyCe4;A97G~WO^|e zbp_)p1j$=YJUp<@a(77b-zV^|5Ih{H;n}*=2vZGL*E)v@q#Id-F}Dc)=C>$`&k)1R zBE-}Dr6$Jt=G9dO&wj_4As_aUpBj4hKiW=u8pnP~{9ty<0Afwl!xzRnL0Q8DWo;6a zHAhg^5~rJDT-hQRid5qv4O8vvp%fjT$v>0V{t7YJ?kB zGTnAZ4C>`eg^1Q(1qKpC4qsNEZY`r*1^#Rc8U6|0id#F$mmVZR^O78evP@_%YD()6 
zBvVQ21S6xw?`+G#to$VW8z&O;txoKn41f{Y=!=r!u!0b{ID|qY!5a_s9OVoG!JB~c z(>PvTbx=SRKKgppDF?)~{QSi`#>?bzcHVttF!` zf%MCQ7IoJEq=E%`e=}tNDdpqa^P@C#dyDZV6_PRueWHtXNWV5ApSbmq0 zr#LH6acmA+t+;hYq~fga84Q$Ja88Z4<^;FbGi(_@1zDIulZ#O+nphblswUCIiDij3S3^0hbhT=`8foU3 znc;p041OFK)e$5kAAnY=;;GiU1uBAFhkKNtn(^wrzzREpuN{{sV?w+A;Fh{@`St^JuMJ=ApT4H9}M<45TwzPV+qn|S)dWTJlMaGAT3rU8v1!f zqlF#8-{&0F(9f$q;1wYxNY{9dyrR)UNRSqLLS9AtlWs}-?+y0X5~R^OLayZn&6f2j zL2`F=(X$>R|BZov9YL~`dJh)xuPfmH;9AT7BJe#4&7i&Rb#QB2h`4&&2uemb{sR71 z+`&h90Y@I`OT`ghaJ0us-y`@uP)bI4Ya}DM$1#x`;WLj7+z9>)P|A()*G~v!1pf;t zB_sS&BqR9HiBTxwW5)+>1lIti+z9Xf`ankTSfG@Q@VrPy@Xj0=;kzdUZUmnMO1Tkc zUl;eXgC8|Hnn3tnCk1W{V0KKgieO!i>|VgjkBX|? z3-}Ycj!SrB=(rVvKMtsR_T@u!efgb$AIx>!?*V=(hg)!**Wd<}YV2mfFQAldfL659 zJ-MFy8g$R;1>N+kIo!*DugG;k!qS|P~3DX!ZGeX%LmC)rOAnHXKS6!1rKxUT?y zC5Jm2LBt4BW7(k}$pszZ?}VVM5PUwM>It%6D>ruN*|{#rZdnwsE6FS)hfj@kivY7vD?fiX{>wgb*rBoB0>E2pB5oIqVMFaFExCc~ z9>5&TB%CH-EISFMsz&(VffF)<9MhDH@K;0pHxcAurDTLTSP3_R9ITX#Fb69kBgk<> z$q2{TY$EuMT-Nith<^rrNG>5>1(=CZ*|GJ%8Ct)YiI9^Vk-&t=35}p$ zk9^3uP-cV~7(&K$c}^}JGBq*>YSlL*A9Bc2TpCNwj3rKdl#K95q*7@yf~NuHT8tB; zCvqwGdB9A)%FoW0#0q9#&XAOhFlR_YmPU2+6UfKRyjqjlmzh^_ggHYJZaX8=O#PTa!96Rrejt4BFwQ?h3VIY1Ucn$b(y-E zwzV!{&ZZn8b2sz05;Au)Z!3$bwxE07kW0FR zKNON~g<$NlRf6nlsu|(f?WzRX{gjMw?21)_?0!l{cowE6vKv8gaX?iGGUT-;VJ;A4 zXNn+0Udaf@@UIeNEGrq|7}r&T@5sd~6aDsPa85Vx$Iv=5yJgc=Nh3dqyT6T8&#q0V zh9kXm=$82sXQ>yU)dfXxQ9#vFd3Hdd^1&7V-9O=7A@8FCqYA+@1FDI#e1g3^svX4D5NSc@aFpvq)T4zel)DSmjib~HIrzTzyasp(k1!{ap*LT+p> z7+=%Q-<)>zqH!+m=gkFUkTq~6d~^j$83s15NxcK&yRf zycn4prCK#xRzhX9`Y{;&45bXQ1f#|EDeq)(*Z>A(C<#0jpVZ3G7uW&BZ#IGpf>ltq z?F3~5RZ!MiiW@Mcwya!i*qX0_qT@p6tq_d5H>bKc+m0q}U))jescLm~NND0N~& zc_(A`syHi9aaN+@tVqRKt0|MFN^}sU8lt?_LQqzTS8eqhg5NJB*bbCyFRR0V4t`bG z=Dd*LfH}K0yAzyyg3t=VXm+yyDiLpeS}=(^4m>GCNUZLHVuM{5G*7w?HOYk^!+;4; zylzc<8MB$gCi^=@?=F&NPteGGbo1JNUd5E$K#+f}<`QLzvZe%GQh-HExXM{z1%m@>{@m#iPBA^OXEnshy}O zot>5x^;F8_I!$^iL8>8ZAghI-tP+BMe;IIkK)unW;ly|)~<>TN<8B#0NyFPVO!ibu1sVr{$G4`mqEH3cm+d;y8{^8lA zBr}udHqc7f+pw;mS04!*VGRVy%oAFR$&OS7nS&(v2Hr!N9Nz?Lsv(`<%Dy!rNjf7* zJ_OR?*=0$uqh4_nFj>{`dH+`ITg$?Sk8`;G6cm^AT*(zIkLH3wWYnB8YEBs?QCycW zs-}#pDWjzK(X<F zJ=wD074?%790YG1KxkF-)vXX(PGNEY{-)KINUjU$-a16OD~3WR8P`gsId@01WMhX{ z_W~>S6TEK#A=`mY7yAqLg;*!XVhABRP;yM#T1g0{j6zUHMnehelu_trkYCyhPWSoeKNZIlZf}vhq!Mfqgye77?E{9W}7toe8Q?WWmQYAv{!$Mj_jFNiiYto zf_rStaQ23*c;9N^Chd6 zpsZE}v#PZ)@LxkCR0zHosGVekdDOUN)*Q>cD79|I!kcaxmR)4oFb?zHv@~x+jFP5+ z4Xr1;B5hKd)UlSPz-4DaoAXi1B!nQP7c4MXE0QFsW-b^mi;M}zT1huztw@sOk$cgS z&xb~C=A-vjmXBqHiceX1A?#hHjhTHiaQedz9x z+=k~ip+mBrxcB9d+^6HtnnSYr=yHb~%<0+-tfq#w6vY>_|Nt{vEPlw88*D#%_lPxqP%)s;wO+fHcKu;U=+j3BN;a28NIWmgsb zjw%(M4l*@{FqO~$Rw{#uD(CN6-ShWsR?gqE)=;OpIh_bsqlE;iri_nPJ3(2c1Z8z8 z7+5W%ytzh}*~GIm(m6A?aalLC9_qmKsnRUZQQazyQc#idCGIsyHiD zaaQJ=;dwU72>Iqjzik$WWWz+a2&HBB0kHcJ%2C2j^DD0@reJ^Thp6%slm#SI;Q$1L zd;})Rug53xlO)e&Ah`l%fuzfEi^dkYj!kn-$ecwnXgzcus4oh&Dg>7TH6rVI##YNH ztrbM75n*bO@97MZrDuCq`Fl45$GK?!o)t{FojJLLS4SHNQWaTVSPcYa1s5zrY^}U* zwtD`a)!XtJn)LJG8G5ZiL0b5^V0_c}0LE<8Wc#BqvfXhTyvgWQCKRelb{aEQF~wQU zilY5y*hsVGn^|M~X?Qng3=U~qf<#Y(!zih#If2(t2S+A7^$)o9w{nt0Ao?hja%wEe z38&%SY?K8Zz81HZl#?vNEgDoK!?KI1!A-$~ z7ZapX>Om?qY?>D;H3?F`B1e-uS>LQ97GJ|P&BEftQE zxtWvIkqedFwudFz3Co}Z!e&VW&)DySGxoB1Syqd0QpmO>Yw?w-f}Kjr+B3+i5TtnB zs3a%KFWS633M}&PwheWq%-0SkPw-|H_2QKO#DbOh#DbOh#Qv0jx)*2p%bLroFDSdt z7nEf#DBC#a!Z(f4%>uDY01lgCC9Fhr;uP8a>QsmOXcRj6}U3I(h z=8)F=4r#aE9Fp5O+!t_2?o4xwl%Q?pK`kzT&L&6=!{~IO}@FSjs zSqw8fVbm%hc^M>3bwy5QQ2aIQ7Zl~bFTBC~h}}%`)zEUEb?xUN<{Lp=h2RrF`6ri) zY)vZqJ&>s}gsFUfR%=zx-?O^s@7V;NzgMQy(>a|8KOZe5NHt}cw%Q5GDkUhZOTqYV 
z8RgZr9P{@q>-;@yrj@b7%08EbQ8KNdk0zh&wGdgg0I!Rv_XGTTMEx8f&xlJ!j@Z;+ zP&O9{%BCVgSqD%l@^-_7<^C#187o8M+lo+}wLo#!1jSh!c7=cFSoa9Yy4E4-+-R*l zfwJsQFzooS$0!xWE-bGo6#(Hii4d*EFm_->#I_eZJIJ3*v+6Q-W|4r~?9-?Ooj zzvpd=|5EO4Ik|-Ih&B+UDzaff4XzC?yO`juK*=i!o7PgB{1 zfkN3%dDAF}4D$D)!5$(XYAp5;{KqLjAI4I_LJR5PXM^)E9)_HH7)nug7QnYg6v6v| za@?6pes#9DTdEQqoo^)i^MO3lI*qC^*$zmD^r%CU9s7_&4#rMHp8ArT;w-bGF8w8V zi(VU%T`{)b`pqG&=N!`dZfAWZa@4pqC;r6986|RbGuYa;9~vPxS(41C$oU|ZpMFYq zeyh0Gjj|x&U5i_{m6N;|w-^~k4m=PIe>O^yM38M!u!+FtVobfN@OlI>`hyUwiO(qS zX^^NTgvUVhe79%l>@&qTQ2yQ=;25L%d$u_#_kVJ73I8>!OORbl_Jydy&w^hsBuJ%P zLZmjAK`W_BkP13Fs~P!|S>6irB!m3DGCovJ)~6?N>$y+D6?(yLG8yf9tQR4t z#0kC@DEU$7DL<-lw<%*w_)uUiW&9!RbJr52*c=0E$6c`FkKsgPVEB{BaPV4TIpAqt;U>wWkw~S|`(cpd)+DjNd^2T74V-_z(0LY(DIip4(lF4+5I;`S>**~qv|khQjZUt z)fIwMfGRn!ZJ8In;rLm^DqE-EptV)Ih|OfM;qGn$8|#pwI7n&IwoGZ#_OLVsZoPDZ zcl29O%Cv-FJeXUM?pcpS1+1?D%MV4?1Y@mc+s>>-lET5pelk*4licQ89(Dwq`DL`% zg{H&bH{UbqHFIWTJ z{GKLKQ-`#tf*sNt;E>Fb{1Tc&TDu(5TIP_}HixvouWQTxhGl6Sr;)Bnfxhk&5=9RwF_Fd->epI^1ICD{?X$_M4}8 zJ*5ZPb`{i-SW9lZH|~z*#Ws55^$@c@h^r9%C{QIwm#OIOAX8%qQ~CU=!K$3UXCpFy z&l-`xXPc5b-Iddc@P9@N2~thT@K!rPS)~MJbtxDcETg=-mSg^&Wu3og&9pLhSlNq{ zFiIvT!AGYu9-B@8xHzIN2l(-b`YJ%an<7dw8Bu>hG4Ig)0*_JyG8&Mw$!b7$CbPoq zY}sktTE!G+H7n|@n;BEE)jcn??r}&uC>kMW2yMIm*|6gyK}$Ck;^qWzCmDZDGp&PEIc2C!-An zsftX>slm;VCMbeb%FR2e%}!wp>LP+vuyB4#{$!T7f;`C}f6rP-K2+}St>-A%MO_Mw zVF$`(GOH(N1-WUpC9yA|yX=AWG+)h99FOJj=s`i84uOQ9_Wv?g1O96sy!vWbBQz5H z+5kdMCg)-^qWEJVtn+-VE1%5Z)gPh|f}HXBZ)=b_=i|j++!%ucte1v^tc?T@p6;FO zf6j$hCju*9`XKnF0ffBzawH^p{fsL2aW{4J~z*iS0~R4gaqdS<#puMf=Eb^t>6iH#fA_a z3Fg}*{msy{O={UDbh%2X?dKJ}E_<;A>2goVD|%fB3DV`BkXLtw$gCxJUTBjlLAHXg z%PTg7Jar++R`7(pn)@cYx=L_iu1#wDd38;2b(P=`a&1!E&#P&eLWpRB8;8zC_^6H65NRaL7jo}rWR$NOk-^#Vgbn9evFZM0X(Y8Sq#}>U3okxa}o7dR} zRl;4)@s{q3(h;`d;_5tdJuWuok!*T4uCgTDfVOumZ96NWZBoU(70vrGl&-REQYEyl zr#RbCinDE^IO~4Jc{jv=UxlaF57pD_*A;kLe#SHP^!jx}^z{05ufo&o*Zsfn^ji?4 z3{OdU%rlHmK{4#;2SHtp0Hei%vbjT0%o2<&LH%-$Z)aL7sJoCF7*>LMC-TDmwVql~ zPvF4~15HrO1U;e8*S`@DVa`Pmb%0GdBLfZR85EW zCOe3`s@Cv#L9wXCd}=}G%18B>`VI&)jQOd+`BtD*WN0uh1Z6ci2rfA>y2g^sh4Ine zWEo9$hNEK?y%>_xoRs9zlqP*LlG3F4*wV;C?Vyi)yGmuA9CROCrtmwuj2~NzT|sC! zJYM63hv~+8OhWSaD4*?^zsKnx=O+1kOhTNFcu^Y^UI`FmFP{5@+!{+_iZf3M7@T#qj! 
zVVKDh^yYO*qj;<})nl!Z9&63>SZkQaT9Z6>FimAfY}-XpR!2cu1qEfb6O>g=P*yKN zS*6ZEF1;}1)Ovy|1FAyshJdOkxFw+G6MQA0D#Oq70ym?S%$wR=e>PGMyL>@@=2DW% z$@opMIf^65U=S*bV=5F>>f-v;#r3I+>r)rkr!G$B6?0&FL-0Z6UVxvCDBCdf*l$20 z-6W(>;-Z=JX%k&3E=+BjpV~A(H6R%qm!Nr8pp;!*GI}batO30BXOPIwv?a+%!%x9( zD;Lbh^eneJ+pvx^q1;MrL(%$RL(%$R!@7c1*M@bR^X67l8`fFXQ*GoEWz>e#ZueY* zGC1*eNXp=(+95fraa!$=9L_kgc1X^yIRSJ?PNvzS4#}l7r`Hb2IW{NQ4#`rti{h+T z6la~HIO~&(ObE*n;tlKc2r(VQFb+5DQa9YN%iVB;{^UkNxM7ufEfy4%IWp_b(nHFt zLB(B-f_2l~Ao&8y0utV0$B-X+O3pDiV&;8W`3$@jx7d}6e9YL{X79ZbYp@T5wOEB9 z>+K}JO*LxVBI`A>|0rsz3WUD~oMf@<@9-6q5Y69XSl)&qBY)3^V*Xy4PIu>YB21km zXS27=4_kK?f=n*5gribxpp;Z3NIiwhswXI`6~&Qv-U13C$NW9ZI)ATB2JH}K2~zCe z)1=k#T#mQZi2i;lL{`&&*f==^rD$|Ez`lsO5g~WH#D(H}dsi+;&YN$A?pyI54inHn|&T6MP ztDNGjZr?``j6~bI69P6I9MT4aL)wsVNGrp#D?3Y24;`9A$R)cMl=p6s6rClw6Zbw> z&gLQ9`hGddW4Oh*Dsmnh@%LdTu_*d&a}|fWb=v2_^BaM8h2RrF6&@3yPTvEO>P?t> z=JS=+E`QGkRsNpUmvW!Z$tC=3w1FU1k&)PHpvxa@m~u{6FkahQdEIRF{5`9;tJUqCW9A zv^C8hk@b%4r{Q0R102$}1c|1DXq42{m(qAWBRDcW$ctO8fh5Ts>u~E@l=9|5GQazG z-1|^DoBxAb|5;A*IozVLMP51(@pDpmZcrh}Wl+I+Wwy!Qq1I4><-agZ`fZ#pS=W>C zA3>rj6Q;0yCt@Ic3JdZ4y{CZV(kp+jOzzg4T*A8qBP#@{ifj^D4FqKc7i=ZjT6x`U z_53}nx8*Z5>FGK1CEZliCMhqp&OGYZ;el2dsa?gQ7gF`aYb8K-)j!%4g&8FrrppwO>x#|inA_Lob{OEtiu#%{iQhTF2z}IDb6}ean@Igv#wH{^_1eQqZDWT zbeoCq5wceAw-N4;HpU&&M!82Kz8U8ZX$3!*^dj`rNcbjnq4QCW%z9+cf>-lGUrQ=3 z!mZdH6B4U<(H_OMu;cd7Uz4HmR@`EjD{}Gc(YM<|A8&dmz)cbL-vD>PLo%7#jswW9 zD<~Usg0fK~C>u0_vN0nl8#02j5hJMB93G-IRLb=}tCmAr#T?SA=8#r7hqUTBq*c%% z2UAfS#cHTHtDxeneu}f|Db8xAIIEoEtZr{WGcy?6D$j<4Loy(uqYKWKHyd_k>%luf zcOOdGIto~_hOJHY$hORr-wLBNZx4Ty2wv#uSZc^BGES24u|l&!SiE* zcZJ}mfGS)MQm0RXNcAR6J@Zo?8zcF9HdgZYY|BvY=W}uiKOAi!NL6G`WHk_!6xhc*vE9w(Jg1I!?eZ`)J-wg*iq-_Zj9SS0T#h%xP0bj6CIt90;1`=oy zWi;VdE6Sp;*ssLBx0SQ`H{80toa8p#qOnC@+6^=4<4{VfBghG7fthR*&cBBkELd3m zcU)jaw|Iz*e+d#*nJ|UrI}ronL8za<_Y2@SN6z0Xll%LeT*7~e>Jp?X@|6#2@b2Ki z3kgyw{S-8n*%kd&Y7(S=MIWptce2XcK$fhZzh~_uAF3we36k}z(DF4fikcKz!X9yP zh}WuBlEZCoo@Yt8*x1Gj@Q>I(9T^92f~-~Y{EEzX4`cd-e!`r{<(+I-FZp|R6_dYb zS2Ov0Wd`q?(}^&3l0HjSeifGQ6@pxg$^?K))y54Of?S*ml~qqrRx64l@4N*RLXP=+ zmUaGKnGD(?RuZJxzo$vNz$ZE0RwKUt@a}K~wRcPz&HaHUJ;6vmktRJ&n9`)PV@o66 z45!+xUK^m#M^WyaU6xHeV%+e4B0hr~!?Fcsbt6f#K;(+{D z2Y!o&B+E<8I_=|Lr-Wzzp5>jtXHBAvt#PqR@IaW5`}~i!L_(3m8pE^93PGN0&M~m! 
zKL@dYjN&Qdrg*Y=@LCONEeuw%?#4t`czY|#!r&EaJ&Ib-pwxyU9JNL+Ppd_eB$`&k zK!%09suJsah|XP-duTrx&9+*b=hDBBi%yE?u~jvYQOSc0MbT+_iQH+7PRlcY&+^XS zv$j!2KlGJ&Bl!6=I;~Wprm*f9odkKrejCwQ3mNZ<;wj_N^5{%!HKetY=vR3_n#k?awGk6juqCHw?%sv5wT1FD|jsO7OH z;oX6gngmY_sCt5HJ7Y}`#eXkA>$RekzP9;CHSvB9x)q;d`2~n#BnxWDTC#aP+E!DXb>z8_a%qrO{|iWYAxKO7!CEalRBKVF)ks;t z3$hZ+D2uh!3R|tnvel6Cz96lhZN=&Z&;D8KmK~~fbEwrwSwAkfjIvlut+3UKEUN*_ zGXv|Sf$oGN-v|oTvpIK1<`m-(0m#H!;14o-C^FiPS{p;HM%Fqah)-IYvRF%Px7CXL zLCZ*6U`8#5mHbfi6KJvprRLv93&w`^$f!)ZN@u$f`TqyxxQ#!{%pu9`)8OimycmiI znjkw?)&fCUnS!E3GB4QbqWTMSBQfC);AEsAI0m~K@{U2O!h8F8S3yur9hhBm7`T|=2!1P|s%f-T z87)!oDwSm8O9xxK!u$5ED1|-2^#N6V2;iS0imEfa7j#ytz7Tq9MYbK-U#-V65Z->O&TH4swk3~J%*3)fOC=kbi zq*KgKbZks(CEMUt)M`bk`VfxIpY&r^izIk37u)jv;CLU(f>|4f+Bo@I$f*g3UYWD< zDPo+DEsRn;v;p^}Pd>wu;zB22wE)bOECMe73Q*h|dQOX;4+|p^l61m_d zyw4`JDrv0>8C1bYhMWy+tpZ;FtG&^pl3mCu&L5NPvKF=87Sv8=PVdC6+X6|2Afrc* zv07n8M_WG%7BpLO2SKuO!>Q#}xD)gxFxx(le|Lv!QO5s9AFwf&h1N~7>90Y(eYHrP zYsgN>*hWJRL$lM5()>fH^;UHAe@9smqHiCZ1K@}J zJ{Q+fPhq8@gJIGBmF-gGb!O;N4Ft(vBJ<9*mcK(%z_GE@-no_+lS0rW(=9gpvLLj1 z4^VTjP1@F~O6nD%R)ru{m0DI&K~YW0DByhpc+z(1wFJpqEZ$J$h7D`UpO!mHUPL!k z2+~GJXRV~T{jqVW4N3S^;N(~?!SjJ~yg1F66T)i&!3%(LbiC*RQe_b4&`{tcsz#Yw zjUri1ab+g4p0H*$N);5rH9`4g#zzHSL2Icn!aE1wR0!@CPzwm|op;wUxS%g{Zaphc z$BQ|UA3>Vr+99QKe~-mzp>zG%-X20AMO-sXj)J8nKKABoCmA4S|Yf-)z<&Q_~SpAKte+lZZMETed`RxY< zr=jHR8FS|Bdf;zBxgORb8@?Q#OjW|S|LwAYF7 zPXo%+;Fk(Y%YP>5-h^`QcGkZV^jD+22IbpPE`r?d68WUN74+1T^{l>Q5ocdTIenC; zxCo`CKNEBxHuMWh=$`}KD=3fM$#a~BvVw8}%H4PM_YX#C<&)3fz;6ud9fZ=-lke-H z-_<*NicKgV8||-`?BdHCP`(Rg20f@(8UKZl--`045`8G|2ax;HtCas&)ZcH2`aS}^ z%KTyN`x)qeiSjonx1zk$_|w+^-FDKG@BV17cYr_TSpFkHH&lAkS^Fu+@;?~-hf2?W zvjz2@hW@*ncD{z~WduHodcQ>33%W9T(or7cnDq?%Y#8(zO}$Xseqr@y+)|ISeq!Yv zVE8YD9~Pl}H_DHowEPo(pgS4mnJ8zYwDc3ve;1+LfO70^p7Q41eYpXpt$#A;Pe(Zu zrR8@A@S9K$mA~avR`0G7{W>7;GL$P(-fZ+Z6!uOjvAc}_FCcgM9^Nw>QNDt*?X@1? 
zjq=+l|A5l^lYBZ+|1y*}p&YTNuQ3Yck$d~=3sGK+@&S~VKl$7Pe&o|R##6op4Yf=@;7$W%_*_{y711 ztUhfe@~u3}-`aZ<_*na10G{nidj`{gcldE1lV> z)}9HFvk2vZj2|Pnro>LmkAC)A1lw=b0_ThI?8XM+=9~5-+iFR+=ueBC?7|; z;vj#24N6=8@q;~P(ILJZ^*Uc3fYQ=W0NrGiXP`VAJjxSMo`%xjjsISa zdS(0~rH81;U!YG}y)EF|eYmf81In9FZa``I4?omn4nsM1oWE{Fc>&56lp7}b`}d)= z{0;@(nM0&M2XtlfEPv{GB=kK2iF+_jDA)hD?irskhO>UE&4m@cPRYyF_h$g^52O+`LaF8m-<_KKZy3< z>qy)=#+PGIP8jd6&q6r`<#d!|UhnTuKzYvufBg(fe-;0woP!|mnWO!^Ehr%>9^ye8qRv!7@2RiblzLuVJ);{Vrn0?=XoF`G<13OsX%KOfC(v$DAs7L=(j^+O} z=!Q!F9^~&2p!_V#Cs00#@+p*ykM$C-M``7k)%zdRAA|nIJa6ki3%a4wv)>$v{=sW${}}YT7iGvJCHw;&=pQ)BYp@CB7L<#SAK302(f)QE<2W@NNKrJ@jl} zK7{hqQ~dSuQ+>Gz<>+bt+Wz}x+BJy2yF=e&P?qU$`Tw)+p}Y>v2g>|6^K`H2mGC3| zY4sY;S+M8TBo{zX5*;;;kE{)rWY-<@U-a z{c}@1=fV0v?|%w;;;BLM#rtrd^uv)?%JR#fpvPZPW~k5nMFXR^CydTaNN-j9ZVP5ad9l>v6D;Yv;T^E zH=(@Q@U!x7$Ni6>ykijhKInNBO8SN4u;ouWj%ysZHevj_`8Z!=J<3;5E<4`ipD}jZ zbsEPxuFF=Q>Fe!zmM_mhxftbOQALacO`PusO z3vcjxmHE@^Pd=xe?C;c~ybk3pD8G*KTPVMS@>!Hzhq67X$M&}0uHa|&xeWO2)yLZV zYV~9N`w+MFZ}bxGLV3d!f6YA2Jjgt3^|AE_qrcV2n~!{3#-DmV3w?(nK5Y9~J!ps3 z_eC1LGV2amd4$3n64EIUjGYI{G(Bn{)hohuF zz0~-xjqC00XOWYK`Z3P!bF9DSJ{|X=zQS>=WdHIm)c+$&(*K0@j6jY9--EnOSie1k z(vWAQdjj-NqkIYF1n4!TM8B<|I|t*;B8;QlC*n9g1^V8Jl6F{qu0j3lQQlzqS^D!K zcL7S;$#tuxCmqL0jw9U|S2FB()}ZW0$$i#tV~5?>wELJ`*Dx=Y<$cbZt-kHBYkT#* z4SH`JM1L#qA8il$fAbVC+4}29_GW(ly{@N2jycZxi8K3 zq8^;*l(o-h*kR?*0pIPFZ|&P&eMtXvtZxR>pZ7nD`P?%o%k2Fi?vs8L)^}y}zlR<# zq5Km{`q$cP{kXmDX!)^UzZw0T`&bh&FSqo!gYIi6nfEFs`Hyse1U>1NmCzr8_1a-5 zPe#da#n(*t8vOv}m~;F!KZyS9EPu@pi$8O&zvd^ouYZ%j<}Y^gmjL+la{g}o_vRTM zuFUl1VEh-&^>qA6d;aDge=(b%=jX2se6!xu^M@q)6BPV49e+3eYwe-D^^kk=yjLeb zO7hUnvwisml;1?T1?BMb{QWPZd<5k;P)>(F3s74An?c9+q5ig>_2*OMSMeO_U+YgR z?>CV57nCCr5C1Irvk|vfoA@1zI9`VGF_eru{&FGXcgq}a8U4avETkX!D^mO=BF1|U z;?4TW{@fOS$mZ-uFNZ(C!ry7y{Q~P*e|^yq?YkQGZ5YH}E9L*|_EW#FA+82%AGQa7 z!ra4EqL3uVx{>bS%Tr*y5JW%dGDjxVV>D#M+GxWClTmSsC?V&vW z^0W0HfBLkn|5*LWXI!1X!{2gdf91Hq{yA9xv%exuey!loUvuVfEL(ku-%Ot(c{-poN z_y2GHvHJ5jt(KkdEww-P`90vrV7_(`O2&h2|9^D*TK)NprB;9bw%K6%+xGou+e3N$ zB}VH%&Z~zS|K!78VdQTsavUhj^XzZdANE%r$&WuT$6r_EZywtE#P6tl>dRjM{#>kY`BPB$fR5`z%HcfJ*0;ZM#QKkx$U6t~ z{5PSThq4jn1t?okE<@Ral0Odgdt7f=;5E7jrPYtWCo|z9f9GbDkD;{mKLLK%i~XI& zi+#x-NU`*!KKvbxGJboZ{#ca!IT_aHkAK+uq@Rd-gVB@kRM7J$ zR4&CefB%F(oMGjWoA{wM=K#s8C5f1kfh!cU~~mjd{L zKEH!zrSMk@`11jS@w4?`fL{NL(*CG}r6=8#Mg9)If=j*my<2|gjIZMJLu&j?A;0|0 zuc+BisPP+0|0wzVB*XT~A6E4m@n;(Bho!B){IoN_hFj*Z%i4TRs}H}FXg?;%?{xB8 zs{CeCl!TiE6(^~%gz%2RGk7e|GEb<&I{lOiUv%lV6eJvN(=Vv_aH#}_|3|60C+~lVs z2di(temNLD^`yf+v< z`SNpk;$1Q0Ym>59X0?nZN7@_6DQp$K&{2F6Pmv(7*X%w8^mN6qNh`n)M&~ z@MBW^N*LSS(m%e;WB4I2(({{HmVOK9Uqs38VDYP9mi`&^8~%3Xy_a|jenrR9Z|i!5 zd|YMIu#{p%Hay!>h#!$n8D2y`GUI#j`MV>sowBE2o5y%~L^dkBXP+Yc&RMo^5q@;G z`gKM4U9v4l6ybNx#vECMe@)hXY!QC9?CL`N?%AT-is<*qt}evynRP!r*B8P2m5G zDlC8euF|gsR=Ir7?PpMQA1M|87@qH+Fv?ZSP7=J@RryhkLrBECzw>~94EWK&&j)^^ z!FL0{<;faHN&0sJ|A3+YIPmuv{6@j6p4sVM;Ef}E`7-b~ z>Dz&yaIU`|kNo!|DTihIXN%A;migP+i^QWJO#yyak+1$)1U-pANbo9m5uR6n68ske zKjwVTVDmU%-UmK68~;2E{J94IH1Kyp?nvmf1^7D+euNOHo*Rrm_XmEH!H)xek&$~E z@KX%^0>P`jTe`g>#~|PT8|XJ0KK}vyV+Q{<;GZ%0p8)@?!Rz;@w7oW?Q%A`afXsHNZb(_)G-;9z%bg!^?Pz z{7~bfB+Gh1zrfI6=kPMGxZe}-9wFV?{k;hM1E2NRI-cp@ z9=oVrdt_6<{|JxH-T?f1(C-fXeBft*zcjb|YX|;W(7y)we*wM%{A+=~9r){k-vjsu zf!}QSd>{Bbfv5h@0Y4V{>}|Wc+!c!|#zD1A6K?9{6tX-wS-^0KW+IBF_Ca z13%ix)$cZ`KQs9AM9_a2_zB?8na#I=--vcR9Q1z%z8(D8j=PG&%KweP)BlG6pMn3; z;By}ER~!DTfqw<`BF_E21NhSo{|^B_+2B6|deTRPEXXeT2;Yy}{wfW;Nhb1H25^h4Yztj($v5^Fo_=`*MohA4yOYr)gw<7)fOYr(Vv?BUP9De`Iw#&Cl z=$|aX>vz?P`2TMS{xy;sis%n6!Jk-yKdl5mOYqDg`S@uqq34Nl-z}?on@60rzb~IG;qz<>{;wtY{q`*O=kX=@DS~Hi&#T)|Lf=_}zqSN_O9}pV 
z!S5~e+l|M2sn0|Bf0odHy9EDq30}WTw|AEAOZ>isez*i-Q5^19f*)6cKcfUcrv!gN z3BIEQuiyDozg>Ntci0)4_~q{-CG?*z!GFC3|3nG?XC?R-1YZ>Adq_~pt>o`u!S9`= z`%#CN(4Sa>KTGg63HL0kEun8I!S|NnZ!E!oxCH+|3I3ZU_|1aXcD&~VZ^SWJ&puy5 zuTCB#rL}EwOY3qWZf_TIvg#IDtzApn zs;w=p%i0usYPG7!B??cP;4fO&^eyY^toE$z>}&6ERFj0al66*l#jow1ZK7zkk5U|W zFly`Q?psssTPfa6(TmsgX^V&?m^E$XB{Mtvs&y^xy=^mAcZ<<&ODd{{It4eUcXjr) zt#;m+bozihm7cEFw%%U#%JgL`JC|>(=1Bu;PG8p6y1c%xr@eD&NhR@|Q&h}SU8=L% z`b3{g+IpsTE}7r5qJ2qAUt4A3gvAcm-g!w^Xtv5k$Wpy#%boW2_R6G51LW8DN#o2| z(bkb~#Z#TYiJp*^C3;sbhA&U{w>sK8SN8hYmH+|$o7;N2yldg3%9@!gJ6rqOr6Fgw z^!80}S+U}r_SP6(6$uLsjl2e@s|J%rRc-6;OXMetJ6cy&PpXRht1*OIqp6c7_q8mo zteL8PWS$FJ=`?LCmh{$7s&!eV$Np>~N~1 z6A=`d1V!AP_7!6G((0;~6)TnBj;J|96N$5YO-s*G^_L44IIJV@FrT58v|ZwmsMhQH zO0}=W`8Kb&bcU5}GkSWudh$$E@1B;jaoM<9b@?S{*Z>|YMd4-DP`%o&1CC>o z3*N4k807RuzU6ZL!#Jb2>T<}c_9bmy)voToNJ+>E$wkPy?GBUczV!`_GiO?)v)>_M zrG{KbOM9ooO+-?vPOZ##h@2HpA0A)NWho&c`HUnF<1E{q_E}H!5UCGgytKR z+Bf&az#T4u4a<}04unIARCeCwRofRQvX@k&$Jqy~1N%8s-tv;^0E1(>=H~&~%guST z$IC696@`>HF__25=&rgkcp|dYatX>@@?=KSc6ZV0CrpS2CDtjEI(|~b6=i4ksu(>% z;DAtxtv!^GW?(HJEOkY;7>JnT3KLTd3O|a*HVYkMa&Y7jazsvgOep2aAb4g#Du_yi zoF7qP@Z}U>c{TJOx?VH9OyD}ZF7K2{x{l3mmMV$do6x=5O;Dp{Lm<|-x~!#hiO$K+xp3~( z^X0YUYPI3~>0~f#&b(<;=Tzs-oLN7kq1rHY+MF5HtitKhxh=iR;#HVc3Tnpc)&ZT& zDenhkjR;L`K9^Qq?y9Vr(mpBxBbm+)jmO;-_}vBYjeZT0FhJs z`g#PR(?Olr&hBgL@N;27`hl}5i-_~xN+nj*8Khh_cDB2=nc52|lL>OQp=V{A@K`CU zmdct2o&8rUGR2h(8P2P7bv#@lYKe_)-8v`Ds%w>$e zx+?7lDanWpvt@Z>)R?Agvud zP@L1&-P0yBSCP&|J@%p<9z)5VcQ4}>QfLsJ^$={?^Dny1{tzpkxyrTBbR8&cZ&ZY1y- zMTus%tmtJYtX3EI$^x>lMWzzomufn~Q)12~$?9}*S6|yCP|Rx;{ylA~q|3@qJz<>p zYGbEFWb5*_CEmRSp*N>(Roe>oD3`1Hmi2U9Zo2JEw8vX#S!q_j=k^v}grD-7EUC z@vi%g7jXPiiRkfe<7&K1V&i+d+=w^6O_o`%(UvU{CcN@hF&lO@2w&PNWFn@eqrFuW z>grSWg3;k~yaZ<^J2)L3vOy`hw#6%ThO7&3!TU?yoFd$^cyUkLDqd*dc@kuzxikLRCR@>~pV%b9_Jf8*uyrL9}LSfA&lS+`hPjay9IyeAcJ^ov1$z)p#yElsG7( z_2tw>kk9(71*bmYc^aM{XK7C^tFHOKUb)EE=Q(qhi%bPRJ79U0sn7H1EJ;V1w4c}4 znfg4p&ho!i!Q7wacaz-9>(BG;EP1YnEQ~qXt*Fm<;5m4f&l?BUkP*uFoBBL2&+g7)DP$CKj$kRA?GCXf7XAWlAChG!1W=ezFMrD8HI8%&63_MQm|)=V;zA15PkGUP zo}=Hi+0%?>VEW6f1|^H<1W@@Ch(O8uCmX!d>L1IOa%)#@_!OSR<5Z*bWg@M%&kfZa;7t1XlfvPeb(kqiI0nG5`QKtiig#O?vJv8t4*cY^mYr_u|H+SqkX0qbo!C+M4|wf)*%-A%)9QTE0@;F-e{L|A#|-fAr@`#zg3zjSBJJPD0K{R>ox%dO40w3xx;L}48hGe#(V=G?D zY(Wnl&5R|ift}f7I;LM}Ml8`9c-P1&0Z#?KYG!9-g=7|b z*U6X;YMC>pxceFzTc>X3!nqvYr-Zyod1U-!$#}%LQhdbX9&2v;QE!8RXfYGBk9Rj% zAcr#0oA*Ao>vaa>x$5h!LPHByvSuWT4O(05Rg|P=vR%833H@kq$OG2Y9uK&Juw6g$ z!!%^zO(iDUm&^ZB@&LGv{8ZR z_Xltq^Z^tG^v_d_#H^&Ernl0hDw+eqq)8ylT(XQtfX)DFa=bY`RhVjn#d4O!+Bg1C z0@g5)`(u-BdO>RX=?jX*26%rtnx!Vh3jNrO}ppx5gmBln-56UD>UYQh-y9H+5 zMv2TG#D62|I#hYS(>aJkc^_od9|3e7ZzQQ8q3t83$zB(p>=B+wa=AeZmLwZ4?IgO(in4>=-A6w1E;ElVy?E8~$T%7QEV$2{pn3K-wFTdu+< zU17f{e3BKu&FU(MRz)qK-av5;#UhF&5Z%SOTCwkouAhiyn9u7M^9*c_&P%Op-?r2W zcw3d+jnXg~Dfq7;xeJti^ss2?yb6@$Xkpx33Z$4S^3 1): + raise ValueError( + 'levels must be a positive integer, but got {}'.format(levels)) + if min_val >= max_val: + raise ValueError( + 'min_val ({}) must be smaller than max_val ({})'.format( + min_val, max_val)) + + arr = np.clip(arr, min_val, max_val) - min_val + quantized_arr = np.minimum( + np.floor(levels * arr / (max_val - min_val)).astype(dtype), levels - 1) + + return quantized_arr + + +def dequantize(arr, min_val, max_val, levels, dtype=np.float64): + """Dequantize an array. + + Args: + arr (ndarray): Input array. + min_val (scalar): Minimum value to be clipped. + max_val (scalar): Maximum value to be clipped. + levels (int): Quantization levels. + dtype (np.type): The type of the dequantized array. 
diff --git a/CDARTS_detection/mmcv/cnn/__init__.py b/CDARTS_detection/mmcv/cnn/__init__.py
new file mode 100644
index 0000000..10293ab
--- /dev/null
+++ b/CDARTS_detection/mmcv/cnn/__init__.py
@@ -0,0 +1,11 @@
+from .alexnet import AlexNet
+from .vgg import VGG, make_vgg_layer
+from .resnet import ResNet, make_res_layer
+from .weight_init import (constant_init, xavier_init, normal_init,
+                          uniform_init, kaiming_init, caffe2_xavier_init)
+
+__all__ = [
+    'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer',
+    'constant_init', 'xavier_init', 'normal_init', 'uniform_init',
+    'kaiming_init', 'caffe2_xavier_init'
+]
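A short sketch of the call pattern for the weight-init helpers re-exported here. The signatures assumed below, constant_init(module, val, bias=0) and kaiming_init(module, mode='fan_out', nonlinearity='relu', ...), follow upstream mmcv and are an assumption about this vendored copy, since weight_init.py itself appears later in the patch.

    import torch.nn as nn

    from mmcv.cnn import constant_init, kaiming_init  # re-exported from .weight_init

    conv = nn.Conv2d(3, 16, kernel_size=3)
    bn = nn.BatchNorm2d(16)

    # He (Kaiming) initialization for the conv weights; zeros for its bias.
    kaiming_init(conv, mode='fan_out', nonlinearity='relu')
    # Fill the norm layer's weight with 1 and its bias with 0.
    constant_init(bn, 1)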
diff --git a/CDARTS_detection/mmcv/cnn/__pycache__/vgg.cpython-36.pyc b/CDARTS_detection/mmcv/cnn/__pycache__/vgg.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
diff --git a/CDARTS_detection/mmcv/cnn/__pycache__/weight_init.cpython-36.pyc b/CDARTS_detection/mmcv/cnn/__pycache__/weight_init.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
diff --git a/CDARTS_detection/mmcv/cnn/alexnet.py b/CDARTS_detection/mmcv/cnn/alexnet.py
new file mode 100644
index 0000000..1230ee5
--- /dev/null
+++ b/CDARTS_detection/mmcv/cnn/alexnet.py
@@ -0,0 +1,61 @@
+import logging
+
+import torch.nn as nn
+
+from ..runner import load_checkpoint
+
+
+class AlexNet(nn.Module):
+    """AlexNet backbone.
+
+    Args:
+        num_classes (int): number of classes for classification.
+ """ + + def __init__(self, num_classes=-1): + super(AlexNet, self).__init__() + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(64, 192, kernel_size=5, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(192, 384, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(384, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + ) + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Dropout(), + nn.Linear(256 * 6 * 6, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + nn.Linear(4096, num_classes), + ) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + # use default initializer + pass + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + + x = self.features(x) + if self.num_classes > 0: + x = x.view(x.size(0), 256 * 6 * 6) + x = self.classifier(x) + + return x diff --git a/CDARTS_detection/mmcv/cnn/resnet.py b/CDARTS_detection/mmcv/cnn/resnet.py new file mode 100644 index 0000000..e98f6fc --- /dev/null +++ b/CDARTS_detection/mmcv/cnn/resnet.py @@ -0,0 +1,314 @@ +import logging + +import torch.nn as nn +import torch.utils.checkpoint as cp + +from ..runner import load_checkpoint +from .weight_init import constant_init, kaiming_init + + +def conv3x3(in_planes, out_planes, stride=1, dilation=1): + "3x3 convolution with padding" + return nn.Conv2d( + in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride, dilation) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + assert not with_cp + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False): + """Bottleneck block. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, + if it is "caffe", the stride-two layer is the first 1x1 conv layer. 
+ """ + super(Bottleneck, self).__init__() + assert style in ['pytorch', 'caffe'] + if style == 'pytorch': + conv1_stride = 1 + conv2_stride = stride + else: + conv1_stride = stride + conv2_stride = 1 + self.conv1 = nn.Conv2d( + inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False) + self.conv2 = nn.Conv2d( + planes, + planes, + kernel_size=3, + stride=conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.bn1 = nn.BatchNorm2d(planes) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d( + planes, planes * self.expansion, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.with_cp = with_cp + + def forward(self, x): + + def _inner_forward(x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def make_res_layer(block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + style='pytorch', + with_cp=False): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d( + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append( + block( + inplanes, + planes, + stride, + dilation, + downsample, + style=style, + with_cp=with_cp)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp)) + + return nn.Sequential(*layers) + + +class ResNet(nn.Module): + """ResNet backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + num_stages (int): Resnet stages, normally 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze + running stats (mean and var). + bn_frozen (bool): Whether to freeze weight and bias of BN layers. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. 
+ """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + frozen_stages=-1, + bn_eval=True, + bn_frozen=False, + with_cp=False): + super(ResNet, self).__init__() + if depth not in self.arch_settings: + raise KeyError('invalid depth {} for resnet'.format(depth)) + assert num_stages >= 1 and num_stages <= 4 + block, stage_blocks = self.arch_settings[depth] + stage_blocks = stage_blocks[:num_stages] + assert len(strides) == len(dilations) == num_stages + assert max(out_indices) < num_stages + + self.out_indices = out_indices + self.style = style + self.frozen_stages = frozen_stages + self.bn_eval = bn_eval + self.bn_frozen = bn_frozen + self.with_cp = with_cp + + self.inplanes = 64 + self.conv1 = nn.Conv2d( + 3, 64, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.res_layers = [] + for i, num_blocks in enumerate(stage_blocks): + stride = strides[i] + dilation = dilations[i] + planes = 64 * 2**i + res_layer = make_res_layer( + block, + self.inplanes, + planes, + num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + with_cp=with_cp) + self.inplanes = planes * block.expansion + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, nn.BatchNorm2d): + constant_init(m, 1) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + if len(outs) == 1: + return outs[0] + else: + return tuple(outs) + + def train(self, mode=True): + super(ResNet, self).train(mode) + if self.bn_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + if self.bn_frozen: + for params in m.parameters(): + params.requires_grad = False + if mode and self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for param in self.bn1.parameters(): + param.requires_grad = False + self.bn1.eval() + self.bn1.weight.requires_grad = False + self.bn1.bias.requires_grad = False + for i in range(1, self.frozen_stages + 1): + mod = getattr(self, 'layer{}'.format(i)) + mod.eval() + for param in mod.parameters(): + param.requires_grad = False diff --git a/CDARTS_detection/mmcv/cnn/vgg.py b/CDARTS_detection/mmcv/cnn/vgg.py new file mode 100644 index 0000000..67dc22d --- /dev/null +++ b/CDARTS_detection/mmcv/cnn/vgg.py @@ -0,0 +1,174 @@ +import logging + +import torch.nn as nn + +from ..runner import load_checkpoint +from .weight_init import constant_init, kaiming_init, normal_init + + +def conv3x3(in_planes, out_planes, dilation=1): + "3x3 
convolution with padding" + return nn.Conv2d( + in_planes, + out_planes, + kernel_size=3, + padding=dilation, + dilation=dilation) + + +def make_vgg_layer(inplanes, + planes, + num_blocks, + dilation=1, + with_bn=False, + ceil_mode=False): + layers = [] + for _ in range(num_blocks): + layers.append(conv3x3(inplanes, planes, dilation)) + if with_bn: + layers.append(nn.BatchNorm2d(planes)) + layers.append(nn.ReLU(inplace=True)) + inplanes = planes + layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode)) + + return layers + + +class VGG(nn.Module): + """VGG backbone. + + Args: + depth (int): Depth of vgg, from {11, 13, 16, 19}. + with_bn (bool): Use BatchNorm or not. + num_classes (int): number of classes for classification. + num_stages (int): VGG stages, normally 5. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze + running stats (mean and var). + bn_frozen (bool): Whether to freeze weight and bias of BN layers. + """ + + arch_settings = { + 11: (1, 1, 2, 2, 2), + 13: (2, 2, 2, 2, 2), + 16: (2, 2, 3, 3, 3), + 19: (2, 2, 4, 4, 4) + } + + def __init__(self, + depth, + with_bn=False, + num_classes=-1, + num_stages=5, + dilations=(1, 1, 1, 1, 1), + out_indices=(0, 1, 2, 3, 4), + frozen_stages=-1, + bn_eval=True, + bn_frozen=False, + ceil_mode=False, + with_last_pool=True): + super(VGG, self).__init__() + if depth not in self.arch_settings: + raise KeyError('invalid depth {} for vgg'.format(depth)) + assert num_stages >= 1 and num_stages <= 5 + stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + assert len(dilations) == num_stages + assert max(out_indices) <= num_stages + + self.num_classes = num_classes + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.bn_eval = bn_eval + self.bn_frozen = bn_frozen + + self.inplanes = 3 + start_idx = 0 + vgg_layers = [] + self.range_sub_modules = [] + for i, num_blocks in enumerate(self.stage_blocks): + num_modules = num_blocks * (2 + with_bn) + 1 + end_idx = start_idx + num_modules + dilation = dilations[i] + planes = 64 * 2**i if i < 4 else 512 + vgg_layer = make_vgg_layer( + self.inplanes, + planes, + num_blocks, + dilation=dilation, + with_bn=with_bn, + ceil_mode=ceil_mode) + vgg_layers.extend(vgg_layer) + self.inplanes = planes + self.range_sub_modules.append([start_idx, end_idx]) + start_idx = end_idx + if not with_last_pool: + vgg_layers.pop(-1) + self.range_sub_modules[-1][1] -= 1 + self.module_name = 'features' + self.add_module(self.module_name, nn.Sequential(*vgg_layers)) + + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, nn.BatchNorm2d): + constant_init(m, 1) + elif isinstance(m, nn.Linear): + normal_init(m, std=0.01) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + outs = [] + vgg_layers = getattr(self, self.module_name) + for i, 
num_blocks in enumerate(self.stage_blocks): + for j in range(*self.range_sub_modules[i]): + vgg_layer = vgg_layers[j] + x = vgg_layer(x) + if i in self.out_indices: + outs.append(x) + if self.num_classes > 0: + x = x.view(x.size(0), -1) + x = self.classifier(x) + outs.append(x) + if len(outs) == 1: + return outs[0] + else: + return tuple(outs) + + def train(self, mode=True): + super(VGG, self).train(mode) + if self.bn_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + if self.bn_frozen: + for params in m.parameters(): + params.requires_grad = False + vgg_layers = getattr(self, self.module_name) + if mode and self.frozen_stages >= 0: + for i in range(self.frozen_stages): + for j in range(*self.range_sub_modules[i]): + mod = vgg_layers[j] + mod.eval() + for param in mod.parameters(): + param.requires_grad = False diff --git a/CDARTS_detection/mmcv/cnn/weight_init.py b/CDARTS_detection/mmcv/cnn/weight_init.py new file mode 100644 index 0000000..1f807cd --- /dev/null +++ b/CDARTS_detection/mmcv/cnn/weight_init.py @@ -0,0 +1,57 @@ +import torch.nn as nn + + +def constant_init(module, val, bias=0): + nn.init.constant_(module.weight, val) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def xavier_init(module, gain=1, bias=0, distribution='normal'): + assert distribution in ['uniform', 'normal'] + if distribution == 'uniform': + nn.init.xavier_uniform_(module.weight, gain=gain) + else: + nn.init.xavier_normal_(module.weight, gain=gain) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def normal_init(module, mean=0, std=1, bias=0): + nn.init.normal_(module.weight, mean, std) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def uniform_init(module, a=0, b=1, bias=0): + nn.init.uniform_(module.weight, a, b) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def kaiming_init(module, + a=0, + mode='fan_out', + nonlinearity='relu', + bias=0, + distribution='normal'): + assert distribution in ['uniform', 'normal'] + if distribution == 'uniform': + nn.init.kaiming_uniform_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + else: + nn.init.kaiming_normal_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def caffe2_xavier_init(module, bias=0): + # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch + # Acknowledgment to FAIR's internal code + kaiming_init( + module, + a=1, + mode='fan_in', + nonlinearity='leaky_relu', + distribution='uniform') diff --git a/CDARTS_detection/mmcv/fileio/__init__.py b/CDARTS_detection/mmcv/fileio/__init__.py new file mode 100644 index 0000000..b804e0b --- /dev/null +++ b/CDARTS_detection/mmcv/fileio/__init__.py @@ -0,0 +1,8 @@ +from .io import load, dump, register_handler +from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler +from .parse import list_from_file, dict_from_file + +__all__ = [ + 'load', 'dump', 'register_handler', 'BaseFileHandler', 'JsonHandler', + 'PickleHandler', 'YamlHandler', 'list_from_file', 'dict_from_file' +] diff --git a/CDARTS_detection/mmcv/fileio/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmcv/fileio/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b049b37d784694a05fed0679d48c6c5e87386ef1
[GIT binary patch literal data omitted]
diff --git a/CDARTS_detection/mmcv/fileio/__pycache__/io.cpython-36.pyc b/CDARTS_detection/mmcv/fileio/__pycache__/io.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
diff --git a/CDARTS_detection/mmcv/fileio/__pycache__/parse.cpython-36.pyc b/CDARTS_detection/mmcv/fileio/__pycache__/parse.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
diff --git a/CDARTS_detection/mmcv/fileio/handlers/__init__.py b/CDARTS_detection/mmcv/fileio/handlers/__init__.py
new file mode 100644
index 0000000..8f763ec
--- /dev/null
+++ b/CDARTS_detection/mmcv/fileio/handlers/__init__.py
@@ -0,0 +1,6 @@
+from .base import BaseFileHandler
+from .json_handler import JsonHandler
+from .pickle_handler import PickleHandler
+from .yaml_handler import YamlHandler
+
+__all__ = ['BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler']
diff --git a/CDARTS_detection/mmcv/fileio/handlers/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmcv/fileio/handlers/__pycache__/__init__.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
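The fileio package routes load/dump through the per-format handlers exported above, keyed by file suffix. A minimal usage sketch, assuming the vendored io.py matches upstream mmcv in inferring the format from the extension (the /tmp paths are illustrative):

    from mmcv.fileio import load, dump

    cfg = {'depth': 50, 'out_indices': [0, 1, 2, 3]}
    dump(cfg, '/tmp/cfg.json')             # JsonHandler chosen from the .json suffix
    assert load('/tmp/cfg.json') == cfg    # round-trips through the same handler
    dump(cfg, '/tmp/cfg.yaml')             # YamlHandler and PickleHandler
    dump(cfg, '/tmp/cfg.pkl')              # work the same way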
zz~^Ei3Q?{zT8iZ(YF1wuFb$rMuDF-iO3Pbrg_Z-P4(`0Of6MwowQv9GPu%ETeMkT^ zq(Q<3haBX-S(tGE`4bWdgv7-?)Dm0~lvaXDh)U1{$QNtl-83zB9CjV5R_>`i zm*?O`c!<4n;uU&gW}`I0v|^*3j5D*d-~2ewJDujM%aQoW2>C^p76*M3ws{Id5=k?X z)0pNgW|X`ol1cBJNKbiZUd$!mBccBrJW0sx%ImG|_o@`OD+Z?2A}w=OjtZHgeZD+U zHes6`5Q@Z9l9(xorF=;xJ11u}Zh-Dd?(`MVec5n&AiFYM z(WuDPGmzu`9G_hGLynYc`j9{((<4fgPwkiFvdX&Qu(b|*Ke3F#K zoTLpu&Pg8ENov0TE$EEHa~!&L6GCeq7AB;>bp3;yt_MF$^;H$HJ5AWGoj9W-_>flK1>GYb$Vc*d-foyrUQEfnDnTY8jY=Wt<%uGrV%0oYmP0sFmE-?7umAF_&HP)a@lc`XCo zgEJbN7B-g3jAhhoC|u8Pp>{RIv&OgQRY(5&;IQC-Sm$4M-3_Vkf;)U+)?v#ANir6> zN)p>l5-8dkdBNMsXL#0@rCctQxC^ v+hD>0fjN5cNlrhTSf!N=Q8*ZXC#^k>HA;DiRRxAxrs z!C&%~Q~yFwok^N%DOj5E*nWQVX6!eU$>{yY`nk2IJyqN*5O< zo%bcVl*rZn>S(p`2b4#Z*3^oi=4=_0uud(%c(GH#(LcySB9sQTpW;(A_Y7TM(^iyX zE3Y}nC%LcAhIr}&Sl6p0uzR$SnA1{HAASFo+jG}d**ofwpAuTNW)L{W%|YPvU4f5q z#n5t+_8{*mE_!(---_#@?Y*#&C|n{tWp;v4OZdl%#zQ98cIShd!~dy4%q)p~tW=ZN zS}8wLsy48$DBf4<$2zb26HArZDY=8cv(md;YKEm&M`(^*EMghUZV$WJxAqJ_v88Ie W?H%P096o;A=YK~Z-w|Uh_T?Q1P{RWN literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/fileio/handlers/__pycache__/pickle_handler.cpython-36.pyc b/CDARTS_detection/mmcv/fileio/handlers/__pycache__/pickle_handler.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..964cf3f1911459d2b829cf8f0a4f564c064194da GIT binary patch literal 1414 zcmZ{kO>fgM7{~pRwrSUHjQ5R=A#Rb-D!~=O!~u{H6%w~xtSoldc1aWLbYoL*o3z`m zdkAbQRT`9l^i588Whn^zDdku)JG z4H+epNuKc33q49c5$Va=IgvHxo$;_H>)%PxxPloZptccz8^(u;x}s%&?L%bL$6=yA zM`M{N9b?|?g8J?`PJkYA8Km0Wt~ehBqs9mZ<1A$|C-yJt<=f@h5j z;*rn?h85c>YtKrta&>}b@csg~S%pxL5uMULow7>`6KOACww^m~>m^wv18zN|lD_r& zfNtxD$B{lTzlmOf+1FpYgDh3AA&w7DCegSXpUB8`O`e%+|Ez5|iznh=fb*w1*I!gs6XZs@`K^;9k-C5GK`%iAXs1gql5zQ8af)sR0&e({d zR@~MhDE?Jy<~{4BnN*hPeNbC7lU!+CgK2hkUL%i+LBKWgSRdhVK)0X^N8J=6P9kH3 zaNDBWFs#=hI08f=v_((1{wDy}TB5%>qfMAPWg{@4WM^Q%f|eSp^8$81*IALpS#pK) zK|sr>LaKf=NeW$uAzPD^G&eJN4!i2OOH?FuymkfCM3D(o=rtHMC3l;iZY*;z_ckf$F(0Jg{2cDNp{A9!x7^-~;fV!*Awjl4EP zsoe4aa^~3EcVkz%OXxRlc?l~lx&@&tzi{-+93I~u%Z|Y^`W~V=!?b5W&Mu1@?qg1A z7mb(;M%jk$6SymKFv{lL(X33Q;|!0-$M z*0G(+s8f+vhwq!l YopgB%XEpbj|F-0Phu=h}x6Ru8FN7OEjQ{`u literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/fileio/handlers/__pycache__/yaml_handler.cpython-36.pyc b/CDARTS_detection/mmcv/fileio/handlers/__pycache__/yaml_handler.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..325d4611ec0b064c78741bbb6b0fb21cc338092d GIT binary patch literal 1099 zcmbVLOK;Oa5Z+g8H;i2*!S~mC;520H+4tY)Fk|Q?cZtF#A&6`PP-lc`k3=Ap9 za$rSG1-s}N7IV#Fp}7|4ES6eog)6es9CoS+~T zKNFl);zTa^h%dwil_C0|5D_ZZwx-f%l+LoM4mmZ)+)ia!TqvEgCL1|5Lh{Z#{g%S9 z{mEbpzr8w)#$|5y(H6(^Sz1KHxlV0lr=^YZT${=at4Ud)Kl~a|f=L;T8v$%Yha}f; zd(*kwrt^{!lzBqXa&Xe4GKeJOUH<*iO2vEsV24yY4Z@ROYw#GIg}kZ3LX5;hUU0es zjS?-yg$g!s0rH`Z#wm?m#W3A4Cy@{kBiJNE^*81g?QRh?smdkwZ4CaQ{&1E04dn1Y zo41xS+{SZ5Su=(^L~$M2&XR(y;O<{U@2?|@sD9|VAW4ceH%a2!Ns^a(mXY60k}tC~ zYj%Ljh8;4LcSWRHE>a2tG##AQLlcNV1*(pP8@1>_a@5xlsEc(sT<3j0Eunf1P=Zqf h{FT6`P0F?I5Endc2tK3V&r<&aA@~m8 0 and cnt >= max_num: + break + item_list.append(prefix + line.rstrip('\n')) + cnt += 1 + return item_list + + +def dict_from_file(filename, key_type=str): + """Load a text file and parse the content as a dict. + + Each line of the text file will be two or more columns splited by + whitespaces or tabs. The first column will be parsed as dict keys, and + the following columns will be parsed as dict values. + + Args: + filename(str): Filename. + key_type(type): Type of the dict's keys. str is user by default and + type conversion will be performed if specified. 
+
+    Returns:
+        dict: The parsed contents.
+    """
+    mapping = {}
+    with open(filename, 'r') as f:
+        for line in f:
+            items = line.rstrip('\n').split()
+            assert len(items) >= 2
+            key = key_type(items[0])
+            val = items[1:] if len(items) > 2 else items[1]
+            mapping[key] = val
+    return mapping
diff --git a/CDARTS_detection/mmcv/image/__init__.py b/CDARTS_detection/mmcv/image/__init__.py
new file mode 100644
index 0000000..3d2c98b
--- /dev/null
+++ b/CDARTS_detection/mmcv/image/__init__.py
@@ -0,0 +1,12 @@
+from .io import imread, imwrite, imfrombytes
+from .transforms import (bgr2gray, gray2bgr, bgr2rgb, rgb2bgr, bgr2hsv,
+                         hsv2bgr, bgr2hls, hls2bgr, iminvert, imflip, imrotate,
+                         imcrop, impad, impad_to_multiple, imnormalize,
+                         imdenormalize, imresize, imresize_like, imrescale)
+
+__all__ = [
+    'imread', 'imwrite', 'imfrombytes', 'bgr2gray', 'gray2bgr', 'bgr2rgb',
+    'rgb2bgr', 'bgr2hsv', 'hsv2bgr', 'bgr2hls', 'hls2bgr', 'iminvert',
+    'imflip', 'imrotate', 'imcrop', 'impad', 'impad_to_multiple',
+    'imnormalize', 'imdenormalize', 'imresize', 'imresize_like', 'imrescale'
+]
diff --git a/CDARTS_detection/mmcv/image/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmcv/image/__pycache__/__init__.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
diff --git a/CDARTS_detection/mmcv/image/__pycache__/io.cpython-36.pyc b/CDARTS_detection/mmcv/image/__pycache__/io.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
diff --git a/CDARTS_detection/mmcv/image/io.py b/CDARTS_detection/mmcv/image/io.py
new file mode 100644
index 0000000..c5b70c3
--- /dev/null
+++ b/CDARTS_detection/mmcv/image/io.py
@@ -0,0 +1,79 @@
+import os.path as osp
+
+import cv2
+import numpy as np
+
+from mmcv.opencv_info import USE_OPENCV2
+from mmcv.utils import check_file_exist, is_str, mkdir_or_exist
+
+if not USE_OPENCV2:
+    from cv2 import IMREAD_COLOR, IMREAD_GRAYSCALE, IMREAD_UNCHANGED
+else:
+    from cv2 import CV_LOAD_IMAGE_COLOR as IMREAD_COLOR
+    from cv2 import CV_LOAD_IMAGE_GRAYSCALE as IMREAD_GRAYSCALE
+    from cv2 import CV_LOAD_IMAGE_UNCHANGED as IMREAD_UNCHANGED
+
+imread_flags = {
+    'color': IMREAD_COLOR,
+    'grayscale': IMREAD_GRAYSCALE,
+    'unchanged': IMREAD_UNCHANGED
+}
+
+
+def imread(img_or_path, flag='color'):
+    """Read an image.
+
+    Args:
+        img_or_path (ndarray or str): Either a numpy array or image path.
+            If it is a numpy array (loaded image), then it will be returned
+            as is.
+        flag (str): Flags specifying the color type of a loaded image,
+            candidates are `color`, `grayscale` and `unchanged`.
+
+    Returns:
+        ndarray: Loaded image array.
+    """
+    if isinstance(img_or_path, np.ndarray):
+        return img_or_path
+    elif is_str(img_or_path):
+        flag = imread_flags[flag] if is_str(flag) else flag
+        check_file_exist(img_or_path,
+                         'img file does not exist: {}'.format(img_or_path))
+        return cv2.imread(img_or_path, flag)
+    else:
+        raise TypeError('"img" must be a numpy array or a filename')
+
+
+def imfrombytes(content, flag='color'):
+    """Read an image from bytes.
+
+    Args:
+        content (bytes): Image bytes got from files or other streams.
+        flag (str): Same as :func:`imread`.
+
+    Returns:
+        ndarray: Loaded image array.
+    """
+    img_np = np.frombuffer(content, np.uint8)
+    flag = imread_flags[flag] if is_str(flag) else flag
+    img = cv2.imdecode(img_np, flag)
+    return img
+
+
+def imwrite(img, file_path, params=None, auto_mkdir=True):
+    """Write image to file
+
+    Args:
+        img (ndarray): Image array to be written.
+        file_path (str): Image file path.
+        params (None or list): Same as opencv's :func:`imwrite` interface.
+        auto_mkdir (bool): If the parent folder of `file_path` does not exist,
+            whether to create it automatically.
+
+    Returns:
+        bool: Successful or not.
+    """
+    if auto_mkdir:
+        dir_name = osp.abspath(osp.dirname(file_path))
+        mkdir_or_exist(dir_name)
+    return cv2.imwrite(file_path, img, params)
diff --git a/CDARTS_detection/mmcv/image/transforms/__init__.py b/CDARTS_detection/mmcv/image/transforms/__init__.py
new file mode 100644
index 0000000..918e56a
--- /dev/null
+++ b/CDARTS_detection/mmcv/image/transforms/__init__.py
@@ -0,0 +1,12 @@
+from .colorspace import (bgr2gray, gray2bgr, bgr2rgb, rgb2bgr, bgr2hsv,
+                         hsv2bgr, bgr2hls, hls2bgr, iminvert)
+from .geometry import imflip, imrotate, imcrop, impad, impad_to_multiple
+from .normalize import imnormalize, imdenormalize
+from .resize import imresize, imresize_like, imrescale
+
+__all__ = [
+    'bgr2gray', 'gray2bgr', 'bgr2rgb', 'rgb2bgr', 'bgr2hsv', 'hsv2bgr',
+    'bgr2hls', 'hls2bgr', 'iminvert', 'imflip', 'imrotate', 'imcrop', 'impad',
+    'impad_to_multiple', 'imnormalize', 'imdenormalize', 'imresize',
+    'imresize_like', 'imrescale'
+]
diff --git a/CDARTS_detection/mmcv/image/transforms/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmcv/image/transforms/__pycache__/__init__.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
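imread/imwrite above are thin wrappers over cv2 that map flag names and create missing output directories. A short sketch, again assuming the vendored package imports as mmcv (the /tmp path is illustrative):

    import numpy as np
    from mmcv.image import imread, imwrite

    img = (np.random.rand(240, 320, 3) * 255).astype(np.uint8)
    imwrite(img, '/tmp/demo/img.png')          # auto_mkdir=True creates /tmp/demo first
    loaded = imread('/tmp/demo/img.png')       # BGR uint8, same convention as cv2.imread
    gray = imread('/tmp/demo/img.png', flag='grayscale')
    assert loaded.shape == (240, 320, 3) and gray.shape == (240, 320)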
z)h6W8b!|+Uu5WGZcuG#@Vq?d%L>ZSTGbD*b*<6I~^T>`R-YMBxm+7|Wy`ZGhZD%8i z9S6G_9%bK_cvijal*j`+mDsczxsvz7W)eX>cJpxfmN0*Y^w27m{3>kfCK0CMN@({x zJRK4vA|l2_Oe91~Oo%BlBQj!6EQlqsBGv@`Q@wS3$`5Q(wW`&o9PT-U{6$KS1^#n* zl?1~Hr!g56EgdkC{EdGF3c=atE*xcbRL@K<->(Ew3l{a|vnhhNAn!Ev?Yho?% zYT&g@tZrg$h&8}#n^=?6pNRkP2KFVV5yb``Z1GHec-2ENnq1Hooa>sc$QJ}=u;sf~ z>0C(d9i(CuYTP?bxBbkyYq^|3i12 zC87s59W7>I+Km=GR9!XAR5wYu&?3^4ECoNBcTE;uEyGj|GnuGvl*O4;(=Za9=^}S0 zNpa{3qQk%tSGr(NBJ%+rVzLOE5I zf9$xtM;|h}rT0I%^fKuwErF>IDE$bz?0_xph@9PEZW~b8=yv3h#k2GY5Yds zks&n~p1Wq;Mr6w)RxGfMU@E7}MnV78x_7nyfU{3=r`^T%E3CfZhC(&%axLcuSB4#B zT$IvlZ4eC14RC~u+F%H6(&gcuzF-(eI+Kge(>MdIs%M=$-OJzmTXA1~v)+mO>f6o7 z|5gg69kCSrO74t=4z-pBmb{90Ua0fRY&sP(cY=UtQ4pa1l#FtVLp|;R1_madN`zV( zVSRQdqpK9gTUWMDzj3@kZ1A6R2G2NNrwp7^ce5S4V6OcxRh#$2sN znWm`B?A$>O!{pongY{QLV5QsvQ;Nh2BC*0qsd!Bp7q`LFB;&JKJj4GywGYOnuH`oB NpSW#zug-3N{{W9r^0@#2 literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/image/transforms/__pycache__/geometry.cpython-36.pyc b/CDARTS_detection/mmcv/image/transforms/__pycache__/geometry.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63d2db1afcfc8b4f3c96afa24db3852dd10bdbb4 GIT binary patch literal 6201 zcmbtYO>f-B86JM^2Y2;hOO|CdZ3)#0w5cM+iqQmyVK=qoCMYTdv5^#u1Z#@J)l%9e z*TY#$Yj7`7g1Uzk?xlwwdMwaC(PIz&2|NYpA@!j^kYiAwecl=FhZHxd(Or<7FV4I( zeBS4MW?q|{b8mj#@i*2O`#YQY*+~BsSITkYj2m6nGn$4$X|roKP4%>zHecp8cRn$i z4!^)FyoywbyS(-ZYnJ&6uk$&SRQN?c&lixY@>Rab&mraVOZ+@vLaN5iyR5PH6eF>Q zkym*1FiN8&-p$=kBBDo0Ed6d^QZ;_`B+x>4WrV=wCY`=K`-sXO!%;XMq6i~_&g9d5a zl#6=%-bT!QA^c(En)i>NY_bOL)YF9Q3;(#<`{QN!x>`Oxk-GkMe0U5J9Mczv=A_VMN;> zbmghxBI!k`YF>0+bfzFV9XW;H&rI;0G?60dM}em++I}DtF>St=2p)=-nxnT7#k43t zQ)PPI)0X}~Cat|>5OZ&1FG;#+_(3OBjt}ne`N1HS$~?_UY4j-cP!S~XUR8e7NxI>w zDHUI5?D4AZ+d1OQ8}u1=A6>pbwe3CzjNyfWOuZw}l6POP*8Qi%y~4q59~XX~-&73J z0Eg>Ec%QyA_=I>xR~2b`Xx}Ll0Ma?%(+VT5g?ji=nsr2I@%pHXRL9CEl z!NZ+=X+M->-2Kv*B0Ao=Er>62%il{uQ?bk)K?|L`N51I4*=|R1*q9eCwQ&?u^OBxc z?x@e&tj~VOraN7a-00-SQC{k)fnY#_5nx=?iNmATk$!ZVLD2);IaB~G=?$AtE zB#2I$2NtM+9oQ7=h{g^aTqT4;2f?t!%b!|pkRXLgylbqo6O)7HK!-E+*V(D}j8pGf z-*|6Q5Bj1&UVZe}TNGON_L5`xJn-9w-;s|NsHV^ROcE;In2D=*sjauMwY9bBU48km zF@ZkfP)p%t$`2czFxv0PO<VCbV?b zSrk>NxJ7Lg1}N?^cLk8KjjQSsKSoJFK%pP`!Xz&K9vO*`exlXU3dO?{HnQS2fZ+lT zjMpwPiZ_T&Hga`UHa97AG8>R_R@pM+XY<4MEA!q(sOk#)tOE9o0tl(VVigBK`p)uVr%_ z9`vc1xmjJOc&QBv9?i#Jp%19aGkq+K7Dwks=b-{&?Gr<6WaqO*cs6>;&SeWa1%#wlvPc`{%O-?6@5C-sk1m&K7EHw45y#WuK_BMU-90 z>Y1xY9Ia$?S*=K4R5Pn(7mJy>*$Sof%(D6H0{*&gIa{9AA-q%$&Smo_hQS{E@dNOg z{wA>FU(n51U+_u=yaH}%xc)x@>06L(Mbzp0GU$Y9;a0DCsSNw+wL;O63l{$T$_?eE zvsvoIhh*#f5W&hT|L476fOk*r1&dL>8R8BR;1IK*cdu@GjZF{2*jIZ-gt4NR!lq}l z+9{?|+~5i7fWn|2bW<@+A&a##J;hd6wq8{NJ@w**=1TlD9Sa4mHqqEdJm~E~j>6P# zl#H<^+{LE1o5ZL{#HrC{$9Oi=QSUU$sSWOUDYTTBd{kmv&3Qyz zy8+P|{YbD9F)ZOqNo(!{)*n?y#$)z7^MQR=?_U)aVRvh8y_y>>I)WI_b}Hk(cCV8` zLDK8rIHBIC>k@mk7WK%ct?Tr`y6Fuh#?1;=HIxi!n7dnz^SK>!$U!9;$R90c)r**<3Y+C(MROy{&EqS%ImC6mgXd1}sJf=rI=+J2kZ#92l-g<5 zRF#gL&GK;z;X%k|d8iX&!u@)o4P}f_;u!mV^+{n$dJQ+WxJvtd2;fC?1roV#+K|LA zpv*I0FkEBJT!Y#niA*t}j`}wsp&_S1<#>Cde+UK>{qr|upnpsWflREQUY4Xo=v=_& zrArJfe_1+^&lYJU+);jF95S2_=r-V%wgDw`wuPB~NbenQ$&!9oNouhCM{7_$Mh2#) zzUX=Aq%pO95P!hbSpoH2NKJk(pH;V0J4pWUk1M4 z_4#B=PtNB8F2x|^c=RKO!3KDi9n$c@ zAjIx%%n)iTFr^3>I_}FCp+9EkZ{NFWE61B%8d!;RD4G}XjUmj*@dfD?b!)idw3mrD zsA{U%U#G0vH#$nTYgcu&^H8%K9-(F?Zcxrb?yPWeq8>{Bgz9t!H&!F`o`+sp!G8_U zim`5pw^4HnouprG;Yz7S7H5o!woLs3IZBWhy{cS3Q` z^0%LXjWA9JBJ@H(=y)N=X{RRZ9?nN19Ij9{#TlHZsr~#t0mNt%mq~E4!&L?(A8So2LSnd)t=kp7kf;PBOBRC4iU5g;g$1T8$iyZD#c^c2NI_wOpW&ChGVu#c zoKsYRgynlapY6N%&d0M^diM3e6an~vUxT85j%JP!IB?QHL3qGv6G&Q685SV{zQHFv 
diff --git a/CDARTS_detection/mmcv/image/transforms/__pycache__/normalize.cpython-36.pyc b/CDARTS_detection/mmcv/image/transforms/__pycache__/normalize.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
diff --git a/CDARTS_detection/mmcv/image/transforms/__pycache__/resize.cpython-36.pyc b/CDARTS_detection/mmcv/image/transforms/__pycache__/resize.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
diff --git a/CDARTS_detection/mmcv/image/transforms/colorspace.py b/CDARTS_detection/mmcv/image/transforms/colorspace.py
new file mode 100644
index 0000000..975d14e
--- /dev/null
+++ b/CDARTS_detection/mmcv/image/transforms/colorspace.py
@@ -0,0 +1,77 @@
+import cv2
+import numpy as np
+
+
+def iminvert(img):
+    """Invert (negate) an image.
+
+    Args:
+        img (ndarray): Image to be inverted.
+
+    Returns:
+        ndarray: The inverted image.
+    """
+    return np.full_like(img, 255) - img
+
+
+def bgr2gray(img, keepdim=False):
+    """Convert a BGR image to grayscale image.
+
+    Args:
+        img (ndarray): The input image.
+        keepdim (bool): If False (by default), then return the grayscale image
+            with 2 dims, otherwise 3 dims.
+
+    Returns:
+        ndarray: The converted grayscale image.
+    """
+    out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    if keepdim:
+        out_img = out_img[..., None]
+    return out_img
+
+
+def gray2bgr(img):
+    """Convert a grayscale image to BGR image.
+
+    Args:
+        img (ndarray): The input image.
+
+    Returns:
+        ndarray: The converted BGR image.
+    """
+    img = img[..., None] if img.ndim == 2 else img
+    out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+    return out_img
+
+
+def convert_color_factory(src, dst):
+
+    code = getattr(cv2, 'COLOR_{}2{}'.format(src.upper(), dst.upper()))
+
+    def convert_color(img):
+        out_img = cv2.cvtColor(img, code)
+        return out_img
+
+    convert_color.__doc__ = """Convert a {0} image to {1} image.
+
+    Args:
+        img (ndarray): The input image.
+
+    Returns:
+        ndarray: The converted {1} image.
+    """.format(src.upper(), dst.upper())
+
+    return convert_color
+
+
+bgr2rgb = convert_color_factory('bgr', 'rgb')
+
+rgb2bgr = convert_color_factory('rgb', 'bgr')
+
+bgr2hsv = convert_color_factory('bgr', 'hsv')
+
+hsv2bgr = convert_color_factory('hsv', 'bgr')
+
+bgr2hls = convert_color_factory('bgr', 'hls')
+
+hls2bgr = convert_color_factory('hls', 'bgr')
diff --git a/CDARTS_detection/mmcv/image/transforms/geometry.py b/CDARTS_detection/mmcv/image/transforms/geometry.py
new file mode 100644
index 0000000..ad136d5
--- /dev/null
+++ b/CDARTS_detection/mmcv/image/transforms/geometry.py
@@ -0,0 +1,203 @@
+from __future__ import division
+
+import cv2
+import numpy as np
+
+
+def imflip(img, direction='horizontal'):
+    """Flip an image horizontally or vertically.
+
+    Args:
+        img (ndarray): Image to be flipped.
+        direction (str): The flip direction, either "horizontal" or "vertical".
+
+    Returns:
+        ndarray: The flipped image.
+    """
+    assert direction in ['horizontal', 'vertical']
+    if direction == 'horizontal':
+        return np.flip(img, axis=1)
+    else:
+        return np.flip(img, axis=0)
+
+
+def imrotate(img,
+             angle,
+             center=None,
+             scale=1.0,
+             border_value=0,
+             auto_bound=False):
+    """Rotate an image.
+
+    Args:
+        img (ndarray): Image to be rotated.
+        angle (float): Rotation angle in degrees, positive values mean
+            clockwise rotation.
+        center (tuple): Center of the rotation in the source image, by default
+            it is the center of the image.
+        scale (float): Isotropic scale factor.
+        border_value (int): Border value.
+        auto_bound (bool): Whether to adjust the image size to cover the whole
+            rotated image.
+
+    Returns:
+        ndarray: The rotated image.
+    """
+    if center is not None and auto_bound:
+        raise ValueError('`auto_bound` conflicts with `center`')
+    h, w = img.shape[:2]
+    if center is None:
+        center = ((w - 1) * 0.5, (h - 1) * 0.5)
+    assert isinstance(center, tuple)
+
+    matrix = cv2.getRotationMatrix2D(center, -angle, scale)
+    if auto_bound:
+        cos = np.abs(matrix[0, 0])
+        sin = np.abs(matrix[0, 1])
+        new_w = h * sin + w * cos
+        new_h = h * cos + w * sin
+        matrix[0, 2] += (new_w - w) * 0.5
+        matrix[1, 2] += (new_h - h) * 0.5
+        w = int(np.round(new_w))
+        h = int(np.round(new_h))
+    rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value)
+    return rotated
+
+
+def bbox_clip(bboxes, img_shape):
+    """Clip bboxes to fit the image shape.
+
+    Args:
+        bboxes (ndarray): Shape (..., 4*k).
+        img_shape (tuple): (height, width) of the image.
+
+    Returns:
+        ndarray: Clipped bboxes.
+    """
+    assert bboxes.shape[-1] % 4 == 0
+    clipped_bboxes = np.empty_like(bboxes, dtype=bboxes.dtype)
+    clipped_bboxes[..., 0::2] = np.maximum(
+        np.minimum(bboxes[..., 0::2], img_shape[1] - 1), 0)
+    clipped_bboxes[..., 1::2] = np.maximum(
+        np.minimum(bboxes[..., 1::2], img_shape[0] - 1), 0)
+    return clipped_bboxes
+
+
+def bbox_scaling(bboxes, scale, clip_shape=None):
+    """Scale bboxes w.r.t. the box center.
+
+    Args:
+        bboxes (ndarray): Shape (..., 4).
+        scale (float): Scaling factor.
+        clip_shape (tuple, optional): If specified, bboxes that exceed the
+            boundary will be clipped according to the given shape (h, w).
+
+    Returns:
+        ndarray: Scaled bboxes.
+    """
+    if float(scale) == 1.0:
+        scaled_bboxes = bboxes.copy()
+    else:
+        w = bboxes[..., 2] - bboxes[..., 0] + 1
+        h = bboxes[..., 3] - bboxes[..., 1] + 1
+        dw = (w * (scale - 1)) * 0.5
+        dh = (h * (scale - 1)) * 0.5
+        scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1)
+    if clip_shape is not None:
+        return bbox_clip(scaled_bboxes, clip_shape)
+    else:
+        return scaled_bboxes
+
+
+def imcrop(img, bboxes, scale=1.0, pad_fill=None):
+    """Crop image patches.
+
+    3 steps: scale the bboxes -> clip bboxes -> crop and pad.
+
+    Args:
+        img (ndarray): Image to be cropped.
+        bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
+        scale (float, optional): Scale ratio of bboxes, the default value
+            1.0 means no scaling.
+        pad_fill (number or list): Value to be filled for padding, None for
+            no padding.
+
+    Returns:
+        list or ndarray: The cropped image patches.
+    """
+    chn = 1 if img.ndim == 2 else img.shape[2]
+    if pad_fill is not None:
+        if isinstance(pad_fill, (int, float)):
+            pad_fill = [pad_fill for _ in range(chn)]
+        assert len(pad_fill) == chn
+
+    _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
+    scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
+    clipped_bbox = bbox_clip(scaled_bboxes, img.shape)
+
+    patches = []
+    for i in range(clipped_bbox.shape[0]):
+        x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
+        if pad_fill is None:
+            patch = img[y1:y2 + 1, x1:x2 + 1, ...]
+        else:
+            _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
+            if chn == 1:
+                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
+            else:
+                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
+            patch = np.array(
+                pad_fill, dtype=img.dtype) * np.ones(
+                    patch_shape, dtype=img.dtype)
+            x_start = 0 if _x1 >= 0 else -_x1
+            y_start = 0 if _y1 >= 0 else -_y1
+            w = x2 - x1 + 1
+            h = y2 - y1 + 1
+            patch[y_start:y_start + h, x_start:x_start +
+                  w, ...] = img[y1:y1 + h, x1:x1 + w, ...]
+        patches.append(patch)
+
+    if bboxes.ndim == 1:
+        return patches[0]
+    else:
+        return patches
+
+
+def impad(img, shape, pad_val=0):
+    """Pad an image to a certain shape.
+
+    Args:
+        img (ndarray): Image to be padded.
+        shape (tuple): Expected padding shape.
+        pad_val (number or sequence): Values to be filled in padding areas.
+
+    Returns:
+        ndarray: The padded image.
+    """
+    if not isinstance(pad_val, (int, float)):
+        assert len(pad_val) == img.shape[-1]
+    if len(shape) < len(img.shape):
+        shape = shape + (img.shape[-1], )
+    assert len(shape) == len(img.shape)
+    for i in range(len(shape) - 1):
+        assert shape[i] >= img.shape[i]
+    pad = np.empty(shape, dtype=img.dtype)
+    pad[...] = pad_val
+    pad[:img.shape[0], :img.shape[1], ...] = img
+    return pad
+
+
+def impad_to_multiple(img, divisor, pad_val=0):
+    """Pad an image to ensure each edge to be multiple to some number.
+
+    Args:
+        img (ndarray): Image to be padded.
+        divisor (int): Padded image edges will be multiple to divisor.
+        pad_val (number or sequence): Same as :func:`impad`.
+
+    Returns:
+        ndarray: The padded image.
+ """ + pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor + pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor + return impad(img, (pad_h, pad_w), pad_val) diff --git a/CDARTS_detection/mmcv/image/transforms/normalize.py b/CDARTS_detection/mmcv/image/transforms/normalize.py new file mode 100644 index 0000000..6e450a1 --- /dev/null +++ b/CDARTS_detection/mmcv/image/transforms/normalize.py @@ -0,0 +1,17 @@ +import numpy as np + +from .colorspace import bgr2rgb, rgb2bgr + + +def imnormalize(img, mean, std, to_rgb=True): + img = img.astype(np.float32) + if to_rgb: + img = bgr2rgb(img) + return (img - mean) / std + + +def imdenormalize(img, mean, std, to_bgr=True): + img = (img * std) + mean + if to_bgr: + img = rgb2bgr(img) + return img diff --git a/CDARTS_detection/mmcv/image/transforms/resize.py b/CDARTS_detection/mmcv/image/transforms/resize.py new file mode 100644 index 0000000..9c4a064 --- /dev/null +++ b/CDARTS_detection/mmcv/image/transforms/resize.py @@ -0,0 +1,107 @@ +from __future__ import division + +import cv2 + + +def _scale_size(size, scale): + """Rescale a size by a ratio. + + Args: + size (tuple): w, h. + scale (float): Scaling factor. + + Returns: + tuple[int]: scaled size. + """ + w, h = size + return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5) + + +interp_codes = { + 'nearest': cv2.INTER_NEAREST, + 'bilinear': cv2.INTER_LINEAR, + 'bicubic': cv2.INTER_CUBIC, + 'area': cv2.INTER_AREA, + 'lanczos': cv2.INTER_LANCZOS4 +} + + +def imresize(img, size, return_scale=False, interpolation='bilinear'): + """Resize image to a given size. + + Args: + img (ndarray): The input image. + size (tuple): Target (w, h). + return_scale (bool): Whether to return `w_scale` and `h_scale`. + interpolation (str): Interpolation method, accepted values are + "nearest", "bilinear", "bicubic", "area", "lanczos". + + Returns: + tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or + `resized_img`. + """ + h, w = img.shape[:2] + resized_img = cv2.resize( + img, size, interpolation=interp_codes[interpolation]) + if not return_scale: + return resized_img + else: + w_scale = size[0] / w + h_scale = size[1] / h + return resized_img, w_scale, h_scale + + +def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'): + """Resize image to the same size of a given image. + + Args: + img (ndarray): The input image. + dst_img (ndarray): The target image. + return_scale (bool): Whether to return `w_scale` and `h_scale`. + interpolation (str): Same as :func:`resize`. + + Returns: + tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or + `resized_img`. + """ + h, w = dst_img.shape[:2] + return imresize(img, (w, h), return_scale, interpolation) + + +def imrescale(img, scale, return_scale=False, interpolation='bilinear'): + """Resize image while keeping the aspect ratio. + + Args: + img (ndarray): The input image. + scale (float or tuple[int]): The scaling factor or maximum size. + If it is a float number, then the image will be rescaled by this + factor, else if it is a tuple of 2 integers, then the image will + be rescaled as large as possible within the scale. + return_scale (bool): Whether to return the scaling factor besides the + rescaled image. + interpolation (str): Same as :func:`resize`. + + Returns: + ndarray: The rescaled image. 
+    """
+    h, w = img.shape[:2]
+    if isinstance(scale, (float, int)):
+        if scale <= 0:
+            raise ValueError(
+                'Invalid scale {}, must be positive.'.format(scale))
+        scale_factor = scale
+    elif isinstance(scale, tuple):
+        max_long_edge = max(scale)
+        max_short_edge = min(scale)
+        scale_factor = min(max_long_edge / max(h, w),
+                           max_short_edge / min(h, w))
+    else:
+        raise TypeError(
+            'Scale must be a number or tuple of int, but got {}'.format(
+                type(scale)))
+    new_size = _scale_size((w, h), scale_factor)
+    rescaled_img = imresize(img, new_size, interpolation=interpolation)
+    if return_scale:
+        return rescaled_img, scale_factor
+    else:
+        return rescaled_img
diff --git a/CDARTS_detection/mmcv/opencv_info.py b/CDARTS_detection/mmcv/opencv_info.py
new file mode 100644
index 0000000..521ee0f
--- /dev/null
+++ b/CDARTS_detection/mmcv/opencv_info.py
@@ -0,0 +1,12 @@
+import cv2
+
+
+def use_opencv2():
+    try:
+        major_version = cv2.__version__.split('.')[0]
+    except TypeError:  # solves doc generation issue
+        major_version = 4
+    return major_version == '2'
+
+
+USE_OPENCV2 = use_opencv2()
diff --git a/CDARTS_detection/mmcv/parallel/__init__.py b/CDARTS_detection/mmcv/parallel/__init__.py
new file mode 100644
index 0000000..cb20d1a
--- /dev/null
+++ b/CDARTS_detection/mmcv/parallel/__init__.py
@@ -0,0 +1,10 @@
+from .collate import collate
+from .data_container import DataContainer
+from .data_parallel import MMDataParallel
+from .distributed import MMDistributedDataParallel
+from .scatter_gather import scatter, scatter_kwargs
+
+__all__ = [
+    'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel',
+    'scatter', 'scatter_kwargs'
+]
diff --git a/CDARTS_detection/mmcv/parallel/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmcv/parallel/__pycache__/__init__.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
diff --git a/CDARTS_detection/mmcv/parallel/__pycache__/_functions.cpython-36.pyc b/CDARTS_detection/mmcv/parallel/__pycache__/_functions.cpython-36.pyc
new file mode 100644
[GIT binary patch literal data omitted]
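imresize above takes an explicit (w, h) target, while imrescale keeps the aspect ratio and accepts either a float factor or a (long_edge, short_edge) bound, the idiom detection configs use for inputs such as 1333x800. A small sketch under the same import assumption:

    import numpy as np
    from mmcv.image import imresize, imrescale

    img = np.zeros((400, 600, 3), dtype=np.uint8)   # (h, w, c)

    out = imresize(img, (300, 200))                 # size argument is (w, h)
    assert out.shape == (200, 300, 3)

    # Bounded rescale: scale factor = min(1333 / 600, 800 / 400) = 2.0
    scaled, factor = imrescale(img, (1333, 800), return_scale=True)
    assert scaled.shape == (800, 1200, 3) and factor == 2.0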
diff --git a/CDARTS_detection/mmcv/parallel/_functions.py b/CDARTS_detection/mmcv/parallel/_functions.py
new file mode 100644
--- /dev/null
+++ b/CDARTS_detection/mmcv/parallel/_functions.py
@@ -0,0 +1,74 @@
+import torch
+from torch.nn.parallel._functions import _get_stream
+
+
+def scatter(input, devices, streams=None):
+    """Scatter an input object to the target devices, copying CPU tensors
+    to GPU on background streams."""
+    if streams is None:
+        streams = [None] * len(devices)
+
+    if isinstance(input, list):
+        chunk_size = (len(input) - 1) // len(devices) + 1
+        outputs = [
+            scatter(input[i], [devices[i // chunk_size]],
+                    [streams[i // chunk_size]]) for i in range(len(input))
+        ]
+        return outputs
+    elif isinstance(input, torch.Tensor):
+        output = input.contiguous()
+        # TODO: copy to a pinned buffer first (if copying from CPU)
+        stream = streams[0] if output.numel() > 0 else None
+        with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
+            output = output.cuda(devices[0], non_blocking=True)
+        return output
+    else:
+        raise Exception('Unknown type {}.'.format(type(input)))
+
+
+def synchronize_stream(output, devices, streams):
+    if isinstance(output, list):
+        chunk_size = len(output) // len(devices)
+        for i in range(len(devices)):
+            for j in range(chunk_size):
+                synchronize_stream(output[i * chunk_size + j], [devices[i]],
+                                   [streams[i]])
+    elif isinstance(output, torch.Tensor):
+        if output.numel() != 0:
+            with torch.cuda.device(devices[0]):
+                main_stream = torch.cuda.current_stream()
+                main_stream.wait_stream(streams[0])
+                output.record_stream(main_stream)
+    else:
+        raise Exception('Unknown type {}.'.format(type(output)))
+
+
+def get_input_device(input):
+    if isinstance(input, list):
+        for item in input:
+            input_device = get_input_device(item)
+            if input_device != -1:
+                return input_device
+        return -1
+    elif isinstance(input, torch.Tensor):
+        return input.get_device() if input.is_cuda else -1
+    else:
+        raise Exception('Unknown type {}.'.format(type(input)))
+
+
+class Scatter(object):
+
+    @staticmethod
+    def forward(target_gpus, input):
+        input_device = get_input_device(input)
+        streams = None
+        if input_device == -1:
+            # Perform CPU to GPU copies in a background stream
+            streams = [_get_stream(device) for device in target_gpus]
+
+        outputs = scatter(input, target_gpus, streams)
+        # Synchronize with the copy stream
+        if streams is not None:
+            synchronize_stream(outputs, target_gpus, streams)
+
+        return tuple(outputs)
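`Scatter.forward` only sets up background copy streams when `get_input_device` reports the input still lives on the CPU; the device probe itself can be sketched without any GPU (the actual copies do need CUDA devices):

    import torch
    from mmcv.parallel._functions import get_input_device

    cpu_inputs = [torch.zeros(2, 3), [torch.ones(4)]]  # arbitrarily nested lists
    print(get_input_device(cpu_inputs))                # -1 means "everything on CPU"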
diff --git a/CDARTS_detection/mmcv/parallel/collate.py b/CDARTS_detection/mmcv/parallel/collate.py
new file mode 100644
index 0000000..6e625ed
--- /dev/null
+++ b/CDARTS_detection/mmcv/parallel/collate.py
@@ -0,0 +1,84 @@
+import collections
+
+import torch
+import torch.nn.functional as F
+from torch.utils.data.dataloader import default_collate
+
+from .data_container import DataContainer
+
+
+def collate(batch, samples_per_gpu=1):
+    """Put each data field into a tensor/DataContainer with outer dimension
+    batch size.
+
+    Extends default_collate to add support for
+    :type:`~mmcv.parallel.DataContainer`. There are 3 cases:
+
+    1. cpu_only = True, e.g., meta data
+    2. cpu_only = False, stack = True, e.g., image tensors
+    3. cpu_only = False, stack = False, e.g., gt bboxes
+    """
+
+    if not isinstance(batch, collections.Sequence):
+        raise TypeError('{} is not supported.'.format(type(batch)))
+
+    if isinstance(batch[0], DataContainer):
+        assert len(batch) % samples_per_gpu == 0
+        stacked = []
+        if batch[0].cpu_only:
+            for i in range(0, len(batch), samples_per_gpu):
+                stacked.append(
+                    [sample.data for sample in batch[i:i + samples_per_gpu]])
+            return DataContainer(
+                stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
+        elif batch[0].stack:
+            for i in range(0, len(batch), samples_per_gpu):
+                assert isinstance(batch[i].data, torch.Tensor)
+
+                if batch[i].pad_dims is not None:
+                    ndim = batch[i].dim()
+                    assert ndim > batch[i].pad_dims
+                    max_shape = [0 for _ in range(batch[i].pad_dims)]
+                    for dim in range(1, batch[i].pad_dims + 1):
+                        max_shape[dim - 1] = batch[i].size(-dim)
+                    for sample in batch[i:i + samples_per_gpu]:
+                        for dim in range(0, ndim - batch[i].pad_dims):
+                            assert batch[i].size(dim) == sample.size(dim)
+                        for dim in range(1, batch[i].pad_dims + 1):
+                            max_shape[dim - 1] = max(max_shape[dim - 1],
+                                                     sample.size(-dim))
+                    padded_samples = []
+                    for sample in batch[i:i + samples_per_gpu]:
+                        pad = [0 for _ in range(batch[i].pad_dims * 2)]
+                        for dim in range(1, batch[i].pad_dims + 1):
+                            pad[2 * dim -
+                                1] = max_shape[dim - 1] - sample.size(-dim)
+                        padded_samples.append(
+                            F.pad(
+                                sample.data, pad, value=sample.padding_value))
+                    stacked.append(default_collate(padded_samples))
+                elif batch[i].pad_dims is None:
+                    stacked.append(
+                        default_collate([
+                            sample.data
+                            for sample in batch[i:i + samples_per_gpu]
+                        ]))
+                else:
+                    raise ValueError(
+                        'pad_dims should be either None or integers (1-3)')
+
+        else:
+            for i in range(0, len(batch), samples_per_gpu):
+                stacked.append(
+                    [sample.data for sample in batch[i:i + samples_per_gpu]])
+        return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
+    elif isinstance(batch[0], collections.Sequence):
+        transposed = zip(*batch)
+        return [collate(samples, samples_per_gpu) for samples in transposed]
+    elif isinstance(batch[0], collections.Mapping):
+        return {
+            key: collate([d[key] for d in batch], samples_per_gpu)
+            for key in batch[0]
+        }
+    else:
+        return default_collate(batch)
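A minimal sketch of the stack-and-pad path (case 2 above): two samples with different spatial sizes are zero-padded to the per-dimension maximum and then stacked per GPU.

    import torch
    from mmcv.parallel import DataContainer, collate

    batch = [
        DataContainer(torch.ones(3, 10, 12), stack=True),
        DataContainer(torch.ones(3, 8, 16), stack=True),
    ]
    out = collate(batch, samples_per_gpu=2)
    print(out.data[0].shape)  # torch.Size([2, 3, 10, 16])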
diff --git a/CDARTS_detection/mmcv/parallel/data_container.py b/CDARTS_detection/mmcv/parallel/data_container.py
new file mode 100644
index 0000000..9beac66
--- /dev/null
+++ b/CDARTS_detection/mmcv/parallel/data_container.py
@@ -0,0 +1,84 @@
+import functools
+
+import torch
+
+
+def assert_tensor_type(func):
+
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        if not isinstance(args[0].data, torch.Tensor):
+            raise AttributeError('{} has no attribute {} for type {}'.format(
+                args[0].__class__.__name__, func.__name__, args[0].datatype))
+        return func(*args, **kwargs)
+
+    return wrapper
+
+
+class DataContainer(object):
+    """A container for any type of object.
+
+    Typically tensors will be stacked in the collate function and sliced along
+    some dimension in the scatter function. This behavior has some
+    limitations:
+
+    1. All tensors have to be the same size.
+    2. Types are limited (numpy array or Tensor).
+
+    We design `DataContainer` and `MMDataParallel` to overcome these
+    limitations. The behavior can be one of the following:
+
+    - copy to GPU, pad all tensors to the same size and stack them
+    - copy to GPU without stacking
+    - leave the objects as-is and pass them to the model
+
+    `pad_dims` specifies the number of trailing dimensions to pad.
+    """
+
+    def __init__(self,
+                 data,
+                 stack=False,
+                 padding_value=0,
+                 cpu_only=False,
+                 pad_dims=2):
+        self._data = data
+        self._cpu_only = cpu_only
+        self._stack = stack
+        self._padding_value = padding_value
+        assert pad_dims in [None, 1, 2, 3]
+        self._pad_dims = pad_dims
+
+    def __repr__(self):
+        return '{}({})'.format(self.__class__.__name__, repr(self.data))
+
+    @property
+    def data(self):
+        return self._data
+
+    @property
+    def datatype(self):
+        if isinstance(self.data, torch.Tensor):
+            return self.data.type()
+        else:
+            return type(self.data)
+
+    @property
+    def cpu_only(self):
+        return self._cpu_only
+
+    @property
+    def stack(self):
+        return self._stack
+
+    @property
+    def padding_value(self):
+        return self._padding_value
+
+    @property
+    def pad_dims(self):
+        return self._pad_dims
+
+    @assert_tensor_type
+    def size(self, *args, **kwargs):
+        return self.data.size(*args, **kwargs)
+
+    @assert_tensor_type
+    def dim(self):
+        return self.data.dim()
diff --git a/CDARTS_detection/mmcv/parallel/data_parallel.py b/CDARTS_detection/mmcv/parallel/data_parallel.py
new file mode 100644
index 0000000..6735cb4
--- /dev/null
+++ b/CDARTS_detection/mmcv/parallel/data_parallel.py
@@ -0,0 +1,9 @@
+from torch.nn.parallel import DataParallel
+
+from .scatter_gather import scatter_kwargs
+
+
+class MMDataParallel(DataParallel):
+
+    def scatter(self, inputs, kwargs, device_ids):
+        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
diff --git a/CDARTS_detection/mmcv/parallel/distributed.py b/CDARTS_detection/mmcv/parallel/distributed.py
new file mode 100644
index 0000000..1930b67
--- /dev/null
+++ b/CDARTS_detection/mmcv/parallel/distributed.py
@@ -0,0 +1,50 @@
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+from torch._utils import (_flatten_dense_tensors, _take_tensors,
+                          _unflatten_dense_tensors)
+
+from .scatter_gather import scatter_kwargs
+
+
+class MMDistributedDataParallel(nn.Module):
+
+    def __init__(self, module, dim=0, broadcast_buffers=True,
+                 bucket_cap_mb=25):
+        super(MMDistributedDataParallel, self).__init__()
+        self.module = module
+        self.dim = dim
+        self.broadcast_buffers = broadcast_buffers
+
+        self.broadcast_bucket_size = bucket_cap_mb * 1024 * 1024
+        self._sync_params()
+
+    def _dist_broadcast_coalesced(self, tensors, buffer_size):
+        # broadcast rank-0 state bucket by bucket to bound peak memory
+        for bucket in _take_tensors(tensors, buffer_size):
+            flat_tensors = _flatten_dense_tensors(bucket)
+            dist.broadcast(flat_tensors, 0)
+            for tensor, synced in zip(
+                    bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
+                tensor.copy_(synced)
+
+    def _sync_params(self):
+        module_states = list(self.module.state_dict().values())
+        if len(module_states) > 0:
+            self._dist_broadcast_coalesced(module_states,
+                                           self.broadcast_bucket_size)
+        if self.broadcast_buffers:
+            if torch.__version__ < '1.0':
+                buffers = [b.data for b in self.module._all_buffers()]
+            else:
+                buffers = [b.data for b in self.module.buffers()]
+            if len(buffers) > 0:
+                self._dist_broadcast_coalesced(buffers,
+                                               self.broadcast_bucket_size)
+
+    def scatter(self, inputs, kwargs, device_ids):
+        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
+
+    def forward(self, *inputs, **kwargs):
+        inputs, kwargs = self.scatter(inputs, kwargs,
+                                      [torch.cuda.current_device()])
+        return self.module(*inputs[0], **kwargs[0])
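A small sketch of the container's accessors: `size()`/`dim()` pass through to the wrapped tensor, while non-tensor payloads trip the `assert_tensor_type` guard.

    import torch
    from mmcv.parallel import DataContainer

    img = DataContainer(torch.zeros(3, 32, 32), stack=True)
    print(img.datatype, img.size(), img.dim())  # 'torch.FloatTensor' torch.Size([3, 32, 32]) 3

    meta = DataContainer(dict(filename='a.jpg'), cpu_only=True)
    print(meta.datatype)  # <class 'dict'>
    # meta.size() would raise AttributeError via the assert_tensor_type guard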
diff --git a/CDARTS_detection/mmcv/parallel/scatter_gather.py b/CDARTS_detection/mmcv/parallel/scatter_gather.py
new file mode 100644
index 0000000..1ea64d3
--- /dev/null
+++ b/CDARTS_detection/mmcv/parallel/scatter_gather.py
@@ -0,0 +1,54 @@
+import torch
+from torch.nn.parallel._functions import Scatter as OrigScatter
+
+from ._functions import Scatter
+from .data_container import DataContainer
+
+
+def scatter(inputs, target_gpus, dim=0):
+    """Scatter inputs to target GPUs.
+
+    The only difference from the original :func:`scatter` is the added
+    support for :type:`~mmcv.parallel.DataContainer`.
+    """
+
+    def scatter_map(obj):
+        if isinstance(obj, torch.Tensor):
+            return OrigScatter.apply(target_gpus, None, dim, obj)
+        if isinstance(obj, DataContainer):
+            if obj.cpu_only:
+                return obj.data
+            else:
+                return Scatter.forward(target_gpus, obj.data)
+        if isinstance(obj, tuple) and len(obj) > 0:
+            return list(zip(*map(scatter_map, obj)))
+        if isinstance(obj, list) and len(obj) > 0:
+            out = list(map(list, zip(*map(scatter_map, obj))))
+            return out
+        if isinstance(obj, dict) and len(obj) > 0:
+            out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
+            return out
+        return [obj for _ in target_gpus]
+
+    # After scatter_map is called, a scatter_map cell will exist. This cell
+    # has a reference to the actual function scatter_map, which has references
+    # to a closure that has a reference to the scatter_map cell (because the
+    # fn is recursive). To avoid this reference cycle, we set the function to
+    # None, clearing the cell
+    try:
+        return scatter_map(inputs)
+    finally:
+        scatter_map = None
+
+
+def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
+    """Scatter with support for kwargs dictionary."""
+    inputs = scatter(inputs, target_gpus, dim) if inputs else []
+    kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
+    if len(inputs) < len(kwargs):
+        inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
+    elif len(kwargs) < len(inputs):
+        kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
+    inputs = tuple(inputs)
+    kwargs = tuple(kwargs)
+    return inputs, kwargs
diff --git a/CDARTS_detection/mmcv/runner/__init__.py b/CDARTS_detection/mmcv/runner/__init__.py
new file mode 100644
index 0000000..af255cc
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/__init__.py
@@ -0,0 +1,23 @@
+from .runner import Runner
+from .log_buffer import LogBuffer
+from .dist_utils import get_dist_info, init_dist, master_only
+from .hooks import (Hook, CheckpointHook, ClosureHook, LrUpdaterHook,
+                    OptimizerHook, OptimizerArchHook, IterTimerHook,
+                    DistSamplerSeedHook, LoggerHook, TextLoggerHook,
+                    PaviLoggerHook, TensorboardLoggerHook)
+from .checkpoint import (load_state_dict, load_checkpoint, weights_to_cpu,
+                         save_checkpoint)
+from .parallel_test import parallel_test
+from .priority import Priority, get_priority
+from .utils import get_host_info, get_time_str, obj_from_dict
+
+__all__ = [
+    'Runner', 'LogBuffer', 'Hook', 'CheckpointHook', 'ClosureHook',
+    'LrUpdaterHook', 'OptimizerHook', 'OptimizerArchHook', 'IterTimerHook',
+    'DistSamplerSeedHook', 'LoggerHook', 'TextLoggerHook', 'PaviLoggerHook',
+    'TensorboardLoggerHook', 'load_state_dict', 'load_checkpoint',
+    'weights_to_cpu', 'save_checkpoint', 'parallel_test', 'Priority',
+    'get_priority', 'get_host_info', 'get_time_str', 'obj_from_dict',
+    'init_dist', 'get_dist_info', 'master_only'
+]
[GIT binary patch payloads omitted: CDARTS_detection/mmcv/runner/__pycache__/{__init__,checkpoint,dist_utils,log_buffer,parallel_test,priority,runner,utils}.cpython-36.pyc]
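How a collated batch flows through the parallel pipeline above; a sketch that needs at least one CUDA device to run:

    import torch
    from mmcv.parallel import DataContainer, collate, scatter_kwargs

    samples = [
        dict(img=DataContainer(torch.randn(3, 8, 8), stack=True),
             img_meta=DataContainer(dict(id=i), cpu_only=True))
        for i in range(2)
    ]
    batch = collate(samples, samples_per_gpu=2)
    # inputs is padded with an empty tuple so that it matches kwargs in length
    inputs, kwargs = scatter_kwargs((), batch, target_gpus=[0])
    print(kwargs[0]['img'].device)  # cuda:0; img_meta stays a plain CPU list of dicts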
diff --git a/CDARTS_detection/mmcv/runner/checkpoint.py b/CDARTS_detection/mmcv/runner/checkpoint.py
new file mode 100644
index 0000000..d583846
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/checkpoint.py
@@ -0,0 +1,238 @@
+import os
+import os.path as osp
+import pkgutil
+import time
+import warnings
+from collections import OrderedDict
+from importlib import import_module
+
+import torch
+import torchvision
+from terminaltables import AsciiTable
+from torch.utils import model_zoo
+
+import mmcv
+from .utils import get_dist_info
+
+open_mmlab_model_urls = {
+    'vgg16_caffe': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/vgg16_caffe-292e1171.pth',  # noqa: E501
+    'resnet50_caffe': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet50_caffe-788b5fa3.pth',  # noqa: E501
+    'resnet101_caffe': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet101_caffe-3ad79236.pth',  # noqa: E501
+    'resnext50_32x4d': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext50-32x4d-0ab1a123.pth',  # noqa: E501
+    'resnext101_32x4d': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext101_32x4d-a5af3160.pth',  # noqa: E501
+    'resnext101_64x4d': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext101_64x4d-ee2c6f71.pth',  # noqa: E501
+    'contrib/resnet50_gn': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet50_gn_thangvubk-ad1730dd.pth',  # noqa: E501
+    'detectron/resnet50_gn': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet50_gn-9186a21c.pth',  # noqa: E501
+    'detectron/resnet101_gn': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet101_gn-cac0ab98.pth',  # noqa: E501
+    'jhu/resnet50_gn_ws': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet50_gn_ws-15beedd8.pth',  # noqa: E501
+    'jhu/resnet101_gn_ws': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnet101_gn_ws-3e3c308c.pth',  # noqa: E501
+    'jhu/resnext50_32x4d_gn_ws': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext50_32x4d_gn_ws-0d87ac85.pth',  # noqa: E501
+    'jhu/resnext101_32x4d_gn_ws': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext101_32x4d_gn_ws-34ac1a9e.pth',  # noqa: E501
+    'jhu/resnext50_32x4d_gn': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext50_32x4d_gn-c7e8b754.pth',  # noqa: E501
+    'jhu/resnext101_32x4d_gn': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/resnext101_32x4d_gn-ac3bb84e.pth',  # noqa: E501
+    'msra/hrnetv2_w18': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/hrnetv2_w18-00eb2006.pth',  # noqa: E501
+    'msra/hrnetv2_w32': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/hrnetv2_w32-dc9eeb4f.pth',  # noqa: E501
+    'msra/hrnetv2_w40': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/pretrain/third_party/hrnetv2_w40-ed0b031c.pth',  # noqa: E501
+    'bninception_caffe': 'https://open-mmlab.s3.ap-northeast-2.amazonaws.com/pretrain/third_party/bn_inception_caffe-ed2e8665.pth',  # noqa: E501
+    'kin400/i3d_r50_f32s2_k400': 'https://open-mmlab.s3.ap-northeast-2.amazonaws.com/pretrain/third_party/i3d_r50_f32s2_k400-2c57e077.pth',  # noqa: E501
+    'kin400/nl3d_r50_f32s2_k400': 'https://open-mmlab.s3.ap-northeast-2.amazonaws.com/pretrain/third_party/nl3d_r50_f32s2_k400-fa7e7caa.pth',  # noqa: E501
+}  # yapf: disable
+
+
+def load_state_dict(module, state_dict, strict=False, logger=None):
+    """Load state_dict into a module.
+
+    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
+    The default value of ``strict`` is ``False``, and mismatched params are
+    reported even when ``strict`` is ``False``.
+
+    Args:
+        module (Module): Module that receives the state_dict.
+        state_dict (OrderedDict): Weights.
+        strict (bool): Whether to strictly enforce that the keys
+            in :attr:`state_dict` match the keys returned by this module's
+            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
+        logger (:obj:`logging.Logger`, optional): Logger to log the error
+            message. If not specified, the print function is used.
+ """ + unexpected_keys = [] + shape_mismatch_pairs = [] + + own_state = module.state_dict() + for name, param in state_dict.items(): + if name not in own_state: + unexpected_keys.append(name) + continue + if isinstance(param, torch.nn.Parameter): + # backwards compatibility for serialized parameters + param = param.data + if param.size() != own_state[name].size(): + shape_mismatch_pairs.append( + [name, own_state[name].size(), + param.size()]) + continue + own_state[name].copy_(param) + + all_missing_keys = set(own_state.keys()) - set(state_dict.keys()) + # ignore "num_batches_tracked" of BN layers + missing_keys = [ + key for key in all_missing_keys if 'num_batches_tracked' not in key + ] + + err_msg = [] + if unexpected_keys: + err_msg.append('unexpected key in source state_dict: {}\n'.format( + ', '.join(unexpected_keys))) + if missing_keys: + err_msg.append('missing keys in source state_dict: {}\n'.format( + ', '.join(missing_keys))) + if shape_mismatch_pairs: + mismatch_info = 'these keys have mismatched shape:\n' + header = ['key', 'expected shape', 'loaded shape'] + table_data = [header] + shape_mismatch_pairs + table = AsciiTable(table_data) + err_msg.append(mismatch_info + table.table) + + rank, _ = get_dist_info() + if len(err_msg) > 0 and rank == 0: + err_msg.insert( + 0, 'The model and loaded state dict do not match exactly\n') + err_msg = '\n'.join(err_msg) + if strict: + raise RuntimeError(err_msg) + elif logger is not None: + logger.warning(err_msg) + else: + print(err_msg) + + +def load_url_dist(url): + """ In distributed setting, this function only download checkpoint at + local rank 0 """ + rank, world_size = get_dist_info() + rank = int(os.environ.get('LOCAL_RANK', rank)) + if rank == 0: + checkpoint = model_zoo.load_url(url) + if world_size > 1: + torch.distributed.barrier() + if rank > 0: + checkpoint = model_zoo.load_url(url) + return checkpoint + + +def get_torchvision_models(): + model_urls = dict() + for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__): + if ispkg: + continue + _zoo = import_module('torchvision.models.{}'.format(name)) + if hasattr(_zoo, 'model_urls'): + _urls = getattr(_zoo, 'model_urls') + model_urls.update(_urls) + return model_urls + + +def load_checkpoint(model, + filename, + map_location=None, + strict=False, + logger=None): + """Load checkpoint from a file or URI. + + Args: + model (Module): Module to load checkpoint. + filename (str): Either a filepath or URL or modelzoo://xxxxxxx. + map_location (str): Same as :func:`torch.load`. + strict (bool): Whether to allow different params for the model and + checkpoint. + logger (:mod:`logging.Logger` or None): The logger for error message. + + Returns: + dict or OrderedDict: The loaded checkpoint. 
+ """ + # load checkpoint from modelzoo or file or url + if filename.startswith('modelzoo://'): + warnings.warn('The URL scheme of "modelzoo://" is deprecated, please ' + 'use "torchvision://" instead') + model_urls = get_torchvision_models() + model_name = filename[11:] + checkpoint = load_url_dist(model_urls[model_name]) + elif filename.startswith('torchvision://'): + model_urls = get_torchvision_models() + model_name = filename[14:] + checkpoint = load_url_dist(model_urls[model_name]) + elif filename.startswith('open-mmlab://'): + model_name = filename[13:] + checkpoint = load_url_dist(open_mmlab_model_urls[model_name]) + elif filename.startswith(('http://', 'https://')): + checkpoint = load_url_dist(filename) + else: + if not osp.isfile(filename): + raise IOError('{} is not a checkpoint file'.format(filename)) + checkpoint = torch.load(filename, map_location=map_location) + # get state_dict from checkpoint + if isinstance(checkpoint, OrderedDict): + state_dict = checkpoint + elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + raise RuntimeError( + 'No state_dict found in checkpoint file {}'.format(filename)) + # strip prefix of state_dict + """2019/11/15 for resume one-stage module""" + if list(state_dict.keys())[0].startswith('module.'): + state_dict = {k[7:]: v for k, v in checkpoint['state_dict'].items()} + # load state_dict + if hasattr(model, 'module'): + load_state_dict(model.module, state_dict, strict, logger) + else: + load_state_dict(model, state_dict, strict, logger) + return checkpoint + + +def weights_to_cpu(state_dict): + """Copy a model state_dict to cpu. + + Args: + state_dict (OrderedDict): Model weights on GPU. + + Returns: + OrderedDict: Model weights on GPU. + """ + state_dict_cpu = OrderedDict() + for key, val in state_dict.items(): + state_dict_cpu[key] = val.cpu() + return state_dict_cpu + + +def save_checkpoint(model, filename, optimizer=None, meta=None): + """Save checkpoint to file. + + The checkpoint will have 3 fields: ``meta``, ``state_dict`` and + ``optimizer``. By default ``meta`` will contain version and time info. + + Args: + model (Module): Module whose params are to be saved. + filename (str): Checkpoint filename. + optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. + meta (dict, optional): Metadata to be saved in checkpoint. + """ + if meta is None: + meta = {} + elif not isinstance(meta, dict): + raise TypeError('meta must be a dict or None, but got {}'.format( + type(meta))) + meta.update(mmcv_version=mmcv.__version__, time=time.asctime()) + + mmcv.mkdir_or_exist(osp.dirname(filename)) + if hasattr(model, 'module'): + model = model.module + + checkpoint = { + 'meta': meta, + 'state_dict': weights_to_cpu(model.state_dict()) + } + if optimizer is not None: + checkpoint['optimizer'] = optimizer.state_dict() + + torch.save(checkpoint, filename) diff --git a/CDARTS_detection/mmcv/runner/dist_utils.py b/CDARTS_detection/mmcv/runner/dist_utils.py new file mode 100644 index 0000000..7c92b8f --- /dev/null +++ b/CDARTS_detection/mmcv/runner/dist_utils.py @@ -0,0 +1,76 @@ +# Copyright (c) Open-MMLab. All rights reserved. 
diff --git a/CDARTS_detection/mmcv/runner/dist_utils.py b/CDARTS_detection/mmcv/runner/dist_utils.py
new file mode 100644
index 0000000..7c92b8f
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/dist_utils.py
@@ -0,0 +1,76 @@
+# Copyright (c) Open-MMLab. All rights reserved.
+import functools
+import os
+import subprocess
+
+import torch
+import torch.distributed as dist
+import torch.multiprocessing as mp
+
+
+def init_dist(launcher, backend='nccl', **kwargs):
+    if mp.get_start_method(allow_none=True) is None:
+        mp.set_start_method('spawn')
+    if launcher == 'pytorch':
+        _init_dist_pytorch(backend, **kwargs)
+    elif launcher == 'mpi':
+        _init_dist_mpi(backend, **kwargs)
+    elif launcher == 'slurm':
+        _init_dist_slurm(backend, **kwargs)
+    else:
+        raise ValueError('Invalid launcher type: {}'.format(launcher))
+
+
+def _init_dist_pytorch(backend, **kwargs):
+    # TODO: use local_rank instead of rank % num_gpus
+    rank = int(os.environ['RANK'])
+    num_gpus = torch.cuda.device_count()
+    torch.cuda.set_device(rank % num_gpus)
+    dist.init_process_group(backend=backend, **kwargs)
+
+
+def _init_dist_mpi(backend, **kwargs):
+    raise NotImplementedError
+
+
+def _init_dist_slurm(backend, port=29500, **kwargs):
+    proc_id = int(os.environ['SLURM_PROCID'])
+    ntasks = int(os.environ['SLURM_NTASKS'])
+    node_list = os.environ['SLURM_NODELIST']
+    num_gpus = torch.cuda.device_count()
+    torch.cuda.set_device(proc_id % num_gpus)
+    addr = subprocess.getoutput(
+        'scontrol show hostname {} | head -n1'.format(node_list))
+    os.environ['MASTER_PORT'] = str(port)
+    os.environ['MASTER_ADDR'] = addr
+    os.environ['WORLD_SIZE'] = str(ntasks)
+    os.environ['RANK'] = str(proc_id)
+    dist.init_process_group(backend=backend)
+
+
+def get_dist_info():
+    if torch.__version__ < '1.0':
+        initialized = dist._initialized
+    else:
+        if dist.is_available():
+            initialized = dist.is_initialized()
+        else:
+            initialized = False
+    if initialized:
+        rank = dist.get_rank()
+        world_size = dist.get_world_size()
+    else:
+        rank = 0
+        world_size = 1
+    return rank, world_size
+
+
+def master_only(func):
+
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        rank, _ = get_dist_info()
+        if rank == 0:
+            return func(*args, **kwargs)
+
+    return wrapper
\ No newline at end of file
diff --git a/CDARTS_detection/mmcv/runner/hooks/__init__.py b/CDARTS_detection/mmcv/runner/hooks/__init__.py
new file mode 100644
index 0000000..991c434
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/__init__.py
@@ -0,0 +1,16 @@
+from .hook import Hook
+from .checkpoint import CheckpointHook
+from .closure import ClosureHook
+from .lr_updater import LrUpdaterHook
+from .optimizer import OptimizerHook, OptimizerArchHook
+from .iter_timer import IterTimerHook
+from .sampler_seed import DistSamplerSeedHook
+from .memory import EmptyCacheHook
+from .logger import (LoggerHook, TextLoggerHook, PaviLoggerHook,
+                     TensorboardLoggerHook)
+
+__all__ = [
+    'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook', 'OptimizerHook',
+    'OptimizerArchHook', 'IterTimerHook', 'DistSamplerSeedHook',
+    'EmptyCacheHook', 'LoggerHook', 'TextLoggerHook', 'PaviLoggerHook',
+    'TensorboardLoggerHook'
+]
[GIT binary patch payloads omitted: CDARTS_detection/mmcv/runner/hooks/__pycache__/{__init__,checkpoint,closure,hook,iter_timer,lr_updater,memory,optimizer,sampler_seed}.cpython-36.pyc]
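Outside a launched distributed job no process group exists, so these helpers degrade gracefully; a sketch:

    from mmcv.runner import get_dist_info, master_only

    rank, world_size = get_dist_info()
    print(rank, world_size)  # 0 1 when torch.distributed is uninitialized

    @master_only
    def announce(msg):
        print(msg)  # non-master ranks silently return None

    announce('only rank 0 prints this')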
z>H5?rNNYy~DS}T{=QRA4*$IB$yqzsfqhBDlXS+?&&dQxC+{~?wn>CHnoi4k|v>2D$ znccLlwm8?U+^oFq?D=}ZGKd9PPy71n9YV{I>?rf6pwbb2569BFE_N2%%@o zpu`d`xqCppV}`rK9+rb9utZgs{1g=GwTvsmT%cU15e}1kE&*tb!>3yJebdv1M8q*oZ0D z*#jFLd;^w^AZ2&XEequT!I52Ih9QDsQ_u`Wp!t?fg%&8zQg>3iP>HS3$+-)8IM;AWizwwGjYGg-06lmc?#({_DXuwnv#rp+HAJ=WHhQLZEBzIZo8m=+BE2i EU(gAK%K!iX literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/runner/hooks/__pycache__/hook.cpython-36.pyc b/CDARTS_detection/mmcv/runner/hooks/__pycache__/hook.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae59cfe2fbd753526e7bc6040c58b58f2f6a14cc GIT binary patch literal 3030 zcmb_e&2G~`5MJ9!n*aZwHl@%)X$cYyq7o7kf=WohDHm?NSZ>B4X%f4vlS)CkwU5Gu zC*WOt<h|QG8** zK)b|cvkT)`1N|GT8BMeVkB++|T;4AfHbWnFtOI&pvSR}b2%!!^7$T^{um(}o5r{z? z^%^7~i8=}?NTZHH2C}H*kb^wx1Qei%Ite8xqfS8u)={Tn1FEPqunCV)XJHE-qt3w- z*hZa)9e9em0MB3-brGJ!9_kY8BmWnE=&S2?o5Y+R(i*N$xI79U7jzlA+)a;NnCs_sbFQ5s8$6viK~N2%-n)fjLSzAmr)T?UO|#2 zF}2MAQQ3&fP2*PZUg3WbtNA@Fz@0V>G*vXGPMRRY{p7?hQ=Qe0L`AtNaS0X|j;6cqx2!^7a?i!v}% z2ULSQ3IUmcTP~}mkWs3D2<$2ZWCrFO9tNLSkb$gP1BAb?;L9SM_gF1~lEs^Ca~|WR z!lEkc;4Zh-Qb|vXkuvd0;ZT^XMY1=k`_Mx=xRR)X$2bghL%cfHP8sMG{9VRzqKktB z7}#b*jVpTXiW>0en&yYa1ynPp?abGl`Hq&h&uu<3C%2XN8sEKXpOW0HeF<`$u0md( zkc)R{3NR^Pl_96gYMx`^hQst7?@%GSF0y!s)5_R}{)=ZSpny?%Zxx35S%zeusWCcg z(RpZ66vwMo*g(UbejyIHrZ+_DFjrM{@r|DC_zAEEmf3SHz$D%?wIpo^Ot&Sw!8?VP z94Az3qgM8#rs-IH+cf>SY4%+h_Gq6p%^yRnCoP=DAHGI{dLpNm!0A@xbTM!mBY27g zbyCsld6srL64Z}*fdoa%H%L&2;9DfNNd&{8+|lgCsjO*vdnlrM)L8r{-f7_I8F5ce Vaf%eb@A|kOeJ=z3AP^9M{R7kpLGu6r literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/runner/hooks/__pycache__/iter_timer.cpython-36.pyc b/CDARTS_detection/mmcv/runner/hooks/__pycache__/iter_timer.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae4356896bef4d588ca60606ffaf78a2a40ad9ea GIT binary patch literal 899 zcma)*y>8nu5XXZxcIY~7e}O&3l>3kQ!nFDg+U{~QlwDqJROxcJHZE^lrF&iVOVgoW{m6Hz7;COv zzCh^TK@zV=k8>^*Wn`)5|QNU!^Cy(Gy$6?jD66$xT5ZiIiR0YAJmxWoy*XkUy33`%pH!H=*{RX`d4f-Hef;R#`fX+Udw7JQ0Zqdsqg+ e`-pCZ73hZTQF>4d?M~k9&VNt8N5D_`RQv(?io#g{ literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/runner/hooks/__pycache__/lr_updater.cpython-36.pyc b/CDARTS_detection/mmcv/runner/hooks/__pycache__/lr_updater.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..257c3133fc5cb5e699812feebc60982414bcfd03 GIT binary patch literal 7176 zcmb_h-H#hr6~A}BJobLn*>yLYG%cg;2Q`FjnnEck%C;nJQ`(w9LW`_M*2FWrvDY3u zcgE?)coFJGw0u>9HxLL3A*6~2p7}R;oF^n6s=k%iiU)q@T#skHcDK+ntGS;0eeOBG z`#a~}eR^rBwDs|A|A|vV{8JoyEPOwQH~t-xKm@WQy0R`MeH$I4Yu3%KRkx5g1FK_q zow_5%OCqoX=e`J>&^a*c1>_5Xi@X~akav+U1|{T6oG&835G*3U$oUfTOTjYo%bZ_8 z{(+#3e3|o$$e##SkYC~aQm}emR4X4sLQ$2e8?^4W;#M!J8mZar#i`M6eJFLEb+y;q zZBoPRnJln|l2FuTAnHc&ATL1HO)Yrc(gN3QEoj}*0@e#!u)3=SsuzRRpmJZ-OMwBA z*3zX{)%W{>pM;7gzEM*o#|OXt4LyI`YT9Ed9Q=|Xui%Y;gk)p{Vz;Os!EbT5pdLv? 
zV72AQIFJYOZTxcW>mxI;cdhuD#0VTJ5A2a;2~+Hf-qoP6D|XSt8OdW@do{~-d(@b(p9nNtI*rX2Ho)dDDXD)$Q!JcVReJL z4*HE2)D>@d-9emqw?fR)@5Qa8bvN`{Q4;QiY6lv^^U>Du@vM*XlU6V9(~3OhM?0bS zrsM=AD@amj7o+XO?+bMj z*^gh`yxr@D7m!9f_Xd8n*}NC{@n($GZ+5#un1oFd4d2bXn`#h6p+eiZ8Ta@D`_-n&9R3>6kO>_Lt02(S(1)CEgi$bFaGd5T$_UFxom)@NuBW!P6?lv z@y1Ug+1p6ONVa9$049wq;%50xW8d7jfG0DM7sZ|O8(;_^$*83U)L^FOu+>kCy?)Z_ zwuYff7yG{QyNw;y8}#F88)yR_G|ru`yNxkGtA<_#FNYUF zBYj-D*lEQ{v)Aok`Z5hqb2(yoes&ExzRz9k^qPJrzJ!mgGiD|6FiWm4nq_KQHr+BQ zBp>Wc*{smqnd4@iXpl5VCT9)4hYwOiAa@PrL6gv9V6rA9X>#9)+FWm;{s-VnQkwCQ zSckX5m$@z~;-|0y`Z-7XUEqEN^!pjLJluCR>*G%M8Ic1h)|hyv%t!LF`N-mIzU6%h zSwtK2PaBU|0$O_%KdDJ=GjV9A|Yx@#<>t1VS~I2 zZDmqI8fwQoU@WD20tMJVayQ=>EaQb-Mke5?*(MsPpcQLmQEMcdDc@<+^C;SDM`rq3 zK2z1wj7!7jv^(C%^x4Adn8>ZH+EXbQ580GF=eUFWnSD{}bI zF$S1|EvfD@4fIVUdmhMCv;otbm3>fZ8zcZHZ=4bbh7@;7AX9`__la`3)d4$l<}16F zmWb|Keg_C)hMZCEb(&{dxq&z}1{=&s8!uZqgUx30&DE|*3v9?sG}M?G&Q7oORtPs5 zHbCL?kOpI%p+O!-l9oWnLAdP?I*I0r!`w>bb7bnYln?$aNk9RUFqdV;7_NVk6ccrm zxWo{wZt`9u?6tT=>cbcI9;*{kQQI6rv2A!Xif^?uu(a!b9NiQH$-@_5IL&QZUdGDG zSky%Mtw@It&!bCBUPi#-T5?eiy}3dk>M{{Nqc;}*&lrL*0E|HJ_y!3(79ZhU+d^3A z#%C$OYumfFx`t?RU`n%M4xpx+n9=MbrcCFrqoO0zl2sXVT-iuK)V%L zHcE0C-xX-P#JS-Yj?p-cQnkj>xgtYV7buL1{BGE2q@_lq+Y1IA`d(}_?hO2nu2DEG z3FT2jepWF^Xg!K3LY<@JJSBOgkW+U774)*_cTI#N_$#fsmTTf~!7aLuqWUJ@$wM%v zCnn<0PAlxPIB39mg?XZGy#v2kw*g^0U3sy!7Y0+ypJE6alY(LvBoPU=zyTO&8%TSP z4|;kdn5m6uALjfj+VaY?T|kZuq+e__njJrm8;yK7aJx5YaN30M>krKeGaGJ_M3d-@ zHUK+QS>tg(Ee>N-G|(xV^3Ez_m(YMNoEldXuO7^$A2 zmduExiaK77J6&VZWaUg*jKrrWCfAr*EO6Ln6rL1eGb^xhX3T9?NKaf(!v2)W@e-QnP|1onsO6oF zH$>!dd#bC$*;Cyus*Bvj3cwmTc#_|-`2gu8g~Vl~6oC|Tf)rDqafjYUj3wkJGT);B z(Zd08e2A--;JD%zA2OR^YEgA+qEanmEot!v4wU*BX3VtzK zx1_cxAuy&ghF4=V<@=3H8{%b0DC7!=p(M}XT{c`Mj^XJUrO!f>b|fkNGL6(n0tysr zkeHD0>q(C{4u(kIXsgS;5nl+rD?0stM>t zWba$o#GR{e0Q_jKo{H z%q~-3pPDZ-8c$0$rkyn2Tgyvq^{Ks;?S%h@4a+a=O>eA;r=`*8<^4qdgwYN@Oke;o zq6wTE6P&{gmPQ;S@xtFR7p>!RyH~zT^pS# z4u+?_4AWav6bIAWson3rg=-84cU=^BGYDQne+`0hJo9ZTc!d(}Bj<6_FHx=I*)a>v zh(8Mk2Yn`BAhj_VD#Stv?TCe}h&ajQoV5?42d`Lk)5GCM|XSJsrlzjG?qh1$;-RChNl>$AETW6n@l(OSKXNMf7 z)){c$Asvuo%l0(04%dG&;Qm01CIo3^4^JJHcNTB7OX$6fH$IDG9{B)cbb>M~;w05u z3uAvbL6x1ODrY0cP$qtUw{``S%tOkbNCFatJoxbBjOY{3a}7;CBQi*Z6SNr_6UT56>TSB#pkKO03XB9nq~@gx?&nUTj9JL6?O-@O|Jc%Wp#d5yk!VJ3YMD8~25hwf+@M>VRSVTNaCxD;QeONIINhNq literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/runner/hooks/__pycache__/memory.cpython-36.pyc b/CDARTS_detection/mmcv/runner/hooks/__pycache__/memory.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f455fe50fea1308bda4ec4b9f6343c6e303153b GIT binary patch literal 1081 zcmbVL&5qMB5VjpB&8F$HAbteA0J*dhToJosMd7p;_JDG+LdK=-HgR!Mq^^3~z6`JA zE2n*hJu#EE+O*=ry7K4ocsl;(8|PiG7oFaXvu}d2Kddz*+GjYcHweIh&sfSpfH&iX zNCjt~8A$Ng416taywn5!l_kL~Mzf1sj+k&KzZi3slWR9TG|q5VeFV)?4lETAg9mcW zF^Lbsnxzst5aQ~?5k$BK(8c6Ecl@cC*UR%PA8X1yyv&Iv4O+1Tc8;@pjuz@wbS2WsO3y|v-upwh9UpBr6#kpp z7_YwjI2fBkzeOy6EEich$d`~+gKBQ7K~aFNbzVLU>9YnEykKFJk^D<%l>@n zLZv3dT*w>)Hi_58e8nlxBjTBGOSg|RyCpDjnsds}%J0Ya);HmR# zV^IZ}FCcSWO$w`=Bu<2-%U!Uc*wnUx9=dH8fsCr@0a1H-Kl1Bt51dkE6 zjO-I~kkl!&qX%doY-R`Lh@TG+R{ZWH5y%-4pC!>bVY3pMdXJvl$N}SYzlFbo}HzS WvT^?_iwoaxXndeY)ufgc5Z$$%#0g28qEcunAaMYNLn0N41FA|9LU0O)N*sJyxn8Gr>W_5R0X6C^ zJ@Y^KOMB(izt9si>*lj95)#7FtasP5vv1zcTfc5JYVWR&{jUyVzu4S^W44c`cCnF6 z@|Y#uLDUSb5+;<_5d(Lfx5 zFE4p9<9yDtlYl-?9veIRXzB$vA@jIoo+Vw{^n!bKv7P4>+ju3(X#?H(GRUJO8ihas zyMcv^>7h5-2Q*c~=A551f$88X-X%zcMi8!+r5zQMH80=56le0e0Ey<;Ifq9Ir9_G@wf6$RoK6W=CR_UQP*#VlOe9&>4as$HC zVFaNvCxIOBfl0P#s_3oV-PlhKrN39F0BCIVY7y=9BvpGi&H_JHd)O%!wH%-n_3G3~ 
diff --git a/CDARTS_detection/mmcv/runner/hooks/checkpoint.py b/CDARTS_detection/mmcv/runner/hooks/checkpoint.py
new file mode 100644
index 0000000..c27fc00
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/checkpoint.py
@@ -0,0 +1,25 @@
+from ..utils import master_only
+from .hook import Hook
+
+
+class CheckpointHook(Hook):
+
+    def __init__(self,
+                 interval=-1,
+                 save_optimizer=True,
+                 out_dir=None,
+                 **kwargs):
+        self.interval = interval
+        self.save_optimizer = save_optimizer
+        self.out_dir = out_dir
+        self.args = kwargs
+
+    @master_only
+    def after_train_epoch(self, runner):
+        if not self.every_n_epochs(runner, self.interval):
+            return
+
+        if not self.out_dir:
+            self.out_dir = runner.work_dir
+        runner.save_checkpoint(
+            self.out_dir, save_optimizer=self.save_optimizer, **self.args)
diff --git a/CDARTS_detection/mmcv/runner/hooks/closure.py b/CDARTS_detection/mmcv/runner/hooks/closure.py
new file mode 100644
index 0000000..8087d98
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/closure.py
@@ -0,0 +1,9 @@
+from .hook import Hook
+
+
+class ClosureHook(Hook):
+
+    def __init__(self, fn_name, fn):
+        assert hasattr(self, fn_name)
+        assert callable(fn)
+        setattr(self, fn_name, fn)
diff --git a/CDARTS_detection/mmcv/runner/hooks/hook.py b/CDARTS_detection/mmcv/runner/hooks/hook.py
new file mode 100644
index 0000000..c7c1797
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/hook.py
@@ -0,0 +1,58 @@
+class Hook(object):
+
+    def before_run(self, runner):
+        pass
+
+    def after_run(self, runner):
+        pass
+
+    def before_epoch(self, runner):
+        pass
+
+    def after_epoch(self, runner):
+        pass
+
+    def before_iter(self, runner):
+        pass
+
+    def after_iter(self, runner):
+        pass
+
+    def before_train_epoch(self, runner):
+        self.before_epoch(runner)
+
+    def before_val_epoch(self, runner):
+        self.before_epoch(runner)
+
+    def after_train_epoch(self, runner):
+        self.after_epoch(runner)
+
+    def after_val_epoch(self, runner):
+        self.after_epoch(runner)
+
+    def before_train_iter(self, runner):
+        self.before_iter(runner)
+
+    def before_val_iter(self, runner):
+        self.before_iter(runner)
+
+    def after_train_iter(self, runner):
+        self.after_iter(runner)
+
+    def arch_after_train_iter(self, runner):
+        self.after_iter(runner)
+
+    def after_val_iter(self, runner):
+        self.after_iter(runner)
+
+    def every_n_epochs(self, runner, n):
+        return (runner.epoch + 1) % n == 0 if n > 0 else False
+
+    def every_n_inner_iters(self, runner, n):
+        return (runner.inner_iter + 1) % n == 0 if n > 0 else False
+
+    def every_n_iters(self, runner, n):
+        return (runner.iter + 1) % n == 0 if n > 0 else False
+
+    def end_of_epoch(self, runner):
+        return runner.inner_iter + 1 == len(runner.data_loader)
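The `every_n_*` helpers make periodic hooks one-liners; a hypothetical subclass (names are illustrative):

    from mmcv.runner import Hook

    class PrintIterHook(Hook):
        """Hypothetical hook: print progress every 50 training iterations."""

        def after_train_iter(self, runner):
            if self.every_n_iters(runner, 50):
                print('finished iter', runner.iter + 1)

    # registered like the built-in hooks, e.g. runner.register_hook(PrintIterHook())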
diff --git a/CDARTS_detection/mmcv/runner/hooks/iter_timer.py b/CDARTS_detection/mmcv/runner/hooks/iter_timer.py
new file mode 100644
index 0000000..13b2876
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/iter_timer.py
@@ -0,0 +1,16 @@
+import time
+
+from .hook import Hook
+
+
+class IterTimerHook(Hook):
+
+    def before_epoch(self, runner):
+        self.t = time.time()
+
+    def before_iter(self, runner):
+        runner.log_buffer.update({'data_time': time.time() - self.t})
+
+    def after_iter(self, runner):
+        runner.log_buffer.update({'time': time.time() - self.t})
+        self.t = time.time()
diff --git a/CDARTS_detection/mmcv/runner/hooks/logger/__init__.py b/CDARTS_detection/mmcv/runner/hooks/logger/__init__.py
new file mode 100644
index 0000000..8cbaf12
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/logger/__init__.py
@@ -0,0 +1,8 @@
+from .base import LoggerHook
+from .pavi import PaviLoggerHook
+from .tensorboard import TensorboardLoggerHook
+from .text import TextLoggerHook
+
+__all__ = [
+    'LoggerHook', 'TextLoggerHook', 'PaviLoggerHook', 'TensorboardLoggerHook'
+]
diff --git a/CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e7f71b050cfe03d9b3c9d0332e32455ec7274d1
GIT binary patch
literal 383
[... base85 payload omitted ...]
literal 0
HcmV?d00001
diff --git a/CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/base.cpython-36.pyc b/CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/base.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b15eab47e56bf215569dd7d5d2264aaa3ef88126
GIT binary patch
literal 2493
[... base85 payload omitted ...]
literal 0
HcmV?d00001
diff --git a/CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/pavi.cpython-36.pyc b/CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/pavi.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a449fd054e3c1d105f768d3b4342c853204bf616
GIT binary patch
literal 5071
[... base85 payload omitted ...]
literal 0
HcmV?d00001
diff --git a/CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/tensorboard.cpython-36.pyc b/CDARTS_detection/mmcv/runner/hooks/logger/__pycache__/tensorboard.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1a2aae8e0ee4b77bf43c5df08f6cfc6c8a544bc
GIT binary patch
literal 1952
[... base85 payload omitted ...]
[... further diff content unrecoverable in this copy; omitted ...]
diff --git a/CDARTS_detection/mmcv/runner/hooks/logger/tensorboard.py b/CDARTS_detection/mmcv/runner/hooks/logger/tensorboard.py
new file mode 100644
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/logger/tensorboard.py
@@ -0,0 +1,55 @@
+import os.path as osp
+
+import torch
+
+from ...utils import master_only
+from .base import LoggerHook
+
+
+class TensorboardLoggerHook(LoggerHook):
+
+    def __init__(self,
+                 log_dir=None,
+                 interval=10,
+                 ignore_last=True,
+                 reset_flag=True):
+        super(TensorboardLoggerHook, self).__init__(interval, ignore_last,
+                                                    reset_flag)
+        self.log_dir = log_dir
+
+    @master_only
+    def before_run(self, runner):
+        if torch.__version__ >= '1.1':
+            try:
+                from torch.utils.tensorboard import SummaryWriter
+            except ImportError:
+                raise ImportError(
+                    'Please run "pip install future tensorboard" to install '
+                    'the dependencies to use torch.utils.tensorboard '
+                    '(applicable to PyTorch 1.1 or higher)')
+        else:
+            try:
+                from tensorboardX import SummaryWriter
+            except ImportError:
+                raise ImportError('Please install tensorboardX to use '
+                                  'TensorboardLoggerHook.')
+        if self.log_dir is None:
+            self.log_dir = osp.join(runner.work_dir, 'tf_logs')
+        self.writer = SummaryWriter(self.log_dir)
+
+    @master_only
+    def log(self, runner):
+        for var in runner.log_buffer.output:
+            if var in ['time', 'data_time']:
+                continue
+            tag = '{}/{}'.format(var, runner.mode)
+            record = runner.log_buffer.output[var]
+            if isinstance(record, str):
+                self.writer.add_text(tag, record, runner.iter)
+            else:
+                self.writer.add_scalar(tag, runner.log_buffer.output[var],
+                                       runner.iter)
+
+    @master_only
+    def after_run(self, runner):
+        self.writer.close()
diff --git a/CDARTS_detection/mmcv/runner/hooks/logger/text.py b/CDARTS_detection/mmcv/runner/hooks/logger/text.py
new file mode 100644
index 0000000..e397a17
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/logger/text.py
@@ -0,0 +1,123 @@
+import datetime
+import torch.nn.functional as F
+import os
+import os.path as osp
+from collections import OrderedDict
+
+import numpy as np
+import torch
+import torch.distributed as dist
+
+import mmcv
+from .base import LoggerHook
+
+
+class TextLoggerHook(LoggerHook):
+
+    def __init__(self, interval=10, ignore_last=True, reset_flag=False):
+        super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag)
+        self.time_sec_tot = 0
+
+    def before_run(self, runner):
+        super(TextLoggerHook, self).before_run(runner)
+        self.start_iter = runner.iter
+        self.json_log_path = osp.join(runner.work_dir,
+                                      '{}.log.json'.format(runner.timestamp))
+
+    def _get_max_memory(self, runner):
+        mem = torch.cuda.max_memory_allocated()
+        mem_mb = torch.tensor([mem / (1024 * 1024)],
+                              dtype=torch.int,
+                              device=torch.device('cuda'))
+        if runner.world_size > 1:
+            dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
+        return mem_mb.item()
+
+    def _log_info(self, log_dict, runner):
+        if runner.mode == 'train':
+            log_str = 'Epoch [{}][{}/{}]\tlr: {:.5f}, '.format(
+                log_dict['epoch'], log_dict['iter'],
+                len(runner.data_loader), log_dict['lr'])
+            if 'time' in log_dict.keys():
+                self.time_sec_tot += (log_dict['time'] * self.interval)
+                time_sec_avg = self.time_sec_tot / (
+                    runner.iter - self.start_iter + 1)
+                eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
+                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
+                log_str += 'eta: {}, '.format(eta_str)
+                log_str += ('time: {:.3f}, data_time: {:.3f}, '.format(
+                    log_dict['time'], log_dict['data_time']))
+                log_str += 'memory: {}, '.format(log_dict['memory'])
+        else:
+            log_str = 'Epoch({}) [{}][{}]\t'.format(log_dict['mode'],
+                                                    log_dict['epoch'] - 1,
+                                                    log_dict['iter'])
+        log_items = []
+        for name, val in log_dict.items():
+            # TODO: resolve this hack
+            # these items are already covered by log_str above
+            if name in [
+                    'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',
+                    'memory', 'epoch'
+            ]:
+                continue
+            if isinstance(val, float):
+                val = '{:.4f}'.format(val)
+            log_items.append('{}: {}'.format(name, val))
+        log_str += ', '.join(log_items)
+        runner.logger.info(log_str)
+
+    def _dump_log(self, log_dict, runner):
+        # dump log in json format
+        json_log = OrderedDict()
+        for k, v in log_dict.items():
+            json_log[k] = self._round_float(v)
+        # only the master rank appends to the json log
+        if runner.rank == 0:
+            with open(self.json_log_path, 'a+') as f:
+                mmcv.dump(json_log, f, file_format='json')
+                f.write('\n')
+
+    def _round_float(self, items):
+        if isinstance(items, list):
+            return [self._round_float(item) for item in items]
+        elif isinstance(items, float):
+            return round(items, 5)
+        else:
+            return items
+
+    def log(self, runner):
+        log_dict = OrderedDict()
+        # training mode if the output contains the key "time"
+        mode = 'train' if 'time' in runner.log_buffer.output else 'val'
+        log_dict['mode'] = mode
+        log_dict['epoch'] = runner.epoch + 1
+        log_dict['iter'] = runner.inner_iter + 1
+        # only record lr of the first param group
+        log_dict['lr'] = runner.current_lr()[0]
+        if runner.optimizer_arch is not None and (runner.rank == 0):
+            # os.system('df -h /dev/shm/')
+            detector = runner.model.module.module
+            # the searching code cannot be open-sourced for now.
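+            # The branches below would log the architecture parameters of
+            # the searched backbone / neck / head; they are stubbed out with
+            # NotImplementedError until that code is released.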
+            if 'backbone' in runner.arch_name:
+                raise NotImplementedError
+            if 'neck' in runner.arch_name:
+                raise NotImplementedError
+            if 'head' in runner.arch_name:
+                raise NotImplementedError
+
+        if mode == 'train':
+            log_dict['time'] = runner.log_buffer.output['time']
+            log_dict['data_time'] = runner.log_buffer.output['data_time']
+            # record the maximum GPU memory used so far
+            if torch.cuda.is_available():
+                log_dict['memory'] = self._get_max_memory(runner)
+        for name, val in runner.log_buffer.output.items():
+            if name in ['time', 'data_time']:
+                continue
+            log_dict[name] = val
+
+        self._log_info(log_dict, runner)
+        self._dump_log(log_dict, runner)
+        if runner.rank == 0:
+            print()
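
Since _dump_log writes exactly one JSON object per line (mmcv.dump with file_format='json' followed by a newline), the log file can be read back line by line. A small reader sketch; the file name is illustrative:

    import json

    def load_json_log(path='20220213_172848.log.json'):
        # one dict per line, as written by mmcv.dump(..., file_format='json')
        with open(path) as f:
            return [json.loads(line) for line in f if line.strip()]

    train_entries = [e for e in load_json_log() if e.get('mode') == 'train']
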
diff --git a/CDARTS_detection/mmcv/runner/hooks/lr_updater.py b/CDARTS_detection/mmcv/runner/hooks/lr_updater.py
new file mode 100644
index 0000000..bd443e6
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/lr_updater.py
@@ -0,0 +1,183 @@
+from __future__ import division
+from math import cos, pi
+
+from .hook import Hook
+
+
+class LrUpdaterHook(Hook):
+
+    def __init__(self,
+                 by_epoch=True,
+                 warmup=None,
+                 warmup_iters=0,
+                 warmup_ratio=0.1,
+                 **kwargs):
+        # validate the "warmup" argument
+        if warmup is not None:
+            if warmup not in ['constant', 'linear', 'exp']:
+                raise ValueError(
+                    '"{}" is not a supported type for warming up, valid types'
+                    ' are "constant", "linear" and "exp"'.format(warmup))
+            assert warmup_iters > 0, \
+                '"warmup_iters" must be a positive integer'
+            assert 0 < warmup_ratio <= 1.0, \
+                '"warmup_ratio" must be in range (0,1]'
+
+        self.by_epoch = by_epoch
+        self.warmup = warmup
+        self.warmup_iters = warmup_iters
+        self.warmup_ratio = warmup_ratio
+
+        self.base_lr = []  # initial lr for all param groups
+        self.regular_lr = []  # expected lr if no warming up is performed
+
+    def _set_lr(self, runner, lr_groups):
+        for param_group, lr in zip(runner.optimizer.param_groups, lr_groups):
+            param_group['lr'] = lr
+
+    def get_lr(self, runner, base_lr):
+        raise NotImplementedError
+
+    def get_regular_lr(self, runner):
+        return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]
+
+    def get_warmup_lr(self, cur_iters):
+        if self.warmup == 'constant':
+            warmup_lr = [_lr * self.warmup_ratio for _lr in self.regular_lr]
+        elif self.warmup == 'linear':
+            k = (1 - cur_iters / self.warmup_iters) * (1 - self.warmup_ratio)
+            warmup_lr = [_lr * (1 - k) for _lr in self.regular_lr]
+        elif self.warmup == 'exp':
+            k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
+            warmup_lr = [_lr * k for _lr in self.regular_lr]
+        return warmup_lr
+
+    def before_run(self, runner):
+        # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved,
+        # it will be set according to the optimizer params
+        for group in runner.optimizer.param_groups:
+            group.setdefault('initial_lr', group['lr'])
+        self.base_lr = [
+            group['initial_lr'] for group in runner.optimizer.param_groups
+        ]
+
+    def before_train_epoch(self, runner):
+        if not self.by_epoch:
+            return
+        self.regular_lr = self.get_regular_lr(runner)
+        self._set_lr(runner, self.regular_lr)
+
+    def before_train_iter(self, runner):
+        cur_iter = runner.iter
+        if not self.by_epoch:
+            self.regular_lr = self.get_regular_lr(runner)
+            if self.warmup is None or cur_iter >= self.warmup_iters:
+                self._set_lr(runner, self.regular_lr)
+            else:
+                warmup_lr = self.get_warmup_lr(cur_iter)
+                self._set_lr(runner, warmup_lr)
+        elif self.by_epoch:
+            if self.warmup is None or cur_iter > self.warmup_iters:
+                return
+            elif cur_iter == self.warmup_iters:
+                self._set_lr(runner, self.regular_lr)
+            else:
+                warmup_lr = self.get_warmup_lr(cur_iter)
+                self._set_lr(runner, warmup_lr)
+
+
+class FixedLrUpdaterHook(LrUpdaterHook):
+
+    def __init__(self, **kwargs):
+        super(FixedLrUpdaterHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        return base_lr
+
+
+class StepLrUpdaterHook(LrUpdaterHook):
+
+    def __init__(self, step, gamma=0.1, **kwargs):
+        assert isinstance(step, (list, int))
+        if isinstance(step, list):
+            for s in step:
+                assert isinstance(s, int) and s > 0
+        elif isinstance(step, int):
+            assert step > 0
+        else:
+            raise TypeError('"step" must be a list or integer')
+        self.step = step
+        self.gamma = gamma
+        super(StepLrUpdaterHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        progress = runner.epoch if self.by_epoch else runner.iter
+
+        if isinstance(self.step, int):
+            return base_lr * (self.gamma**(progress // self.step))
+
+        exp = len(self.step)
+        for i, s in enumerate(self.step):
+            if progress < s:
+                exp = i
+                break
+        return base_lr * self.gamma**exp
+
+
+class ExpLrUpdaterHook(LrUpdaterHook):
+
+    def __init__(self, gamma, **kwargs):
+        self.gamma = gamma
+        super(ExpLrUpdaterHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        progress = runner.epoch if self.by_epoch else runner.iter
+        return base_lr * self.gamma**progress
+
+
+class PolyLrUpdaterHook(LrUpdaterHook):
+
+    def __init__(self, power=1., min_lr=0., **kwargs):
+        self.power = power
+        self.min_lr = min_lr
+        super(PolyLrUpdaterHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        if self.by_epoch:
+            progress = runner.epoch
+            max_progress = runner.max_epochs
+        else:
+            progress = runner.iter
+            max_progress = runner.max_iters
+        coeff = (1 - progress / max_progress)**self.power
+        return (base_lr - self.min_lr) * coeff + self.min_lr
+
+
+class InvLrUpdaterHook(LrUpdaterHook):
+
+    def __init__(self, gamma, power=1., **kwargs):
+        self.gamma = gamma
+        self.power = power
+        super(InvLrUpdaterHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        progress = runner.epoch if self.by_epoch else runner.iter
+        return base_lr * (1 + self.gamma * progress)**(-self.power)
+
+
+class CosineLrUpdaterHook(LrUpdaterHook):
+
+    def __init__(self, target_lr=0.001, **kwargs):
+        self.target_lr = target_lr
+        super(CosineLrUpdaterHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        if self.by_epoch:
+            progress = runner.epoch
+            max_progress = runner.max_epochs
+        else:
+            progress = runner.iter
+            max_progress = runner.max_iters
+        return self.target_lr + 0.5 * (base_lr - self.target_lr) * \
+            (1 + cos(pi * (progress / max_progress)))
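
For context, this hook family is usually configured as a dict: Runner.register_lr_hooks (defined later in this patch) title-cases the 'policy' key into a class name, and any extra keys are absorbed by the constructors above. The values below are purely illustrative:

    lr_config = dict(
        policy='step',        # 'step'.title() + 'LrUpdaterHook' -> StepLrUpdaterHook
        warmup='linear',      # ramp up from warmup_ratio * lr over warmup_iters
        warmup_iters=500,
        warmup_ratio=1.0 / 3,
        step=[8, 11])         # multiply lr by gamma (default 0.1) at these epochs
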
diff --git a/CDARTS_detection/mmcv/runner/hooks/memory.py b/CDARTS_detection/mmcv/runner/hooks/memory.py
new file mode 100644
index 0000000..6bd11d0
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/memory.py
@@ -0,0 +1,23 @@
+import torch
+
+from .hook import Hook
+
+
+class EmptyCacheHook(Hook):
+
+    def __init__(self, before_epoch=False, after_epoch=True, after_iter=False):
+        self._before_epoch = before_epoch
+        self._after_epoch = after_epoch
+        self._after_iter = after_iter
+
+    def after_iter(self, runner):
+        if self._after_iter:
+            torch.cuda.empty_cache()
+
+    def before_epoch(self, runner):
+        if self._before_epoch:
+            torch.cuda.empty_cache()
+
+    def after_epoch(self, runner):
+        if self._after_epoch:
+            torch.cuda.empty_cache()
diff --git a/CDARTS_detection/mmcv/runner/hooks/optimizer.py b/CDARTS_detection/mmcv/runner/hooks/optimizer.py
new file mode 100644
index 0000000..b2ef579
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/optimizer.py
@@ -0,0 +1,35 @@
+from torch.nn.utils import clip_grad
+
+from .hook import Hook
+
+
+class OptimizerHook(Hook):
+
+    def __init__(self, grad_clip=None):
+        self.grad_clip = grad_clip
+
+    def clip_grads(self, params):
+        clip_grad.clip_grad_norm_(
+            filter(lambda p: p.requires_grad, params), **self.grad_clip)
+
+    def after_train_iter(self, runner):
+        runner.optimizer.zero_grad()
+        runner.outputs['loss'].backward()
+        if self.grad_clip is not None:
+            self.clip_grads(runner.model.parameters())
+        runner.optimizer.step()
+
+
+class OptimizerArchHook(Hook):
+
+    def __init__(self, grad_clip=None):
+        self.grad_clip = grad_clip
+
+    def clip_grads(self, params):
+        clip_grad.clip_grad_norm_(
+            filter(lambda p: p.requires_grad, params), **self.grad_clip)
+
+    def arch_after_train_iter(self, runner):
+        if runner.optimizer_arch is not None:
+            runner.optimizer_arch.zero_grad()
+        runner.outputs_arch['loss'].backward()
+        if runner.optimizer_arch is not None:
+            runner.optimizer_arch.step()
\ No newline at end of file
diff --git a/CDARTS_detection/mmcv/runner/hooks/sampler_seed.py b/CDARTS_detection/mmcv/runner/hooks/sampler_seed.py
new file mode 100644
index 0000000..43cc228
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/hooks/sampler_seed.py
@@ -0,0 +1,7 @@
+from .hook import Hook
+
+
+class DistSamplerSeedHook(Hook):
+
+    def before_epoch(self, runner):
+        runner.data_loader.sampler.set_epoch(runner.epoch)
diff --git a/CDARTS_detection/mmcv/runner/log_buffer.py b/CDARTS_detection/mmcv/runner/log_buffer.py
new file mode 100644
index 0000000..7cb8b3c
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/log_buffer.py
@@ -0,0 +1,40 @@
+from collections import OrderedDict
+
+import numpy as np
+
+
+class LogBuffer(object):
+
+    def __init__(self):
+        self.val_history = OrderedDict()
+        self.n_history = OrderedDict()
+        self.output = OrderedDict()
+        self.ready = False
+
+    def clear(self):
+        self.val_history.clear()
+        self.n_history.clear()
+        self.clear_output()
+
+    def clear_output(self):
+        self.output.clear()
+        self.ready = False
+
+    def update(self, vars, count=1):
+        assert isinstance(vars, dict)
+        for key, var in vars.items():
+            if key not in self.val_history:
+                self.val_history[key] = []
+                self.n_history[key] = []
+            self.val_history[key].append(var)
+            self.n_history[key].append(count)
+
+    def average(self, n=0):
+        """Average the latest n values, or all values if n is 0."""
+        assert n >= 0
+        for key in self.val_history:
+            values = np.array(self.val_history[key][-n:])
+            nums = np.array(self.n_history[key][-n:])
+            avg = np.sum(values * nums) / np.sum(nums)
+            self.output[key] = avg
+        self.ready = True
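
Note that average() weights each recorded value by its sample count, which keeps epoch averages correct when the final batch is smaller. A quick sketch with made-up numbers:

    buf = LogBuffer()
    buf.update(dict(loss=0.9), count=32)   # batch of 32 samples
    buf.update(dict(loss=0.7), count=16)   # smaller final batch
    buf.average()                          # (0.9 * 32 + 0.7 * 16) / 48
    assert abs(buf.output['loss'] - 0.8333) < 1e-3
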
diff --git a/CDARTS_detection/mmcv/runner/parallel_test.py b/CDARTS_detection/mmcv/runner/parallel_test.py
new file mode 100644
index 0000000..1f5a3ec
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/parallel_test.py
@@ -0,0 +1,74 @@
+import multiprocessing
+
+import torch
+
+import mmcv
+from .checkpoint import load_checkpoint
+
+
+def worker_func(model_cls, model_kwargs, checkpoint, dataset, data_func,
+                gpu_id, idx_queue, result_queue):
+    model = model_cls(**model_kwargs)
+    load_checkpoint(model, checkpoint, map_location='cpu')
+    torch.cuda.set_device(gpu_id)
+    model.cuda()
+    model.eval()
+    with torch.no_grad():
+        while True:
+            idx = idx_queue.get()
+            data = dataset[idx]
+            result = model(**data_func(data, gpu_id))
+            result_queue.put((idx, result))
+
+
+def parallel_test(model_cls,
+                  model_kwargs,
+                  checkpoint,
+                  dataset,
+                  data_func,
+                  gpus,
+                  workers_per_gpu=1):
+    """Parallel testing on multiple GPUs.
+
+    Args:
+        model_cls (type): Model class type.
+        model_kwargs (dict): Arguments to init the model.
+        checkpoint (str): Checkpoint filepath.
+        dataset (:obj:`Dataset`): The dataset to be tested.
+        data_func (callable): The function that generates model inputs.
+        gpus (list[int]): GPU ids to be used.
+        workers_per_gpu (int): Number of processes on each GPU. It is possible
+            to run multiple workers on each GPU.
+
+    Returns:
+        list: Test results.
+    """
+    ctx = multiprocessing.get_context('spawn')
+    idx_queue = ctx.Queue()
+    result_queue = ctx.Queue()
+    num_workers = len(gpus) * workers_per_gpu
+    workers = [
+        ctx.Process(
+            target=worker_func,
+            args=(model_cls, model_kwargs, checkpoint, dataset, data_func,
+                  gpus[i % len(gpus)], idx_queue, result_queue))
+        for i in range(num_workers)
+    ]
+    for w in workers:
+        w.daemon = True
+        w.start()
+
+    for i in range(len(dataset)):
+        idx_queue.put(i)
+
+    results = [None for _ in range(len(dataset))]
+    prog_bar = mmcv.ProgressBar(task_num=len(dataset))
+    for _ in range(len(dataset)):
+        idx, res = result_queue.get()
+        results[idx] = res
+        prog_bar.update()
+    print('\n')
+    for worker in workers:
+        worker.terminate()
+
+    return results
diff --git a/CDARTS_detection/mmcv/runner/priority.py b/CDARTS_detection/mmcv/runner/priority.py
new file mode 100644
index 0000000..caf1439
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/priority.py
@@ -0,0 +1,53 @@
+from enum import Enum
+
+
+class Priority(Enum):
+    """Hook priority levels.
+
+    +------------+------------+
+    | Level      | Value      |
+    +============+============+
+    | HIGHEST    | 0          |
+    +------------+------------+
+    | VERY_HIGH  | 10         |
+    +------------+------------+
+    | HIGH       | 30         |
+    +------------+------------+
+    | NORMAL     | 50         |
+    +------------+------------+
+    | LOW        | 70         |
+    +------------+------------+
+    | VERY_LOW   | 90         |
+    +------------+------------+
+    | LOWEST     | 100        |
+    +------------+------------+
+    """
+
+    HIGHEST = 0
+    VERY_HIGH = 10
+    HIGH = 30
+    NORMAL = 50
+    LOW = 70
+    VERY_LOW = 90
+    LOWEST = 100
+
+
+def get_priority(priority):
+    """Get priority value.
+
+    Args:
+        priority (int or str or :obj:`Priority`): Priority.
+
+    Returns:
+        int: The priority value.
+    """
+    if isinstance(priority, int):
+        if priority < 0 or priority > 100:
+            raise ValueError('priority must be between 0 and 100')
+        return priority
+    elif isinstance(priority, Priority):
+        return priority.value
+    elif isinstance(priority, str):
+        return Priority[priority.upper()].value
+    else:
+        raise TypeError('priority must be an integer or Priority enum value')
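
get_priority accepts all three spellings interchangeably; a short sketch of the equivalences, following the code above:

    assert get_priority(50) == 50              # bare int, range-checked
    assert get_priority('very_low') == 90      # str, looked up as Priority['VERY_LOW']
    assert get_priority(Priority.HIGH) == 30   # enum member, .value returned
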
diff --git a/CDARTS_detection/mmcv/runner/runner.py b/CDARTS_detection/mmcv/runner/runner.py
new file mode 100644
index 0000000..c070f5b
--- /dev/null
+++ b/CDARTS_detection/mmcv/runner/runner.py
@@ -0,0 +1,426 @@
+import logging
+import os
+import os.path as osp
+import time
+
+import math
+import torch
+import numpy as np
+import mmcv
+from . import hooks
+from .checkpoint import load_checkpoint, save_checkpoint
+from .hooks import (CheckpointHook, Hook, IterTimerHook, LrUpdaterHook,
+                    OptimizerHook, OptimizerArchHook, lr_updater)
+from .log_buffer import LogBuffer
+from .priority import get_priority
+from .utils import get_dist_info, get_host_info, get_time_str, obj_from_dict
+
+
+class Runner(object):
+    """A training helper for PyTorch.
+
+    Args:
+        model (:obj:`torch.nn.Module`): The model to be run.
+        batch_processor (callable): A callable method that processes a data
+            batch. The interface of this method should be
+            `batch_processor(model, data, train_mode) -> dict`.
+        optimizer (dict or :obj:`torch.optim.Optimizer`): If it is a dict,
+            runner will construct an optimizer according to it.
+        optimizer_arch (dict or :obj:`torch.optim.Optimizer`, optional):
+            Optimizer for the architecture parameters, built the same way.
+        work_dir (str, optional): The working directory to save checkpoints
+            and logs.
+        log_level (int): Logging level.
+        logger (:obj:`logging.Logger`): Custom logger. If `None`, use the
+            default logger.
+        arch_name (str, optional): Name of the searched architecture part.
+    """
+
+    def __init__(self,
+                 model,
+                 batch_processor,
+                 optimizer=None,
+                 optimizer_arch=None,
+                 work_dir=None,
+                 log_level=logging.INFO,
+                 logger=None,
+                 arch_name=None):
+        assert callable(batch_processor)
+        self.model = model
+        self.arch_name = arch_name
+        if optimizer is not None:
+            self.optimizer = self.init_optimizer(optimizer)
+        else:
+            self.optimizer = None
+        if optimizer_arch is not None:
+            self.optimizer_arch = self.init_optimizer(optimizer_arch)
+        else:
+            self.optimizer_arch = None
+        self.batch_processor = batch_processor
+
+        # create work_dir
+        if mmcv.is_str(work_dir):
+            self.work_dir = osp.abspath(work_dir)
+            mmcv.mkdir_or_exist(self.work_dir)
+        elif work_dir is None:
+            self.work_dir = None
+        else:
+            raise TypeError('"work_dir" must be a str or None')
+
+        # get model name from the model class
+        if hasattr(self.model, 'module'):
+            self._model_name = self.model.module.__class__.__name__
+        else:
+            self._model_name = self.model.__class__.__name__
+
+        self._rank, self._world_size = get_dist_info()
+        self.timestamp = get_time_str()
+        if logger is None:
+            self.logger = self.init_logger(work_dir, log_level)
+        else:
+            self.logger = logger
+        self.log_buffer = LogBuffer()
+
+        self.mode = None
+        self._hooks = []
+        self._epoch = 0
+        self._iter = 0
+        self._inner_iter = 0
+        self._max_epochs = 0
+        self._max_iters = 0
+
+    @property
+    def model_name(self):
+        """str: Name of the model, usually the module class name."""
+        return self._model_name
+
+    @property
+    def rank(self):
+        """int: Rank of current process. (distributed training)"""
+        return self._rank
+
+    @property
+    def world_size(self):
+        """int: Number of processes participating in the job.
+        (distributed training)"""
+        return self._world_size
+
+    @property
+    def hooks(self):
+        """list[:obj:`Hook`]: A list of registered hooks."""
+        return self._hooks
+
+    @property
+    def epoch(self):
+        """int: Current epoch."""
+        return self._epoch
+
+    @property
+    def iter(self):
+        """int: Current iteration."""
+        return self._iter
+
+    @property
+    def inner_iter(self):
+        """int: Iteration in an epoch."""
+        return self._inner_iter
+
+    @property
+    def max_epochs(self):
+        """int: Maximum training epochs."""
+        return self._max_epochs
+
+    @property
+    def max_iters(self):
+        """int: Maximum training iterations."""
+        return self._max_iters
+
+    def init_optimizer(self, optimizer):
+        """Init the optimizer.
+
+        Args:
+            optimizer (dict or :obj:`~torch.optim.Optimizer`): Either an
+                optimizer object or a dict used for constructing the
+                optimizer.
+
+        Returns:
+            :obj:`~torch.optim.Optimizer`: An optimizer object.
+
+        Examples:
+            >>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9)
+            >>> type(runner.init_optimizer(optimizer))
+            <class 'torch.optim.sgd.SGD'>
+        """
+        if isinstance(optimizer, dict):
+            optimizer = obj_from_dict(optimizer, torch.optim,
+                                      dict(params=self.model.parameters()))
+        elif not isinstance(optimizer, torch.optim.Optimizer):
+            raise TypeError(
+                'optimizer must be either an Optimizer object or a dict, '
+                'but got {}'.format(type(optimizer)))
+        return optimizer
+
+    def _add_file_handler(self,
+                          logger,
+                          filename=None,
+                          mode='w',
+                          level=logging.INFO):
+        # TODO: move this method out of runner
+        file_handler = logging.FileHandler(filename, mode)
+        file_handler.setFormatter(
+            logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+        file_handler.setLevel(level)
+        logger.addHandler(file_handler)
+        return logger
+
+    def init_logger(self, log_dir=None, level=logging.INFO):
+        """Init the logger.
+
+        Args:
+            log_dir (str, optional): Log file directory. If not specified, no
+                log file will be used.
+            level (int or str): See the built-in python logging module.
+
+        Returns:
+            :obj:`~logging.Logger`: Python logger.
+        """
+        logging.basicConfig(
+            format='%(asctime)s - %(levelname)s - %(message)s', level=level)
+        logger = logging.getLogger(__name__)
+        if log_dir and self.rank == 0:
+            filename = '{}.log'.format(self.timestamp)
+            log_file = osp.join(log_dir, filename)
+            self._add_file_handler(logger, log_file, level=level)
+        return logger
+
+    def current_lr(self):
+        """Get current learning rates.
+
+        Returns:
+            list: Current learning rate of all param groups.
+        """
+        if self.optimizer is None:
+            raise RuntimeError(
+                'lr is not applicable because optimizer does not exist.')
+        return [group['lr'] for group in self.optimizer.param_groups]
+
+    def register_hook(self, hook, priority='NORMAL'):
+        """Register a hook into the hook list.
+
+        Args:
+            hook (:obj:`Hook`): The hook to be registered.
+            priority (int or str or :obj:`Priority`): Hook priority.
+                Lower value means higher priority.
+ """ + assert isinstance(hook, Hook) + if hasattr(hook, 'priority'): + raise ValueError('"priority" is a reserved attribute for hooks') + priority = get_priority(priority) + hook.priority = priority + # insert the hook to a sorted list + inserted = False + for i in range(len(self._hooks) - 1, -1, -1): + if priority >= self._hooks[i].priority: + self._hooks.insert(i + 1, hook) + inserted = True + break + if not inserted: + self._hooks.insert(0, hook) + + def build_hook(self, args, hook_type=None): + if isinstance(args, Hook): + return args + elif isinstance(args, dict): + assert issubclass(hook_type, Hook) + return hook_type(**args) + else: + raise TypeError('"args" must be either a Hook object' + ' or dict, not {}'.format(type(args))) + + def call_hook(self, fn_name): + for hook in self._hooks: + getattr(hook, fn_name)(self) + + def load_checkpoint(self, filename, map_location='cpu', strict=False): + self.logger.info('load checkpoint from %s', filename) + return load_checkpoint(self.model, filename, map_location, strict, + self.logger) + + def save_checkpoint(self, + out_dir, + filename_tmpl='epoch_{}.pth', + save_optimizer=True, + meta=None): + if meta is None: + meta = dict(epoch=self.epoch + 1, iter=self.iter) + else: + meta.update(epoch=self.epoch + 1, iter=self.iter) + + filename = filename_tmpl.format(self.epoch + 1) + filepath = osp.join(out_dir, filename) + linkpath = osp.join(out_dir, 'latest.pth') + optimizer = self.optimizer if save_optimizer else None + save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta) + # use relative symlink + mmcv.symlink(filename, linkpath) + + def train(self, data_loader, data_loader_arch, **kwargs): + self.model.train() + self.mode = 'train' + self.data_loader = data_loader + self._max_iters = self._max_epochs * len(data_loader) + self.call_hook('before_train_epoch') + + for i, data_batch in enumerate(data_loader): + self._inner_iter = i + self.call_hook('before_train_iter') + + outputs = self.batch_processor( + self.model, data_batch, train_mode=True, **kwargs) + if not isinstance(outputs, dict): + raise TypeError('batch_processor() must return a dict') + if 'log_vars' in outputs: + self.log_buffer.update(outputs['log_vars'], + outputs['num_samples']) + self.outputs = outputs + self.call_hook('after_train_iter') + + self._iter += 1 + + self.call_hook('after_train_epoch') + self._epoch += 1 + + def val(self, data_loader, data_loader_arch, **kwargs): + self.model.eval() + self.mode = 'val' + self.data_loader = data_loader + self.call_hook('before_val_epoch') + + for i, data_batch in enumerate(data_loader): + self._inner_iter = i + self.call_hook('before_val_iter') + with torch.no_grad(): + outputs = self.batch_processor( + self.model, data_batch, train_mode=False, **kwargs) + if not isinstance(outputs, dict): + raise TypeError('batch_processor() must return a dict') + if 'log_vars' in outputs: + self.log_buffer.update(outputs['log_vars'], + outputs['num_samples']) + self.outputs = outputs + self.call_hook('after_val_iter') + + self.call_hook('after_val_epoch') + + def resume(self, checkpoint, resume_optimizer=True, + map_location='default'): + if map_location == 'default': + device_id = torch.cuda.current_device() + checkpoint = self.load_checkpoint( + checkpoint, + map_location=lambda storage, loc: storage.cuda(device_id)) + else: + checkpoint = self.load_checkpoint( + checkpoint, map_location=map_location) + + self._epoch = checkpoint['meta']['epoch'] + self._iter = checkpoint['meta']['iter'] + if 'optimizer' in checkpoint and 
+            self.optimizer.load_state_dict(checkpoint['optimizer'])
+
+        self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)
+
+    def run(self, data_loaders, data_loaders_arch, workflow, max_epochs,
+            **kwargs):
+        """Start running.
+
+        Args:
+            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
+                and validation.
+            data_loaders_arch (list[:obj:`DataLoader`] or None): Optional
+                dataloaders for the architecture-update steps; pass None to
+                run plain training.
+            workflow (list[tuple]): A list of (phase, epochs) to specify the
+                running order and epochs. e.g., [('train', 2), ('val', 1)]
+                means running 2 epochs for training and 1 epoch for
+                validation, iteratively.
+            max_epochs (int): Total training epochs.
+        """
+        assert isinstance(data_loaders, list)
+        assert mmcv.is_list_of(workflow, tuple)
+        assert len(data_loaders) == len(workflow)
+
+        self._max_epochs = max_epochs
+        work_dir = self.work_dir if self.work_dir is not None else 'NONE'
+        self.logger.info('Start running, host: %s, work_dir: %s',
+                         get_host_info(), work_dir)
+        self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs)
+        self.call_hook('before_run')
+
+        while self.epoch < max_epochs:
+            for i, flow in enumerate(workflow):
+                mode, epochs = flow
+                if isinstance(mode, str):  # self.train()
+                    if not hasattr(self, mode):
+                        raise ValueError(
+                            'runner has no method named "{}" to run an epoch'.
+                            format(mode))
+                    epoch_runner = getattr(self, mode)
+                elif callable(mode):  # custom train()
+                    epoch_runner = mode
+                else:
+                    raise TypeError('mode in workflow must be a str or '
+                                    'callable function, not {}'.format(
+                                        type(mode)))
+                for _ in range(epochs):
+                    if mode == 'train' and self.epoch >= max_epochs:
+                        return
+                    if data_loaders_arch is not None:
+                        epoch_runner(data_loaders[i], data_loaders_arch[i],
+                                     **kwargs)
+                    else:
+                        epoch_runner(data_loaders[i], None, **kwargs)
+
+        time.sleep(1)  # wait for some hooks like loggers to finish
+        self.call_hook('after_run')
+
+    def register_lr_hooks(self, lr_config):
+        if isinstance(lr_config, LrUpdaterHook):
+            self.register_hook(lr_config)
+        elif isinstance(lr_config, dict):
+            assert 'policy' in lr_config
+            hook_name = lr_config['policy'].title() + 'LrUpdaterHook'
+            if not hasattr(lr_updater, hook_name):
+                raise ValueError('"{}" does not exist'.format(hook_name))
+            hook_cls = getattr(lr_updater, hook_name)
+            self.register_hook(hook_cls(**lr_config))
+        else:
+            raise TypeError('"lr_config" must be either a LrUpdaterHook object'
+                            ' or dict, not {}'.format(type(lr_config)))
+
+    def register_logger_hooks(self, log_config):
+        log_interval = log_config['interval']
+        for info in log_config['hooks']:
+            logger_hook = obj_from_dict(
+                info, hooks, default_args=dict(interval=log_interval))
+            self.register_hook(logger_hook, priority='VERY_LOW')
+
+    def register_training_hooks(self,
+                                lr_config,
+                                optimizer_config=None,
+                                optimizer_arch_config=None,
+                                checkpoint_config=None,
+                                log_config=None):
+        """Register default hooks for training.
+
+        Default hooks include:
+
+        - LrUpdaterHook
+        - OptimizerHook (and OptimizerArchHook)
+        - CheckpointHook
+        - IterTimerHook
+        - LoggerHook(s)
+        """
+        if optimizer_config is None:
+            optimizer_config = {}
+        if optimizer_arch_config is None:
+            optimizer_arch_config = {}
+        if checkpoint_config is None:
+            checkpoint_config = {}
+        self.register_lr_hooks(lr_config)
+        self.register_hook(self.build_hook(optimizer_config, OptimizerHook))
+        self.register_hook(
+            self.build_hook(optimizer_arch_config, OptimizerArchHook))
+        self.register_hook(self.build_hook(checkpoint_config, CheckpointHook))
+        self.register_hook(IterTimerHook())
+        if log_config is not None:
+            self.register_logger_hooks(log_config)
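
Putting the pieces of this file together, a minimal training setup might look as follows. Here model, train_loader and batch_processor are assumed to exist (batch_processor returning a dict with a 'loss' tensor), and the 'TextLoggerHook' lookup assumes the hooks package exports that class:

    runner = Runner(model, batch_processor,
                    optimizer=dict(type='SGD', lr=0.02, momentum=0.9),
                    work_dir='./work_dirs/example')
    runner.register_training_hooks(
        lr_config=dict(policy='step', step=[8, 11]),
        optimizer_config=dict(grad_clip=dict(max_norm=35, norm_type=2)),
        checkpoint_config=dict(interval=1),
        log_config=dict(interval=50, hooks=[dict(type='TextLoggerHook')]))
    # no architecture loaders / optimizer in the plain training case
    runner.run([train_loader], None, [('train', 1)], max_epochs=12)
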
+ """ + assert isinstance(info, dict) and 'type' in info + assert isinstance(default_args, dict) or default_args is None + args = info.copy() + obj_type = args.pop('type') + if mmcv.is_str(obj_type): + if parent is not None: + obj_type = getattr(parent, obj_type) + else: + obj_type = sys.modules[obj_type] + elif not isinstance(obj_type, type): + raise TypeError('type must be a str or valid type, but got {}'.format( + type(obj_type))) + if default_args is not None: + for name, value in default_args.items(): + args.setdefault(name, value) + return obj_type(**args) diff --git a/CDARTS_detection/mmcv/utils/__init__.py b/CDARTS_detection/mmcv/utils/__init__.py new file mode 100644 index 0000000..b540b7c --- /dev/null +++ b/CDARTS_detection/mmcv/utils/__init__.py @@ -0,0 +1,17 @@ +from .config import ConfigDict, Config +from .misc import (is_str, iter_cast, list_cast, tuple_cast, is_seq_of, + is_list_of, is_tuple_of, slice_list, concat_list, + check_prerequisites, requires_package, requires_executable) +from .path import (is_filepath, fopen, check_file_exist, mkdir_or_exist, + symlink, scandir, FileNotFoundError) +from .progressbar import ProgressBar, track_progress, track_parallel_progress +from .timer import Timer, TimerError, check_time + +__all__ = [ + 'ConfigDict', 'Config', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast', + 'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list', + 'check_prerequisites', 'requires_package', 'requires_executable', + 'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink', + 'scandir', 'FileNotFoundError', 'ProgressBar', 'track_progress', + 'track_parallel_progress', 'Timer', 'TimerError', 'check_time' +] diff --git a/CDARTS_detection/mmcv/utils/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmcv/utils/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c442ef92e43a50d88c60c3a033524cb23cddd336 GIT binary patch literal 1060 zcmbu8Id9uA6vrhWk}df@<2Yy0#Y+veONU{=~94e zr+$S@<&)F2$W)*L_|v;c{`E*bYc_+|x2yPj*>Qe2e?3!czp+Dovj7KN;h@W0EU}VH zeo2(E!YWv0RrHvLKJ&50YFKA=3|N363$ei(*kny?u@<&jyO>uN9qh6$_E-=5td9dW zz#$vrh>dW}#zjp<9N>ga@Q@wilue6RRUF|lJH``sf~V}%#ys$$26YG^ga$OB1#Rd+ z7kbc#0SsXTV>o~b9KsZi;22Ke6wa1Yb_VC4oY|wFuKloC$pr86Y>_VCr-?S+-kvp# zm#Ro2lO=i^sj|B%(MNjPLwd#zJs$pRn_)pHt-01jVaZ%mjN)ni3`3b!jh~BP)a|!6W#D z8lg@I2qB?CXcAh4Hlai45_*I_VL%uXMuaipfG{B(66jLPBa6$~x$*2l*n|J0$lm5u zCI6$#ROl%FK^+T!xAH2M52TuE(WnQCjUPp}*HQG~Uogxi<%RgV0rWwzXH<7}Sn zAXam=$<-VYxaNsY^UT`Gm-$wwLd~Nn%~BmjPc}PKdx1G@1%E~Vf}#d?+~?)TrGYyr MH{8JW?5MfF0M_Lh`v3p{ literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/utils/__pycache__/config.cpython-36.pyc b/CDARTS_detection/mmcv/utils/__pycache__/config.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aef28b7614f8e51f36a95ec6eb021698a999494 GIT binary patch literal 5667 zcmcIo&5s;M74Pb=`P|v@+Hvf}i8`Ao>q(qhJIMvdHbk+BOwi_&IGJE|dZv1Id$y;0 zQdOJmjAuZ~F600XLLek0#EA=6PW&x>LP8vT;mBbn6n?LIruPFUz=0WcRdv;?s#ovh z_g?i&OH0*nA8ZBxIAa)pHIDpjw7-K#PEl}!v)CB0j;UK#$3kt!_R#4#j7F{49hN#J z-L~WMu+piZ?Ql1)4r`s7ZkJ*=oV{zbnh(H%vC(2#<5jV_GYk{;Rv=_3(6bZ`N2yT$Fy%XOhz|Q^)Kd=_Cg5ec zm&S3}Q&E~oKj`+*)$DDBy=}iA#i1YWMpE@?@x{-YH{>&Dghq#P14m}u+&4Pb7e=3P zi`)C3|Xn;LuOz@!7r z%sWQ%ecMphc2!(s2K|-I*}$0^19wmYjR`92LnC<(RAo?o%#4}Mj7fEB&#XJfhh#12 z`H}sBk+>fiQ}bP8YRv^>ZcLt9yEj|&((VA~^R@!%C8-xEC8F+*3OyQZRWe8Jj6xyG z;E>L`87d<2eNmyF#;Xf@E#J)Tr(zhWtm6BA$+${P^?dE4RGyEpUFzDH}J*=l4?q{5-hsuA2z zsvzlwnI)CT>H+6I*3kHsSy7@gryr-VOS_wNLHRxNkLl6%C}W0f^HnnnHUMo 
z6J&G%9#=CVGOO3$#ES^~5v+APW1k*cOKz^uW5SboBozkCvo>w(p1#NFc?{4vz0H1@7Ql}}`D4rsyO z0C2B*{h$}cks5n&crQmoFG%>JNtiF&)vAZzZRG(&D0D~+4~J>u(J;*fR7elNlP@5z zdZV!d3a<|&ig7TE+unOyp$A2T0y~C!y-0crof4oza|4gGxqva^A&4b_ERNI9Fxu;d za2IlV84E z^lDz#iq|<&@xGeN*86A_sI9Bs|GN9#tKK04tM}GuZ!J$K>BYO3Jl*jk1blBTKzVsK z?|KO@?*WSDuP@^@XLPoj+ij0viGBnRJ8??=kUH zj3P_}ERkgW#5`}zkj+flwkvKbmjft2r}-tKn3<|P0OWFKiok#nU(>ECRqiw)hr=CpWFWBG%2B$I39Udfc^8lLP((QW-E#X%k6nqw`t(KLU?aMj~5NEN*2`oLRULzj^W{hs!p?zpW z*L2zB>D*)rb8laZ(;nRZM*DEaf!!Vt;{&HnID(2WW%9zCNjwJ1tYb(6jE0&#q>vEi zKm`#b>1R)Y+w#Ia9{lsGufA$o;#JbA`c;*lWtL*yFE%+`yz2xp;Cf`pt|qjW?`6o~@K$l{?iOUaIh!61*%CGTI_C%yp*38YNIBDi}9>YjoAhfr_c%;*qR!PtW zKs<{gYib7kZP-#m`G^$n&=@I7yhQ~i0LX>kM%8J6SVckX=UjKRlF0hRI+3=iC^A!> zYh(>N2rZmX>48L2x@_2_qXslZb(KvXnQP|+J=quiNIjH~)4Ti~iYb6=yDXXt+3SoY z7m%3(u=Y%ON#Tk%u#`<8MSlR914YoSrNjYcx}RX|u-D3gZE6A*D^nBjXa3^C%sN8h z0NSd1Sc4gN032@{?-;nTtQ@7_$Z6@u3niB}CQmO6*c zOAZ-}FF20&&)ieV@idF#BeW2ZwYAr%_aP~SCi*o7wXYb!LRi3|dqDFrFVF2mfn%Qg(y%roU$C>|<*|=Mw6IEIjGjCIE zRd<*4*SRgXHGj{DFFJ;(J*deqOoRUPH={NGnGsu`8rll&xK!F`3F;-_&x#aHYF^+M z_=~uo;TQQ!xS!>h_{+GTqGexj~M5f`cO(;$iQ>_56X zNP`g7f%4b;yID0*ZH#-81h!BZmUL12qy_yPToa9cjhCrL>U3&zbI<(RM4g#BdzShU zYAjt-*{W{Mjm(j;XN~QV#q9^K{9t6?F-F$N-h(kYo|}#r!elo*l`;{ENW`gPNrwy) zq(XX|pY6T8-g%U3S%mexf&RPsJJ^7hRhh?C!+O`7rB~#$2V^ zkgP%Dq3MT0r9teGor)=FO!`TY4rDyjdW|K0YUhRM_i=}*zVL%iAo!YI*JO~=!WM#W z2VuxI#bja{(NE$>LZ*TWdAZ9RN>wY1JEn>1sOyfA>k5cF2lmE;rI2|o$p{bUQ(_BQ>@ zEp4C8-$lcZq)cSyP#;N-)T%_uBcU=o^7>5&zDE(DGPj&ATbQwRs}4i9OFz(#o)>d8 z^*nuO`|zAIcvZ_NjC$R4&6??2OO|UomUe1iJ2gJiy@E@J<`N#rk>R!`H@_iQw1)8y z_Ct?_0jvj4eaa(Q-;-(Jg)Bm#C!E@BbOy1%@#RF|FLN(!nv$e$=5l5Z4T%>LO`f2F zLVH$YYM?c~iVu0PR^%x>KZB0RGw7HO`V@cvPbDjQ%W6K=d@k(lqtkeb`T}qv=%@Gg?H3o3UQg3h|@y>;j@u6{lW$d8d z8JVbmi|QY~b+5#w*9ez-!hgWF3&;*~tY%6)90*|ak(N0CNNC9#cZ%i(8clj5@)3PL zO(FOcm&6Xh1jsN0H|Y6MQLDpGPh1KCuZq;PJBFX>c>t&;V@yRrTpoumn@JLGu-WZu z(hYE>u^^xI*3G-kik!zPWbU0pDvb`3_P=cDx>A5TAcO{kW(9khc$h67&@*#Ur{!+d z@u6M5We%zLR3Olnh4xA3eaxUvqc9d+3VYK*_|pXtFy%$mKSOAIcJy*TPo$UfO(XpR zWP#h0NOC2A28lA{c`6PCIyU~cj`gH-51-3Oe+kdepib*(b)GAzzh56(0O)qCJ`2z% zkLsk;o>A!1tU?si1nfGdALBuD3X06rDWaP=Mchbj#44BuScUn(mhYw&Zf#X(Yxgw! 
z=o(`%j|TQ&e%c7sPEW}%3?Y3~@GqL-I7G~(x*wf3?c z5)@51g9sQ&H83&l^D{ll04io8P!OKqW7srxv*QArhS*@&r#qOQuINJKWccc85Oc_& zW_N!rAm)G|qAR2t{!9%bnv}!?UE%qFx!Q~;La{hWS>&aDZ-ad`W|!YXDTyFA=W~3Z zIFOV&01^gKW}|Ob`;8Y43f@rdBoh6ziuB^H$f{nyFJcY^nG0fV!Indlw_M^0BwuC+ zd?z!5rvgfwR5$5jIz^sp)CeV(O$&Ijf{V%vc$W=)JDShuq=DG{$oLjMqZ$35ZzQqr zr2@=x;w5Aih?8?xkAcQ_v5`{Mv-v|gPVN_xkX`Z%%_N^4rZwc|PwXZc4IE>{Kxn#LV^)Ggv<3iS@+pIQbIpnPYQAk!0WU z(M-OlQ_2XPuiIiLZ#91jFBt3KG)_j(a2 zI`7TqkdhNr$QLmZvZJ}eMKXu-hs-S(G+UhY1eJ%Z&WY;Vk}#6Dsly*JO3sZeOCua{ zB4SAA%$hZPTMynoFw*+FVFE8z*VbvAgKy=iO)HzH(F^RLh6;N>QFXaNa@+DPdj2^T z?@)!Af7Ajx)cy~sRTSBVb#ZBU`M(OH6>K_sw3)%*!vOiG!&6E<*Qp~0{{3X?lglge z8r~)zOB)mIMK`F)>nL{XlpvC;;6UoMgHs$FNU`ET3eJLqC@_S=Wx#j9Z z9CG^`i#uRN7kLG8m9kZD4c+Zt0Aldf_%05ODyk>KQ18Px>5vH|rY{q0_9}A-qO>2Q zo$W{bIHARk@CPZu#a|(YBKm5;t&nlw#M99NPY=V8Zxgk$y!(exdls#AUfthC3LPV( z)+PkY0B9H6#RRlJ+VOy8VtNX6?bxtN-V1#Rut7~sU6-DLYm0dn;u)m{GzS%FzU*h?PF6!`av-|KGUGI#FMApLtm|V$ZJRn@8D;ocS1gotoHdD$PG8CYzVP(p_;-jDNXCFXuCGsmWoP z`~~V{lL`uF@G*@VVX&!DL?4zNI9*$((={7~&a@>ZzWSV2pK<9!AW26gd4frqqNMCf zlUYsHrXHYURIjO)a>f&M8mh6=eK$YXzDB61)=?n6)_--&bu54r{%p5WJMZFGJ4dB^ P&OK3k#hrKO+~xlO2KPZ5 literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/utils/__pycache__/path.cpython-36.pyc b/CDARTS_detection/mmcv/utils/__pycache__/path.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f3a370b29e6331f71b9ec12377e2505eb5b269e GIT binary patch literal 2193 zcmZuyU2oeq6eTIyk{#!>UtZP^BP%d?SmUitfpr+xq1}SD8`1&`Hq5|axT2iMwj__F z+jw(cn!dI_q3`=keBD$2LSMFXDcjA0$>!zd6-AxHb9r@VdAWN3-C_6Bkg>nnnahL! z4Vr!nBbnqw)-HP97j4f){I-u?3`=RbUFOBzpdCn0`ol1-v@4v&e!nU|7;yDCUTW7e zKd!ZFvMhsFti3ElSwX)dH|3J7z2fav`Jr5qtMJz3Rk8^wY@7lk+iP;`Kkb^sQ;rb^d})|2n11QJmhP<%GE0TcXe8$vGwQiRzv2oZe+P> z#7BuX5p5Y=)Gs8sih&`FBtjjydEX=dgumnqyw=P0i1ehj?fNe4UT${taVEc0Dp$7L z%T?MnO;J$Dx@l~~R3CIqIvSpbzZMzdb#%5GAC0=19P3zFPbWv#d;04gD{>8%-9ao9rEMwE=xGbkb!ZFR z66e{kqyxI2tAhJSSYEHdkcvL|m-&XkE>L_wBAMu-%_08>G`$T&tosn_1URQcZDQ=v zh|?XP`1i!rdxP;@#FDy!=d4djZiAsCiVRb6nm> zOsojuIVS33#0bmmxVRLg*h05C>WC(N4|DW@aFZ6_vfp-&aaj7|gz3%SYw)~dFpz8z zs?Vn0&nOg#df-AF;T4fz&hrU$1Yr_ogV`arevy4~CiRQN9D!$2a-m)@#6bpImz8P*FS33=Pcn52F3Kdyp;~1A zkJ{21YWFc|mUnBYbYf162rp<+r63U>(&Ps)4st~`Q1`guM)W|Y#xV9IfWPE)z@$GY zsU3qO2Qnz*CYXD(1NX^QkEMferU+ISd$O`l!e cfT7$Cc};|(hB8wYz83~zP%TxLs%5n5zjKP$umAu6 literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/utils/__pycache__/progressbar.cpython-36.pyc b/CDARTS_detection/mmcv/utils/__pycache__/progressbar.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..492c0109e9712c6e711d56966493672d39ad597c GIT binary patch literal 5359 zcmdT|Pj4H?72khck}Ham71gnm)Sc=Oh=?pXw$r3)8ryA>B5ffAwtxUkoz-e*Brdt! 
zWoDL}clg%zHEM&+om_ zoz+$E-iuD~__|?yXDmD}`uFjwuh4LVo3YWRzgBGat+r(v+~!Vf_no$*`<1xTciXOM z{KnueuRb$)Ra7T-yT-5ZI`^KL?Ky?NZay+vo6jM`IB1!L^Ld)a z&rOUlSPRoQ7GV~piSmQv5Twds)E6?O>9fa1=RRI_8;vm9CO6s^H`_LE@(On$#Nkz5 zduFsNdLu^L)!Q)IRlW-0YsJdvGCh?-sb2+h^v(k|m=4)-AlX?b3Og(e5|E=LV_8Sc zCrH}pbCP=)I%w&s;I;7n8n5~pn%v9{-^h)wIWfjgZsktTlyBr#f*!tLt7z@qmbWp- znHbQffo)j_ntQ8K)UrVJ{AAcKHvChO`TgLDkDd8v5zjhBeW4qs{Xr};!HbH@0-0g4 zqeOfnYuJbMsA-@nR+m`()sSnTzPP{FN&Dh9+T`@$2(nE?PC2GYT zqmw~pyw-E%HEwY`w)*ziG>xR8M@~7)q0~QEV{2?@mCVg8?ra(p6Mrk$jEMzxJKXIV z@~f=MtKC{|P0R^+aF@P7$@PQh2iO_}Nkam=>Q$E_U(D+Efg5RMyh0VhJ6XxEvqJxL`y78%BKcOrNDCQ`#7 zjz$4_ean&?v^{EC^}<$9RnhoHNQH0E^hsK{sza8LuVF@e>o^E|gH&c}wMf-E%Vk7+kt|C`SlxEbNuPJ@Z^ktJilADyxI`!&kHt{NI4AV24Rs-!- zvu3(hjVyj+$>J|L@7(s}19RK|76XI=AP2VJ&djdWwYk-GxD9G$Q-7PllH?21uMUlL z-=zS`suRreYdVlBWgv~-`tp4InBCetAMY>`2LmPeKD(uM*e8b%_FtH6bbTH^pfAm7 z)yoL_5q1WZXFMH3e`k_B;Z4wVXcd){c&Iv({J5xOgi|fERO%(YQDAK0okoZUoo#B( zjjk-}ONa1WMdW{^a2+21KqrUBNJ;MlZc2RVT0j_}Nf(&&rZKkAgW{mXzYr}0+H~y+ zeL=9~CUD2T2K(jqo51qd5jJB7bffF$HU-hR!krxI<|PdQwQ0O=jNJ@@)U9*3>*X%- zC9b)pIchGsGjX+X*mPzUGf6(lEN55~?1Rh%PAhZ7s~yOBFrMOoc>a0sh53>EF?yrB ztJ;3&V{O3m{oQv@#*C104&b|EZB6(@Ot6`7vNm0AqltSPtmkj zRFs6m*EZ0`C-bI3CXpwnP)gn)Z&33SYP2IT^jcMI^`bi6RogxpsJ1si-V#X`oQlF0 zSkYin^t|mW*-E$3UF8*W-P}d? zaDy`d*P#UJE|mo=J<*=2n0O*W;B%TF17sZ*%%Pr9#`HXf=MX-uSRe)CX(DD5RYa+r z0i18fX*!tBeIQTOzFtKuPlidzwnLLoHDb8KkR~33?E_X$n4Kz>a|X1HZOvr|BC;T6s8d=O z0_MdD+Xgf6`htoUQqW+b$eNz+p~!|ZSvVkxEl))v7)rEZjO3=JKc^zAbZhdf(J#03 zHnv!Qh{Mpy6Gr$Mv7sU%5F~CNMnx#^ouT>tpFuhi@@FGM#LLQM11Se$_b}W*j z`9LWlNvoeonMz5is;JHMq#>cGA3hz3@-umS;X?Bw;bpumLj@wxAu~Y+>A%b#ZtpCB zbxW+Nk#?xaE2`5&w;h_h5O@@GMKdGk^Jlue2b!YHVl)XKHq8yBV$VcUwk+U;2ZZoI zYv3n;j#=}FBn{5<@5dM*l8jzWt|N|+3LY7#sA#-JV??O+2-y~(j~G&SGn*q;bgbeV z;>vzBD`7`(Y*9f-2U{>qsq&U1wFz%RoZO{`3h9>JUcqrfRO7njU7__6RYXD-wHdR^ zdZ_ENbAY=;03M2p6$3}7pJ#b!<%ghrdKgj$rH8aPKk!G0VhtGoJp|SNEd@SxkvdFigrP5CQXc2 zF2+v|^CrOT_v5vEwcE(oG+0&tY|0J5;3}2IBm*V!_g{aTytPELXjTr0(}4Fadd;#2 z=v(WqcdtyL&+Bdw9dcjOf9VqC1`namABMs~AOTS#*4XoEkO&+*9!7C?GfLP#64d@t zACVtT9V1Fkk64+usQ4bDk|UrbNFl#rbR6E;esqCRPC8RzU$U=nS^ejXg+diON1g$J&mxtm6 zmlY(E{V)!cddVicq|T!`J7P&z!NoDaU2$0sA;N_a4Yf!PuO@-GE=hSwLf9E5J-WWl z&i?9*U*7JkrZ74$J*pQC{F6u`3vC}mBb4+y0{=G9x}&eUSSV+xV3y{~^jz9z>2zlu zTpfkNg#vfNF$@GBIHN;3B+E-GeE(-zUhaTfYg1mNoIPTHiyF!X z@?C0lg-^LiCkF7ML3_t3N}Ixgit z5q?1pRm??w?zL_2!iU=4g_+9Qz#(`mrM^##KcL0-DasVLtQ9UN$JeJt!W4$*)k2Jg z>LWD9)(=$+4O9%9s1zLh)~)i^;i9T=bX8&LDuNQMqY4rSj?CRoV!Z literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/utils/__pycache__/timer.cpython-36.pyc b/CDARTS_detection/mmcv/utils/__pycache__/timer.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdf2d62c2c6af6d9bc3394745123c00294d166c8 GIT binary patch literal 3491 zcmcInOK;mo5ayDUM9GghiIJvHY@e2pLbmfL3L$Y~1a8qojiUBRJuozPCDEoxb$6A- zs&c6U+{pJOZ62rB?*So_p^rY4^uIYalGvN zKaYbrpCMt{N?}?kpD0~OB%EFm5hVB$~aBEF8#1izLSQabL*Tt&10)0cQiL1~r2?y}Lo>l0K!4IH)m-k}% zJUWUcryn`t+HN#BJe|Gg!EhAIPX4*Gv%{m|C{>0dfV5=cH^jI6?O9|79Diw(*k-F! 
zPNC#axsHb8I53h2JWPe;M{ydSa5D%De-_0tSL1~DQpLCRxs9l}tz#)i&FvP=Y`3?z zw#qdZ^^R1O7~jAt9&AKlcW@##|E1I3?!DkzhG`=7#{XwqsS%7kQ&*abM^XI2Ih4*# z7U$e*-^)*mSkrl$w6(3HB$U23filfjz691TfUyA98-zeI89yAz@Z@qyZ%{k;!Qp<0 zv5OD(7~hZK)t^Blhy$j!TAkX7Er1DpHf4%Wtz&xz1V(J&?0++1T+@O)86E#$)B#h<#7O1)Tc|CkIgd@QoIy~`hBO09ut(1k z1|Pw|APVdlymjo%tXa<6=t-CSW$%B}s$>;%4Y?qzLfQ@klda|PIS;KI7MEawJLhx+ zZ2vTf$1CA2v06J5P28q39f*B`ho}Z$#YLhXCIy>aWgcEAOF7aMub12T z$(fz22JY7x`xS0Uve!Y(;dO7KtL;hmFf~C;0SEjE$v-z50H*n=wxQnu3@ zCt>Hwf}Kw|+%xBXp2zGH1QxC+q(%@|;*~;?5@1=epx~|1E~4%{+AW(B2Cn|Gx{w{} z9l+^3L>a3J%R`ix5$79=IL{B$vLUpW6;za#LMBlw&_GoCA`PKdLB`9xk;1c?Q*|9z ztYh;&HpH<=2&8qv5+XP%Yp%d73fIVg-c8qTRF^BB3x74Qp>VczH_|Mg^D(?Scek-l zYU&0iCsNLRxGoN8^e$Zw2Tc15(Wle#3PPXLrQ#7wC zaE6LCigOqpqPcA!+6UYb8~;fI3nJ?;jspDZH275->T&t xUf+8j$`R&k{+~rPJvxSe3vx9d!%AAdAioUi%)6hL`#WeQsaST_YHQ!S&OfkvJ(>Uj literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/utils/config.py b/CDARTS_detection/mmcv/utils/config.py new file mode 100644 index 0000000..9de6700 --- /dev/null +++ b/CDARTS_detection/mmcv/utils/config.py @@ -0,0 +1,159 @@ +import os.path as osp +import sys +from argparse import ArgumentParser +from importlib import import_module + +from addict import Dict + +from .misc import collections_abc +from .path import check_file_exist + + +class ConfigDict(Dict): + + def __missing__(self, name): + raise KeyError(name) + + def __getattr__(self, name): + try: + value = super(ConfigDict, self).__getattr__(name) + except KeyError: + ex = AttributeError("'{}' object has no attribute '{}'".format( + self.__class__.__name__, name)) + except Exception as e: + ex = e + else: + return value + raise ex + + +def add_args(parser, cfg, prefix=''): + for k, v in cfg.items(): + if isinstance(v, str): + parser.add_argument('--' + prefix + k) + elif isinstance(v, int): + parser.add_argument('--' + prefix + k, type=int) + elif isinstance(v, float): + parser.add_argument('--' + prefix + k, type=float) + elif isinstance(v, bool): + parser.add_argument('--' + prefix + k, action='store_true') + elif isinstance(v, dict): + add_args(parser, v, k + '.') + elif isinstance(v, collections_abc.Iterable): + parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+') + else: + print('connot parse key {} of type {}'.format(prefix + k, type(v))) + return parser + + +class Config(object): + """A facility for config and config files. + + It supports common file formats as configs: python/json/yaml. The interface + is the same as a dict object and also allows access config values as + attributes. + + Example: + >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1]))) + >>> cfg.a + 1 + >>> cfg.b + {'b1': [0, 1]} + >>> cfg.b.b1 + [0, 1] + >>> cfg = Config.fromfile('tests/data/config/a.py') + >>> cfg.filename + "/home/kchen/projects/mmcv/tests/data/config/a.py" + >>> cfg.item4 + 'test' + >>> cfg + "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: " + "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}" + + """ + + @staticmethod + def fromfile(filename): + filename = osp.abspath(osp.expanduser(filename)) + check_file_exist(filename) + if filename.endswith('.py'): + module_name = osp.basename(filename)[:-3] + if '.' 
+                raise ValueError('Dots are not allowed in config file path.')
+            config_dir = osp.dirname(filename)
+            sys.path.insert(0, config_dir)
+            mod = import_module(module_name)
+            sys.path.pop(0)
+            cfg_dict = {
+                name: value
+                for name, value in mod.__dict__.items()
+                if not name.startswith('__')
+            }
+        elif filename.endswith(('.yml', '.yaml', '.json')):
+            import mmcv
+            cfg_dict = mmcv.load(filename)
+        else:
+            raise IOError('Only py/yml/yaml/json types are supported now!')
+        return Config(cfg_dict, filename=filename)
+
+    @staticmethod
+    def auto_argparser(description=None):
+        """Generate an argparser from a config file automatically
+        (experimental).
+        """
+        partial_parser = ArgumentParser(description=description)
+        partial_parser.add_argument('config', help='config file path')
+        cfg_file = partial_parser.parse_known_args()[0].config
+        cfg = Config.fromfile(cfg_file)
+        parser = ArgumentParser(description=description)
+        parser.add_argument('config', help='config file path')
+        add_args(parser, cfg)
+        return parser, cfg
+
+    def __init__(self, cfg_dict=None, filename=None):
+        if cfg_dict is None:
+            cfg_dict = dict()
+        elif not isinstance(cfg_dict, dict):
+            raise TypeError('cfg_dict must be a dict, but got {}'.format(
+                type(cfg_dict)))
+
+        super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))
+        super(Config, self).__setattr__('_filename', filename)
+        if filename:
+            with open(filename, 'r') as f:
+                super(Config, self).__setattr__('_text', f.read())
+        else:
+            super(Config, self).__setattr__('_text', '')
+
+    @property
+    def filename(self):
+        return self._filename
+
+    @property
+    def text(self):
+        return self._text
+
+    def __repr__(self):
+        return 'Config (path: {}): {}'.format(self.filename,
+                                              self._cfg_dict.__repr__())
+
+    def __len__(self):
+        return len(self._cfg_dict)
+
+    def __getattr__(self, name):
+        return getattr(self._cfg_dict, name)
+
+    def __getitem__(self, name):
+        return self._cfg_dict.__getitem__(name)
+
+    def __setattr__(self, name, value):
+        if isinstance(value, dict):
+            value = ConfigDict(value)
+        self._cfg_dict.__setattr__(name, value)
+
+    def __setitem__(self, name, value):
+        if isinstance(value, dict):
+            value = ConfigDict(value)
+        self._cfg_dict.__setitem__(name, value)
+
+    def __iter__(self):
+        return iter(self._cfg_dict)
diff --git a/CDARTS_detection/mmcv/utils/misc.py b/CDARTS_detection/mmcv/utils/misc.py
new file mode 100644
index 0000000..e83e6d9
--- /dev/null
+++ b/CDARTS_detection/mmcv/utils/misc.py
@@ -0,0 +1,218 @@
+import collections
+import functools
+import itertools
+import subprocess
+from importlib import import_module
+
+import six
+
+# ABCs from collections will be deprecated in python 3.8+,
+# while collections.abc is not available in python 2.7
+try:
+    import collections.abc as collections_abc
+except ImportError:
+    import collections as collections_abc
+
+
+def is_str(x):
+    """Whether the input is a string instance."""
+    return isinstance(x, six.string_types)
+
+
+def iter_cast(inputs, dst_type, return_type=None):
+    """Cast elements of an iterable object into some type.
+
+    Args:
+        inputs (Iterable): The input object.
+        dst_type (type): Destination type.
+        return_type (type, optional): If specified, the output object will be
+            converted to this type, otherwise an iterator.
+
+    Returns:
+        iterator or specified type: The converted object.
+ """ + if not isinstance(inputs, collections_abc.Iterable): + raise TypeError('inputs must be an iterable object') + if not isinstance(dst_type, type): + raise TypeError('"dst_type" must be a valid type') + + out_iterable = six.moves.map(dst_type, inputs) + + if return_type is None: + return out_iterable + else: + return return_type(out_iterable) + + +def list_cast(inputs, dst_type): + """Cast elements of an iterable object into a list of some type. + + A partial method of :func:`iter_cast`. + """ + return iter_cast(inputs, dst_type, return_type=list) + + +def tuple_cast(inputs, dst_type): + """Cast elements of an iterable object into a tuple of some type. + + A partial method of :func:`iter_cast`. + """ + return iter_cast(inputs, dst_type, return_type=tuple) + + +def is_seq_of(seq, expected_type, seq_type=None): + """Check whether it is a sequence of some type. + + Args: + seq (Sequence): The sequence to be checked. + expected_type (type): Expected type of sequence items. + seq_type (type, optional): Expected sequence type. + + Returns: + bool: Whether the sequence is valid. + """ + if seq_type is None: + exp_seq_type = collections_abc.Sequence + else: + assert isinstance(seq_type, type) + exp_seq_type = seq_type + if not isinstance(seq, exp_seq_type): + return False + for item in seq: + if not isinstance(item, expected_type): + return False + return True + + +def is_list_of(seq, expected_type): + """Check whether it is a list of some type. + + A partial method of :func:`is_seq_of`. + """ + return is_seq_of(seq, expected_type, seq_type=list) + + +def is_tuple_of(seq, expected_type): + """Check whether it is a tuple of some type. + + A partial method of :func:`is_seq_of`. + """ + return is_seq_of(seq, expected_type, seq_type=tuple) + + +def slice_list(in_list, lens): + """Slice a list into several sub lists by a list of given length. + + Args: + in_list (list): The list to be sliced. + lens(int or list): The expected length of each out list. + + Returns: + list: A list of sliced list. + """ + if not isinstance(lens, list): + raise TypeError('"indices" must be a list of integers') + elif sum(lens) != len(in_list): + raise ValueError( + 'sum of lens and list length does not match: {} != {}'.format( + sum(lens), len(in_list))) + out_list = [] + idx = 0 + for i in range(len(lens)): + out_list.append(in_list[idx:idx + lens[i]]) + idx += lens[i] + return out_list + + +def concat_list(in_list): + """Concatenate a list of list into a single list. + + Args: + in_list (list): The list of list to be merged. + + Returns: + list: The concatenated flat list. + """ + return list(itertools.chain(*in_list)) + + +def check_prerequisites( + prerequisites, + checker, + msg_tmpl='Prerequisites "{}" are required in method "{}" but not ' + 'found, please install them first.'): + """A decorator factory to check if prerequisites are satisfied. + + Args: + prerequisites (str of list[str]): Prerequisites to be checked. + checker (callable): The checker method that returns True if a + prerequisite is meet, False otherwise. + msg_tmpl (str): The message template with two variables. + + Returns: + decorator: A specific decorator. 
+ """ + + def wrap(func): + + @functools.wraps(func) + def wrapped_func(*args, **kwargs): + requirements = [prerequisites] if isinstance( + prerequisites, str) else prerequisites + missing = [] + for item in requirements: + if not checker(item): + missing.append(item) + if missing: + print(msg_tmpl.format(', '.join(missing), func.__name__)) + raise RuntimeError('Prerequisites not meet.') + else: + return func(*args, **kwargs) + + return wrapped_func + + return wrap + + +def _check_py_package(package): + try: + import_module(package) + except ImportError: + return False + else: + return True + + +def _check_executable(cmd): + if subprocess.call('which {}'.format(cmd), shell=True) != 0: + return False + else: + return True + + +def requires_package(prerequisites): + """A decorator to check if some python packages are installed. + + Example: + >>> @requires_package('numpy') + >>> func(arg1, args): + >>> return numpy.zeros(1) + array([0.]) + >>> @requires_package(['numpy', 'non_package']) + >>> func(arg1, args): + >>> return numpy.zeros(1) + ImportError + """ + return check_prerequisites(prerequisites, checker=_check_py_package) + + +def requires_executable(prerequisites): + """A decorator to check if some executable files are installed. + + Example: + >>> @requires_executable('ffmpeg') + >>> func(arg1, args): + >>> print(1) + 1 + """ + return check_prerequisites(prerequisites, checker=_check_executable) diff --git a/CDARTS_detection/mmcv/utils/path.py b/CDARTS_detection/mmcv/utils/path.py new file mode 100644 index 0000000..d390da2 --- /dev/null +++ b/CDARTS_detection/mmcv/utils/path.py @@ -0,0 +1,79 @@ +import os +import os.path as osp +import sys +from pathlib import Path + +import six + +from .misc import is_str + +if sys.version_info <= (3, 3): + FileNotFoundError = IOError +else: + FileNotFoundError = FileNotFoundError + + +def is_filepath(x): + if is_str(x) or isinstance(x, Path): + return True + else: + return False + + +def fopen(filepath, *args, **kwargs): + if is_str(filepath): + return open(filepath, *args, **kwargs) + elif isinstance(filepath, Path): + return filepath.open(*args, **kwargs) + + +def check_file_exist(filename, msg_tmpl='file "{}" does not exist'): + if not osp.isfile(filename): + raise FileNotFoundError(msg_tmpl.format(filename)) + + +def mkdir_or_exist(dir_name, mode=0o777): + if dir_name == '': + return + dir_name = osp.expanduser(dir_name) + if six.PY3: + os.makedirs(dir_name, mode=mode, exist_ok=True) + else: + if not osp.isdir(dir_name): + os.makedirs(dir_name, mode=mode) + + +def symlink(src, dst, overwrite=True, **kwargs): + if os.path.lexists(dst) and overwrite: + os.remove(dst) + os.symlink(src, dst, **kwargs) + + +def _scandir_py35(dir_path, suffix=None): + for entry in os.scandir(dir_path): + if not entry.is_file(): + continue + filename = entry.name + if suffix is None: + yield filename + elif filename.endswith(suffix): + yield filename + + +def _scandir_py(dir_path, suffix=None): + for filename in os.listdir(dir_path): + if not osp.isfile(osp.join(dir_path, filename)): + continue + if suffix is None: + yield filename + elif filename.endswith(suffix): + yield filename + + +def scandir(dir_path, suffix=None): + if suffix is not None and not isinstance(suffix, (str, tuple)): + raise TypeError('"suffix" must be a string or tuple of strings') + if sys.version_info >= (3, 5): + return _scandir_py35(dir_path, suffix) + else: + return _scandir_py(dir_path, suffix) diff --git a/CDARTS_detection/mmcv/utils/progressbar.py 
new file mode 100644
index 0000000..0687a3d
--- /dev/null
+++ b/CDARTS_detection/mmcv/utils/progressbar.py
@@ -0,0 +1,174 @@
+import sys
+from multiprocessing import Pool
+
+from .misc import collections_abc
+from .timer import Timer
+
+
+class ProgressBar(object):
+    """A progress bar which can print the progress."""
+
+    def __init__(self, task_num=0, bar_width=50, start=True):
+        self.task_num = task_num
+        max_bar_width = self._get_max_bar_width()
+        self.bar_width = (
+            bar_width if bar_width <= max_bar_width else max_bar_width)
+        self.completed = 0
+        if start:
+            self.start()
+
+    def _get_max_bar_width(self):
+        if sys.version_info > (3, 3):
+            from shutil import get_terminal_size
+        else:
+            from backports.shutil_get_terminal_size import get_terminal_size
+        terminal_width, _ = get_terminal_size()
+        max_bar_width = min(int(terminal_width * 0.6), terminal_width - 50)
+        if max_bar_width < 10:
+            print('terminal width is too small ({}), please consider '
+                  'widening the terminal for better progress bar '
+                  'visualization'.format(terminal_width))
+            max_bar_width = 10
+        return max_bar_width
+
+    def start(self):
+        if self.task_num > 0:
+            sys.stdout.write('[{}] 0/{}, elapsed: 0s, ETA:'.format(
+                ' ' * self.bar_width, self.task_num))
+        else:
+            sys.stdout.write('completed: 0, elapsed: 0s')
+        sys.stdout.flush()
+        self.timer = Timer()
+
+    def update(self):
+        self.completed += 1
+        elapsed = self.timer.since_start()
+        fps = self.completed / elapsed
+        if self.task_num > 0:
+            percentage = self.completed / float(self.task_num)
+            eta = int(elapsed * (1 - percentage) / percentage + 0.5)
+            mark_width = int(self.bar_width * percentage)
+            bar_chars = '>' * mark_width + ' ' * (self.bar_width - mark_width)
+            sys.stdout.write(
+                '\r[{}] {}/{}, {:.1f} task/s, elapsed: {}s, ETA: {:5}s'.format(
+                    bar_chars, self.completed, self.task_num, fps,
+                    int(elapsed + 0.5), eta))
+        else:
+            sys.stdout.write(
+                'completed: {}, elapsed: {}s, {:.1f} tasks/s'.format(
+                    self.completed, int(elapsed + 0.5), fps))
+        sys.stdout.flush()
+
+
+def track_progress(func, tasks, bar_width=50, **kwargs):
+    """Track the progress of tasks execution with a progress bar.
+
+    Tasks are done with a simple for-loop.
+
+    Args:
+        func (callable): The function to be applied to each task.
+        tasks (list or tuple[Iterable, int]): A list of tasks or
+            (tasks, total num).
+        bar_width (int): Width of progress bar.
+
+    Returns:
+        list: The task results.
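+
+    A minimal illustrative sketch (``square`` is a stand-in task function;
+    the progress bar itself is written to stdout):
+
+    :Example:
+
+    >>> def square(x):
+    ...     return x * x
+    >>> track_progress(square, [1, 2, 3], bar_width=10)  # doctest: +SKIP
+    [1, 4, 9]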
+ """ + if isinstance(tasks, tuple): + assert len(tasks) == 2 + assert isinstance(tasks[0], collections_abc.Iterable) + assert isinstance(tasks[1], int) + task_num = tasks[1] + tasks = tasks[0] + elif isinstance(tasks, collections_abc.Iterable): + task_num = len(tasks) + else: + raise TypeError( + '"tasks" must be an iterable object or a (iterator, int) tuple') + prog_bar = ProgressBar(task_num, bar_width) + results = [] + for task in tasks: + results.append(func(task, **kwargs)) + prog_bar.update() + sys.stdout.write('\n') + return results + + +def init_pool(process_num, initializer=None, initargs=None): + if initializer is None: + return Pool(process_num) + elif initargs is None: + return Pool(process_num, initializer) + else: + if not isinstance(initargs, tuple): + raise TypeError('"initargs" must be a tuple') + return Pool(process_num, initializer, initargs) + + +def track_parallel_progress(func, + tasks, + nproc, + initializer=None, + initargs=None, + bar_width=50, + chunksize=1, + skip_first=False, + keep_order=True): + """Track the progress of parallel task execution with a progress bar. + + The built-in :mod:`multiprocessing` module is used for process pools and + tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`. + + Args: + func (callable): The function to be applied to each task. + tasks (list or tuple[Iterable, int]): A list of tasks or + (tasks, total num). + nproc (int): Process (worker) number. + initializer (None or callable): Refer to :class:`multiprocessing.Pool` + for details. + initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for + details. + chunksize (int): Refer to :class:`multiprocessing.Pool` for details. + bar_width (int): Width of progress bar. + skip_first (bool): Whether to skip the first sample for each worker + when estimating fps, since the initialization step may takes + longer. + keep_order (bool): If True, :func:`Pool.imap` is used, otherwise + :func:`Pool.imap_unordered` is used. + + Returns: + list: The task results. + """ + if isinstance(tasks, tuple): + assert len(tasks) == 2 + assert isinstance(tasks[0], collections_abc.Iterable) + assert isinstance(tasks[1], int) + task_num = tasks[1] + tasks = tasks[0] + elif isinstance(tasks, collections_abc.Iterable): + task_num = len(tasks) + else: + raise TypeError( + '"tasks" must be an iterable object or a (iterator, int) tuple') + pool = init_pool(nproc, initializer, initargs) + start = not skip_first + task_num -= nproc * chunksize * int(skip_first) + prog_bar = ProgressBar(task_num, bar_width, start) + results = [] + if keep_order: + gen = pool.imap(func, tasks, chunksize) + else: + gen = pool.imap_unordered(func, tasks, chunksize) + for result in gen: + results.append(result) + if skip_first: + if len(results) < nproc * chunksize: + continue + elif len(results) == nproc * chunksize: + prog_bar.start() + continue + prog_bar.update() + sys.stdout.write('\n') + pool.close() + pool.join() + return results diff --git a/CDARTS_detection/mmcv/utils/timer.py b/CDARTS_detection/mmcv/utils/timer.py new file mode 100644 index 0000000..3214eca --- /dev/null +++ b/CDARTS_detection/mmcv/utils/timer.py @@ -0,0 +1,117 @@ +from time import time + + +class TimerError(Exception): + + def __init__(self, message): + self.message = message + super(TimerError, self).__init__(message) + + +class Timer(object): + """A flexible Timer class. 
+
+    :Example:
+
+    >>> import time
+    >>> import mmcv
+    >>> with mmcv.Timer():
+    ...     # simulate a code block that will run for 1s
+    ...     time.sleep(1)
+    1.000
+    >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'):
+    ...     # simulate a code block that will run for 1s
+    ...     time.sleep(1)
+    it takes 1.0 seconds
+    >>> timer = mmcv.Timer()
+    >>> time.sleep(0.5)
+    >>> print(timer.since_start())
+    0.500
+    >>> time.sleep(0.5)
+    >>> print(timer.since_last_check())
+    0.500
+    >>> print(timer.since_start())
+    1.000
+    """
+
+    def __init__(self, start=True, print_tmpl=None):
+        self._is_running = False
+        self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}'
+        if start:
+            self.start()
+
+    @property
+    def is_running(self):
+        """bool: indicate whether the timer is running"""
+        return self._is_running
+
+    def __enter__(self):
+        self.start()
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        print(self.print_tmpl.format(self.since_last_check()))
+        self._is_running = False
+
+    def start(self):
+        """Start the timer."""
+        if not self._is_running:
+            self._t_start = time()
+            self._is_running = True
+        self._t_last = time()
+
+    def since_start(self):
+        """Total time since the timer started.
+
+        Returns (float): Time in seconds.
+        """
+        if not self._is_running:
+            raise TimerError('timer is not running')
+        self._t_last = time()
+        return self._t_last - self._t_start
+
+    def since_last_check(self):
+        """Time since the last check.
+
+        Either :func:`since_start` or :func:`since_last_check` counts as a
+        check operation.
+
+        Returns (float): Time in seconds.
+        """
+        if not self._is_running:
+            raise TimerError('timer is not running')
+        dur = time() - self._t_last
+        self._t_last = time()
+        return dur
+
+
+_g_timers = {}  # global timers
+
+
+def check_time(timer_id):
+    """Add check points in a single line.
+
+    This method is suitable for running a task on a list of items. A timer
+    will be registered when the method is called for the first time.
+
+    :Example:
+
+    >>> import time
+    >>> import mmcv
+    >>> for i in range(1, 6):
+    ...     # simulate a code block
+    ...     time.sleep(i)
+    ...     mmcv.check_time('task1')
+    2.000
+    3.000
+    4.000
+    5.000
+
+    Args:
+        timer_id (str): Timer identifier.
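+
+    Returns:
+        float: Time elapsed since the last check of the same ``timer_id``;
+            0 on the first call.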
+ """ + if timer_id not in _g_timers: + _g_timers[timer_id] = Timer() + return 0 + else: + return _g_timers[timer_id].since_last_check() diff --git a/CDARTS_detection/mmcv/version.py b/CDARTS_detection/mmcv/version.py new file mode 100644 index 0000000..51e2f03 --- /dev/null +++ b/CDARTS_detection/mmcv/version.py @@ -0,0 +1 @@ +__version__ = '0.2.12' diff --git a/CDARTS_detection/mmcv/video/__init__.py b/CDARTS_detection/mmcv/video/__init__.py new file mode 100644 index 0000000..ed05cc1 --- /dev/null +++ b/CDARTS_detection/mmcv/video/__init__.py @@ -0,0 +1,10 @@ +from .io import Cache, VideoReader, frames2video +from .processing import convert_video, resize_video, cut_video, concat_video +from .optflow import (flowread, flowwrite, quantize_flow, dequantize_flow, + flow_warp) + +__all__ = [ + 'Cache', 'VideoReader', 'frames2video', 'convert_video', 'resize_video', + 'cut_video', 'concat_video', 'flowread', 'flowwrite', 'quantize_flow', + 'dequantize_flow', 'flow_warp' +] diff --git a/CDARTS_detection/mmcv/video/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmcv/video/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27da1747b6f6b55259f936307f72a0a3d3896318 GIT binary patch literal 568 zcmZ{hy>8nu5XVJ*Sl_k_bm-6*&{6@(+BN~YcSwK^TnIwP*aCE-O3F6eH_A)oLAYk> zD|jkT2!gg~3Gj!zKaa<|<8@sZH=R!@fesbJw~{EYWEt| zmFKuS`}W%I4AQoC(zZneHLGs!_2}*H*e*kCh3LnA7UIKncG*YNa2V8Q`V&jtyk+m$ J`?z2Q`vno-lP3TG literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmcv/video/__pycache__/io.cpython-36.pyc b/CDARTS_detection/mmcv/video/__pycache__/io.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c573d51a3c1ccd84eb9eb7ffcfbba613025a2ec2 GIT binary patch literal 10948 zcmcIq&2Jn>cJJ=_9+E?f`nF`dSL?MowoJ+Lha5tym6nohEn-R$X=yXNMyom9Bzrht zuI`~I?l2peTG#;+1Ia1J9CAp2`~d-S3vvvQTY&U6Cm}%)C_sv&TM10 zGuN2w9BUlw%s1vc#~a5@qq~My#rBEL$;OOODIXutF^DEwI@1%F?nb~;NGgpoJ zndca@u~IkVse8)vmG9jOnh_sAUA^txyMKFS`Qep%KAvlC`OR%-BWU}MzZ-;6TMC8{4lJS;uFg^?>qM&+`I4Gd2sWS+s>oAx7I$6PaZ1$ z`1ak8K3T@$9(Gy-y!3FUQBzg8TPYbuX*&{KLTWdyiBQ`O4Yo z4OFvkLQutxI&SxHKIQn$^`vX=vBNBu?L7-_hH4kw%=8! zQSt2^K}Nxz|5~P}svZf8o75Nnc;_XfSVCrq>1-L>MOEVzM)JfdZduy}b&++g9QEo? 
diff --git a/CDARTS_detection/mmcv/video/io.py b/CDARTS_detection/mmcv/video/io.py
new file mode 100644
index 0000000..76b2260
--- /dev/null
+++ b/CDARTS_detection/mmcv/video/io.py
@@ -0,0 +1,332 @@
+import os.path as osp
+from collections import OrderedDict
+
+import cv2
+
+from mmcv.opencv_info import USE_OPENCV2
+from mmcv.utils import (check_file_exist, mkdir_or_exist, scandir,
+                        track_progress)
+
+if not USE_OPENCV2:
+    from cv2 import (CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FPS,
+                     CAP_PROP_FRAME_COUNT, CAP_PROP_FOURCC,
+                     CAP_PROP_POS_FRAMES, VideoWriter_fourcc)
+else:
+    from cv2.cv import CV_CAP_PROP_FRAME_WIDTH as CAP_PROP_FRAME_WIDTH
+    from cv2.cv import CV_CAP_PROP_FRAME_HEIGHT as CAP_PROP_FRAME_HEIGHT
+    from cv2.cv import CV_CAP_PROP_FPS as CAP_PROP_FPS
+    from cv2.cv import CV_CAP_PROP_FRAME_COUNT as CAP_PROP_FRAME_COUNT
+    from cv2.cv import CV_CAP_PROP_FOURCC as CAP_PROP_FOURCC
+    from cv2.cv import CV_CAP_PROP_POS_FRAMES as CAP_PROP_POS_FRAMES
+    from cv2.cv import CV_FOURCC as VideoWriter_fourcc
+
+
+class Cache(object):
+
+    def __init__(self, capacity):
+        if capacity <= 0:
+            raise ValueError('capacity must be a positive integer')
+        self._cache = OrderedDict()
+        self._capacity = int(capacity)
+
+    @property
+    def capacity(self):
+        return self._capacity
+
+    @property
+    def size(self):
+        return len(self._cache)
+
+    def put(self, key, val):
+        if key in self._cache:
+            return
+        if len(self._cache) >= self.capacity:
+            self._cache.popitem(last=False)
+        self._cache[key] = val
+
+    def get(self, key, default=None):
+        val = self._cache[key] if key in self._cache else default
+        return val
+
+
+class VideoReader(object):
+    """Video class with similar usage to a list object.
+
+    This video wrapper class provides convenient APIs to access frames.
+    OpenCV's VideoCapture class has a known issue that jumping to a certain
+    frame may be inaccurate; this class works around it by checking the
+    position after each jump.
+    A cache is used when decoding videos, so if the same frame is visited a
+    second time it does not need to be decoded again while it is still
+    stored in the cache.
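+
+    The cache keeps the most recently decoded frames, up to
+    ``cache_capacity`` of them, and evicts the oldest entry first.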
+
+    :Example:
+
+    >>> import mmcv
+    >>> v = mmcv.VideoReader('sample.mp4')
+    >>> len(v)  # get the total frame number with `len()`
+    120
+    >>> for img in v:  # v is iterable
+    ...     mmcv.imshow(img)
+    >>> v[5]  # get the 6th frame
+    """
+
+    def __init__(self, filename, cache_capacity=10):
+        check_file_exist(filename, 'Video file not found: ' + filename)
+        self._vcap = cv2.VideoCapture(filename)
+        assert cache_capacity > 0
+        self._cache = Cache(cache_capacity)
+        self._position = 0
+        # get basic info
+        self._width = int(self._vcap.get(CAP_PROP_FRAME_WIDTH))
+        self._height = int(self._vcap.get(CAP_PROP_FRAME_HEIGHT))
+        self._fps = self._vcap.get(CAP_PROP_FPS)
+        self._frame_cnt = int(self._vcap.get(CAP_PROP_FRAME_COUNT))
+        self._fourcc = self._vcap.get(CAP_PROP_FOURCC)
+
+    @property
+    def vcap(self):
+        """:obj:`cv2.VideoCapture`: The raw VideoCapture object."""
+        return self._vcap
+
+    @property
+    def opened(self):
+        """bool: Indicate whether the video is opened."""
+        return self._vcap.isOpened()
+
+    @property
+    def width(self):
+        """int: Width of video frames."""
+        return self._width
+
+    @property
+    def height(self):
+        """int: Height of video frames."""
+        return self._height
+
+    @property
+    def resolution(self):
+        """tuple: Video resolution (width, height)."""
+        return (self._width, self._height)
+
+    @property
+    def fps(self):
+        """float: FPS of the video."""
+        return self._fps
+
+    @property
+    def frame_cnt(self):
+        """int: Total frames of the video."""
+        return self._frame_cnt
+
+    @property
+    def fourcc(self):
+        """str: "Four character code" of the video."""
+        return self._fourcc
+
+    @property
+    def position(self):
+        """int: Current cursor position, i.e. the index of the frame to be
+        decoded next."""
+        return self._position
+
+    def _get_real_position(self):
+        return int(round(self._vcap.get(CAP_PROP_POS_FRAMES)))
+
+    def _set_real_position(self, frame_id):
+        self._vcap.set(CAP_PROP_POS_FRAMES, frame_id)
+        pos = self._get_real_position()
+        for _ in range(frame_id - pos):
+            self._vcap.read()
+        self._position = frame_id
+
+    def read(self):
+        """Read the next frame.
+
+        If the next frame has been decoded before and is in the cache, it is
+        returned directly; otherwise it is decoded, cached and returned.
+
+        Returns:
+            ndarray or None: Return the frame if successful, otherwise None.
+        """
+        if self._cache:
+            img = self._cache.get(self._position)
+            if img is not None:
+                ret = True
+            else:
+                if self._position != self._get_real_position():
+                    self._set_real_position(self._position)
+                ret, img = self._vcap.read()
+                if ret:
+                    self._cache.put(self._position, img)
+        else:
+            ret, img = self._vcap.read()
+        if ret:
+            self._position += 1
+        return img
+
+    def get_frame(self, frame_id):
+        """Get frame by index.
+
+        Args:
+            frame_id (int): Index of the expected frame, 0-based.
+
+        Returns:
+            ndarray or None: Return the frame if successful, otherwise None.
+        """
+        if frame_id < 0 or frame_id >= self._frame_cnt:
+            raise IndexError(
+                '"frame_id" must be between 0 and {}'.format(self._frame_cnt -
+                                                             1))
+        if frame_id == self._position:
+            return self.read()
+        if self._cache:
+            img = self._cache.get(frame_id)
+            if img is not None:
+                self._position = frame_id + 1
+                return img
+        self._set_real_position(frame_id)
+        ret, img = self._vcap.read()
+        if ret:
+            if self._cache:
+                self._cache.put(self._position, img)
+            self._position += 1
+        return img
+
+    def current_frame(self):
+        """Get the current frame (the frame that was just visited).
+
+        Returns:
+            ndarray or None: If the video is fresh, return None, otherwise
+                return the frame.
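+
+        A short sketch (assuming ``sample.mp4`` exists; hypothetical path):
+
+        :Example:
+
+        >>> v = VideoReader('sample.mp4')
+        >>> img = v.read()  # decodes frame 0, cursor moves to 1
+        >>> v.current_frame() is not None  # frame 0, served from the cache
+        True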
+ """ + if self._position == 0: + return None + return self._cache.get(self._position - 1) + + def cvt2frames(self, + frame_dir, + file_start=0, + filename_tmpl='{:06d}.jpg', + start=0, + max_num=0, + show_progress=True): + """Convert a video to frame images + + Args: + frame_dir (str): Output directory to store all the frame images. + file_start (int): Filenames will start from the specified number. + filename_tmpl (str): Filename template with the index as the + placeholder. + start (int): The starting frame index. + max_num (int): Maximum number of frames to be written. + show_progress (bool): Whether to show a progress bar. + """ + mkdir_or_exist(frame_dir) + if max_num == 0: + task_num = self.frame_cnt - start + else: + task_num = min(self.frame_cnt - start, max_num) + if task_num <= 0: + raise ValueError('start must be less than total frame number') + if start > 0: + self._set_real_position(start) + + def write_frame(file_idx): + img = self.read() + filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) + cv2.imwrite(filename, img) + + if show_progress: + track_progress(write_frame, range(file_start, + file_start + task_num)) + else: + for i in range(task_num): + img = self.read() + if img is None: + break + filename = osp.join(frame_dir, + filename_tmpl.format(i + file_start)) + cv2.imwrite(filename, img) + + def __len__(self): + return self.frame_cnt + + def __getitem__(self, index): + if isinstance(index, slice): + return [ + self.get_frame(i) + for i in range(*index.indices(self.frame_cnt)) + ] + # support negative indexing + if index < 0: + index += self.frame_cnt + if index < 0: + raise IndexError('index out of range') + return self.get_frame(index) + + def __iter__(self): + self._set_real_position(0) + return self + + def __next__(self): + img = self.read() + if img is not None: + return img + else: + raise StopIteration + + next = __next__ + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self._vcap.release() + + +def frames2video(frame_dir, + video_file, + fps=30, + fourcc='XVID', + filename_tmpl='{:06d}.jpg', + start=0, + end=0, + show_progress=True): + """Read the frame images from a directory and join them as a video + + Args: + frame_dir (str): The directory containing video frames. + video_file (str): Output filename. + fps (float): FPS of the output video. + fourcc (str): Fourcc of the output video, this should be compatible + with the output file type. + filename_tmpl (str): Filename template with the index as the variable. + start (int): Starting frame index. + end (int): Ending frame index. + show_progress (bool): Whether to show a progress bar. 
+ """ + if end == 0: + ext = filename_tmpl.split('.')[-1] + end = len([name for name in scandir(frame_dir, ext)]) + first_file = osp.join(frame_dir, filename_tmpl.format(start)) + check_file_exist(first_file, 'The start frame not found: ' + first_file) + img = cv2.imread(first_file) + height, width = img.shape[:2] + resolution = (width, height) + vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps, + resolution) + + def write_frame(file_idx): + filename = osp.join(frame_dir, filename_tmpl.format(file_idx)) + img = cv2.imread(filename) + vwriter.write(img) + + if show_progress: + track_progress(write_frame, range(start, end)) + else: + for i in range(start, end): + filename = osp.join(frame_dir, filename_tmpl.format(i)) + img = cv2.imread(filename) + vwriter.write(img) + vwriter.release() diff --git a/CDARTS_detection/mmcv/video/optflow.py b/CDARTS_detection/mmcv/video/optflow.py new file mode 100644 index 0000000..6404ea4 --- /dev/null +++ b/CDARTS_detection/mmcv/video/optflow.py @@ -0,0 +1,171 @@ +import numpy as np + +from mmcv._ext import flow_warp_c +from mmcv.arraymisc import dequantize, quantize +from mmcv.image import imread, imwrite +from mmcv.utils import is_str + + +def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs): + """Read an optical flow map. + + Args: + flow_or_path (ndarray or str): A flow map or filepath. + quantize (bool): whether to read quantized pair, if set to True, + remaining args will be passed to :func:`dequantize_flow`. + concat_axis (int): The axis that dx and dy are concatenated, + can be either 0 or 1. Ignored if quantize is False. + + Returns: + ndarray: Optical flow represented as a (h, w, 2) numpy array + """ + if isinstance(flow_or_path, np.ndarray): + if (flow_or_path.ndim != 3) or (flow_or_path.shape[-1] != 2): + raise ValueError('Invalid flow with shape {}'.format( + flow_or_path.shape)) + return flow_or_path + elif not is_str(flow_or_path): + raise TypeError( + '"flow_or_path" must be a filename or numpy array, not {}'.format( + type(flow_or_path))) + + if not quantize: + with open(flow_or_path, 'rb') as f: + try: + header = f.read(4).decode('utf-8') + except Exception: + raise IOError('Invalid flow file: {}'.format(flow_or_path)) + else: + if header != 'PIEH': + raise IOError( + 'Invalid flow file: {}, header does not contain PIEH'. + format(flow_or_path)) + + w = np.fromfile(f, np.int32, 1).squeeze() + h = np.fromfile(f, np.int32, 1).squeeze() + flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2)) + else: + assert concat_axis in [0, 1] + cat_flow = imread(flow_or_path, flag='unchanged') + if cat_flow.ndim != 2: + raise IOError( + '{} is not a valid quantized flow file, its dimension is {}.'. + format(flow_or_path, cat_flow.ndim)) + assert cat_flow.shape[concat_axis] % 2 == 0 + dx, dy = np.split(cat_flow, 2, axis=concat_axis) + flow = dequantize_flow(dx, dy, *args, **kwargs) + + return flow.astype(np.float32) + + +def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs): + """Write optical flow to file. + + If the flow is not quantized, it will be saved as a .flo file losslessly, + otherwise a jpeg image which is lossy but of much smaller size. (dx and dy + will be concatenated horizontally into a single image if quantize is True.) + + Args: + flow (ndarray): (h, w, 2) array of optical flow. + filename (str): Output filepath. + quantize (bool): Whether to quantize the flow and save it to 2 jpeg + images. If set to True, remaining args will be passed to + :func:`quantize_flow`. 
+        concat_axis (int): The axis along which dx and dy are concatenated,
+            can be either 0 or 1. Ignored if quantize is False.
+    """
+    if not quantize:
+        with open(filename, 'wb') as f:
+            f.write('PIEH'.encode('utf-8'))
+            np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
+            flow = flow.astype(np.float32)
+            flow.tofile(f)
+            f.flush()
+    else:
+        assert concat_axis in [0, 1]
+        dx, dy = quantize_flow(flow, *args, **kwargs)
+        dxdy = np.concatenate((dx, dy), axis=concat_axis)
+        imwrite(dxdy, filename)
+
+
+def quantize_flow(flow, max_val=0.02, norm=True):
+    """Quantize flow to [0, 255].
+
+    After this step, the size of flow will be much smaller, and can be
+    dumped as jpeg images.
+
+    Args:
+        flow (ndarray): (h, w, 2) array of optical flow.
+        max_val (float): Maximum value of flow; values beyond
+            [-max_val, max_val] will be truncated.
+        norm (bool): Whether to divide flow values by image width/height.
+
+    Returns:
+        tuple[ndarray]: Quantized dx and dy.
+    """
+    h, w, _ = flow.shape
+    dx = flow[..., 0]
+    dy = flow[..., 1]
+    if norm:
+        dx = dx / w  # avoid inplace operations
+        dy = dy / h
+    # use 255 levels instead of 256 to make sure 0 is 0 after dequantization.
+    flow_comps = [
+        quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy]
+    ]
+    return tuple(flow_comps)
+
+
+def dequantize_flow(dx, dy, max_val=0.02, denorm=True):
+    """Recover flow from quantized dx and dy.
+
+    Args:
+        dx (ndarray): Quantized dx.
+        dy (ndarray): Quantized dy.
+        max_val (float): Maximum value used when quantizing.
+        denorm (bool): Whether to multiply flow values by the image
+            width/height.
+
+    Returns:
+        ndarray: Dequantized flow.
+    """
+    assert dx.shape == dy.shape
+    assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1)
+
+    dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]]
+
+    if denorm:
+        dx *= dx.shape[1]
+        dy *= dx.shape[0]
+    flow = np.dstack((dx, dy))
+    return flow
+
+
+def flow_warp(img, flow, filling_value=0, interpolate_mode='nearest'):
+    """Use flow to warp an image.
+
+    Args:
+        img (ndarray, float or uint8): Image to be warped.
+        flow (ndarray, float): Optical flow.
+        filling_value (int): Missing pixels will be set to filling_value.
+        interpolate_mode (str): bilinear -> Bilinear Interpolation;
+            nearest -> Nearest Neighbor.
+
+    Returns:
+        ndarray: Warped image with the same shape as img.
+    """
+    interpolate_mode_dict = {'bilinear': 0, 'nearest': 1}
+    assert len(img.shape) == 3
+    assert len(flow.shape) == 3 and flow.shape[2] == 2
+    assert flow.shape[:2] == img.shape[:2]
+    assert interpolate_mode in interpolate_mode_dict
+
+    interpolate_mode = interpolate_mode_dict[interpolate_mode]
+    img_float = img.astype(np.float64)
+
+    out = flow_warp_c(
+        img_float,
+        flow.astype(np.float64),
+        filling_value=filling_value,
+        interpolate_mode=interpolate_mode)
+
+    return out
diff --git a/CDARTS_detection/mmcv/video/optflow_warp/__init__.py b/CDARTS_detection/mmcv/video/optflow_warp/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/CDARTS_detection/mmcv/video/optflow_warp/flow_warp.cpp b/CDARTS_detection/mmcv/video/optflow_warp/flow_warp.cpp
new file mode 100644
index 0000000..ad16fc0
--- /dev/null
+++ b/CDARTS_detection/mmcv/video/optflow_warp/flow_warp.cpp
@@ -0,0 +1,75 @@
+#include "flow_warp.hpp"
+
+void FlowWarp(double* img, double* flow, double* out, const int height,
+              const int width, const int channels, const int filling_value = 0,
+              const int interpolateMode = 0) {
+  for (int h = 0; h < height; h++) {
+    for (int w = 0; w < width; w++) {
+      int offset_cur = h * width + w;
+      int offset_img = offset_cur * channels;
+      int offset_flow = offset_cur * 2;
+      double x, y;
+      x = h + flow[offset_flow + 1];
+      y = w + flow[offset_flow];
+
+      if (x < 0 || x >= height - 1 || y < 0 || y >= width - 1) {
+        for (int k = 0; k < channels; k++) {
+          out[offset_img + k] = filling_value;
+        }
+        continue;
+      }
+
+      if (interpolateMode == 0)
+        BilinearInterpolate(img, width, height, channels, x, y,
+                            out + offset_img);
+      else if (interpolateMode == 1)
+        NNInterpolate(img, width, height, channels, x, y, out + offset_img);
+      else
+        throw "Not Implemented Interpolation Method";
+    }
+  }
+}
+
+void BilinearInterpolate(const double* img, int width, int height, int channels,
+                         double x, double y, double* out) {
+  int xx, yy, m, n, u, v, offset, offset_img, l;
+  xx = x;
+  yy = y;
+
+  double dx, dy, s;
+
+  dx = __max__(__min__(x - xx, double(1)), double(0));
+  dy = __max__(__min__(y - yy, double(1)), double(0));
+
+  for (m = 0; m <= 1; m++)
+    for (n = 0; n <= 1; n++) {
+      u = EnforceRange(yy + n, width);
+      v = EnforceRange(xx + m, height);
+      offset = v * width + u;
+      offset_img = offset * channels;
+      s = fabs(1 - m - dx) * fabs(1 - n - dy);
+      for (l = 0; l < channels; l++) out[l] += img[offset_img + l] * s;
+    }
+}
+
+void NNInterpolate(const double* img, int width, int height, int channels,
+                   double x, double y, double* out) {
+  int xx, yy, m, n, u, v, offset, offset_img, l;
+  xx = x;
+  yy = y;
+
+  double dx, dy;
+
+  dx = __max__(__min__(x - xx, double(1)), double(0));
+  dy = __max__(__min__(y - yy, double(1)), double(0));
+
+  m = (dx < 0.5) ? 0 : 1;
+  n = (dy < 0.5) ? 0 : 1;
+
+  u = EnforceRange(yy + n, width);
+  v = EnforceRange(xx + m, height);
+  offset = v * width + u;
+  offset_img = offset * channels;
+
+  for (l = 0; l < channels; l++) out[l] = img[offset_img + l];
+}
diff --git a/CDARTS_detection/mmcv/video/optflow_warp/flow_warp.hpp b/CDARTS_detection/mmcv/video/optflow_warp/flow_warp.hpp
new file mode 100644
index 0000000..53d8738
--- /dev/null
+++ b/CDARTS_detection/mmcv/video/optflow_warp/flow_warp.hpp
@@ -0,0 +1,29 @@
+#include <iostream>
+#include <math.h>
+
+using namespace std;
+
+void FlowWarp(double* img, double* flow1, double* out, const int height,
+              const int width, const int channels, const int filling_value,
+              const int interpolateMode);
+
+void BilinearInterpolate(const double* img, int width, int height, int channels,
+                         double x, double y, double* out);
+
+void NNInterpolate(const double* img, int width, int height, int channels,
+                   double x, double y, double* out);
+
+template <typename T>
+inline T __min__(T a, T b) {
+  return a > b ? b : a;
+}
+
+template <typename T>
+inline T __max__(T a, T b) {
+  return (a < b) ? b : a;
+}
+
+template <typename T>
+inline T EnforceRange(const T x, const int MaxValue) {
+  return __min__(__max__(x, 0), MaxValue);
+}
diff --git a/CDARTS_detection/mmcv/video/optflow_warp/flow_warp_module.cpp b/CDARTS_detection/mmcv/video/optflow_warp/flow_warp_module.cpp
new file mode 100644
index 0000000..5f3a987
--- /dev/null
+++ b/CDARTS_detection/mmcv/video/optflow_warp/flow_warp_module.cpp
@@ -0,0 +1,7928 @@
+/* Generated by Cython 0.27.3 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+    #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+    #error Cython requires Python 2.6+ or Python 3.3+.
+#else +#define CYTHON_ABI "0_27_3" +#define CYTHON_FUTURE_DIVISION 0 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define 
CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + 
#define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include <stdint.h> +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef __cplusplus + #error "Cython files generated with the C++ option must be compiled with a C++ compiler."
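+/* Note: this #error is expected behaviour for Cython --cplus output. The code below relies on C++ features (templates such as __Pyx_call_destructor and __Pyx_FakeReference, and optionally ::std::complex for the complex-number support further down), so this translation unit must be built as C++ (e.g. with g++ or clang++) rather than as C. */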
+#endif +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #else + #define CYTHON_INLINE inline + #endif +#endif +template<typename T> +void __Pyx_call_destructor(T& x) { + x.~T(); +} +template<typename T> +class __Pyx_FakeReference { + public: + __Pyx_FakeReference() : ptr(NULL) { } + __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { } + T *operator->() { return ptr; } + T *operator&() { return ptr; } + operator T&() { return *ptr; } + template<typename U> bool operator ==(U other) { return *ptr == other; } + template<typename U> bool operator !=(U other) { return *ptr != other; } + private: + T *ptr; +}; + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include <math.h> +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + + +#define __PYX_ERR(f_index, lineno, Ln_error) \ +{ \ + __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ +} + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__mmcv___ext +#define __PYX_HAVE_API__mmcv___ext +#include <string.h> +#include <stdio.h> +#include "numpy/arrayobject.h" +#include "numpy/ufuncobject.h" +#include "flow_warp.hpp" +#ifdef _OPENMP +#include <omp.h> +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +#if defined (__cplusplus) && __cplusplus >= 201103L + #include <cstdlib> + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ?
-value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +#define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime; +static PyObject *__pyx_empty_tuple; +static PyObject 
*__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + +/* Header.proto */ +#if !defined(CYTHON_CCOMPLEX) + #if defined(__cplusplus) + #define CYTHON_CCOMPLEX 1 + #elif defined(_Complex_I) + #define CYTHON_CCOMPLEX 1 + #else + #define CYTHON_CCOMPLEX 0 + #endif +#endif +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #include <complex> + #else + #include <complex.h> + #endif +#endif +#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) + #undef _Complex_I + #define _Complex_I 1.0fj +#endif + + +static const char *__pyx_f[] = { + "mmcv/video/optflow_warp/flow_warp_module.pyx", + "__init__.pxd", + "type.pxd", +}; +/* BufferFormatStructs.proto */ +#define IS_UNSIGNED(type) (((type) -1) > 0) +struct __Pyx_StructField_; +#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) +typedef struct { + const char* name; + struct __Pyx_StructField_* fields; + size_t size; + size_t arraysize[8]; + int ndim; + char typegroup; + char is_unsigned; + int flags; +} __Pyx_TypeInfo; +typedef struct __Pyx_StructField_ { + __Pyx_TypeInfo* type; + const char* name; + size_t offset; +} __Pyx_StructField; +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char is_valid_array; +} __Pyx_BufFmt_Context; + + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":743 + * # in Cython to enable them only on the right systems.
+ * + * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + */ +typedef npy_int8 __pyx_t_5numpy_int8_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744 + * + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t + */ +typedef npy_int16 __pyx_t_5numpy_int16_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745 + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< + * ctypedef npy_int64 int64_t + * #ctypedef npy_int96 int96_t + */ +typedef npy_int32 __pyx_t_5numpy_int32_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":746 + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< + * #ctypedef npy_int96 int96_t + * #ctypedef npy_int128 int128_t + */ +typedef npy_int64 __pyx_t_5numpy_int64_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":750 + * #ctypedef npy_int128 int128_t + * + * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + */ +typedef npy_uint8 __pyx_t_5numpy_uint8_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":751 + * + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t + */ +typedef npy_uint16 __pyx_t_5numpy_uint16_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":752 + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< + * ctypedef npy_uint64 uint64_t + * #ctypedef npy_uint96 uint96_t + */ +typedef npy_uint32 __pyx_t_5numpy_uint32_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":753 + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< + * #ctypedef npy_uint96 uint96_t + * #ctypedef npy_uint128 uint128_t + */ +typedef npy_uint64 __pyx_t_5numpy_uint64_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":757 + * #ctypedef npy_uint128 uint128_t + * + * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< + * ctypedef npy_float64 float64_t + * #ctypedef npy_float80 float80_t + */ +typedef npy_float32 __pyx_t_5numpy_float32_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758 + * + * ctypedef npy_float32 float32_t + * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< + * #ctypedef npy_float80 float80_t + * #ctypedef npy_float128 float128_t + */ +typedef npy_float64 __pyx_t_5numpy_float64_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767 + * # The int types are mapped a bit surprising -- + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t + */ +typedef npy_long __pyx_t_5numpy_int_t; + +/* 
"../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":768 + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong longlong_t + * + */ +typedef npy_longlong __pyx_t_5numpy_long_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769 + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_ulong uint_t + */ +typedef npy_longlong __pyx_t_5numpy_longlong_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771 + * ctypedef npy_longlong longlong_t + * + * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t + */ +typedef npy_ulong __pyx_t_5numpy_uint_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":772 + * + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulonglong_t + * + */ +typedef npy_ulonglong __pyx_t_5numpy_ulong_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773 + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_intp intp_t + */ +typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 + * ctypedef npy_ulonglong ulonglong_t + * + * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< + * ctypedef npy_uintp uintp_t + * + */ +typedef npy_intp __pyx_t_5numpy_intp_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 + * + * ctypedef npy_intp intp_t + * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< + * + * ctypedef npy_double float_t + */ +typedef npy_uintp __pyx_t_5numpy_uintp_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 + * ctypedef npy_uintp uintp_t + * + * ctypedef npy_double float_t # <<<<<<<<<<<<<< + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t + */ +typedef npy_double __pyx_t_5numpy_float_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 + * + * ctypedef npy_double float_t + * ctypedef npy_double double_t # <<<<<<<<<<<<<< + * ctypedef npy_longdouble longdouble_t + * + */ +typedef npy_double __pyx_t_5numpy_double_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":780 + * ctypedef npy_double float_t + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cfloat cfloat_t + */ +typedef npy_longdouble __pyx_t_5numpy_longdouble_t; +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< float > __pyx_t_float_complex; + #else + typedef float _Complex __pyx_t_float_complex; + #endif +#else + typedef struct { float real, imag; } __pyx_t_float_complex; +#endif +static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); + +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< 
double > __pyx_t_double_complex; + #else + typedef double _Complex __pyx_t_double_complex; + #endif +#else + typedef struct { double real, imag; } __pyx_t_double_complex; +#endif +static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); + + +/*--- Type declarations ---*/ + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782 + * ctypedef npy_longdouble longdouble_t + * + * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t + */ +typedef npy_cfloat __pyx_t_5numpy_cfloat_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":783 + * + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< + * ctypedef npy_clongdouble clongdouble_t + * + */ +typedef npy_cdouble __pyx_t_5numpy_cdouble_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cdouble complex_t + */ +typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":786 + * ctypedef npy_clongdouble clongdouble_t + * + * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew1(a): + */ +typedef npy_cdouble __pyx_t_5numpy_complex_t; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} 
while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* BufferGetAndValidate.proto */ +#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ + ((obj == Py_None || obj == NULL) ?\ + (__Pyx_ZeroBuffer(buf), 0) :\ + __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) +static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, + __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); +static void __Pyx_ZeroBuffer(Py_buffer* buf); +static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); +static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* GetModuleGlobalName.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, 
Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#endif + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +/* GetItemInt.proto */ +#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ + (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ + __Pyx_GetItemInt_Generic(o, to_py_func(i)))) +#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound, int boundscheck); + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, 
tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* DictGetItem.proto */ +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { + PyObject *value; + value = PyDict_GetItemWithError(d, key); + if (unlikely(!value)) { + if (!PyErr_Occurred()) { + PyObject* args = PyTuple_Pack(1, key); + if (likely(args)) + PyErr_SetObject(PyExc_KeyError, args); + Py_XDECREF(args); + } + return NULL; + } + Py_INCREF(value); + return value; +} +#else + #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, 
int level); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* RealImag.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif +#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif +#if defined(__cplusplus) && CYTHON_CCOMPLEX\ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_float(a, b) ((a)==(b)) + #define __Pyx_c_sum_float(a, b) ((a)+(b)) + #define __Pyx_c_diff_float(a, b) ((a)-(b)) + #define __Pyx_c_prod_float(a, b) ((a)*(b)) + #define __Pyx_c_quot_float(a, b) ((a)/(b)) + #define __Pyx_c_neg_float(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_float(z) ((z)==(float)0) + #define __Pyx_c_conj_float(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_float(z) (::std::abs(z)) + #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_float(z) ((z)==0) + #define __Pyx_c_conj_float(z) (conjf(z)) + #if 1 + #define __Pyx_c_abs_float(z) (cabsf(z)) + #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex 
__Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); + #endif +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_double(a, b) ((a)==(b)) + #define __Pyx_c_sum_double(a, b) ((a)+(b)) + #define __Pyx_c_diff_double(a, b) ((a)-(b)) + #define __Pyx_c_prod_double(a, b) ((a)*(b)) + #define __Pyx_c_quot_double(a, b) ((a)/(b)) + #define __Pyx_c_neg_double(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_double(z) ((z)==(double)0) + #define __Pyx_c_conj_double(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (::std::abs(z)) + #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_double(z) ((z)==0) + #define __Pyx_c_conj_double(z) (conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (cabs(z)) + #define __Pyx_c_pow_double(a, b) (cpow(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); + #endif +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* 
PyIdentifierFromString.proto */ +#if !defined(__Pyx_PyIdentifier_FromString) +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) +#else + #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) +#endif +#endif + +/* ModuleImport.proto */ +static PyObject *__Pyx_ImportModule(const char *name); + +/* TypeImport.proto */ +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'numpy' */ + +/* Module declarations from 'numpy' */ +static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; +static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; +static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; +static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; +static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void); /*proto*/ + +/* Module declarations from 'mmcv._ext' */ +static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "mmcv._ext" +extern int __pyx_module_is_main_mmcv___ext; +int __pyx_module_is_main_mmcv___ext = 0; + +/* Implementation of 'mmcv._ext' */ +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_RuntimeError; +static PyObject *__pyx_builtin_ImportError; +static const char __pyx_k_Hi[] = "Hi"; +static const char __pyx_k_np[] = "np"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_STUFF[] = "STUFF"; +static const char __pyx_k_numpy[] = "numpy"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_shape[] = "shape"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_img_array[] = "img_array"; +static const char __pyx_k_mmcv__ext[] = "mmcv._ext"; +static const char __pyx_k_out_array[] = "out_array"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_flow_array[] = "flow_array"; +static const char __pyx_k_zeros_like[] = "zeros_like"; +static const char __pyx_k_ImportError[] = "ImportError"; +static const char __pyx_k_flow_warp_c[] = "flow_warp_c"; +static const char __pyx_k_RuntimeError[] = "RuntimeError"; +static const char __pyx_k_filling_value[] = "filling_value"; +static const char __pyx_k_interpolate_mode[] = "interpolate_mode"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; +static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; +static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; +static const char 
__pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; +static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; +static const char __pyx_k_mmcv_video_optflow_warp_flow_war[] = "mmcv/video/optflow_warp/flow_warp_module.pyx"; +static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; +static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; +static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; +static PyObject *__pyx_n_s_Hi; +static PyObject *__pyx_n_s_ImportError; +static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; +static PyObject *__pyx_n_s_RuntimeError; +static PyObject *__pyx_n_s_STUFF; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_filling_value; +static PyObject *__pyx_n_s_flow_array; +static PyObject *__pyx_n_s_flow_warp_c; +static PyObject *__pyx_n_s_img_array; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_interpolate_mode; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_mmcv__ext; +static PyObject *__pyx_kp_s_mmcv_video_optflow_warp_flow_war; +static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; +static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; +static PyObject *__pyx_n_s_np; +static PyObject *__pyx_n_s_numpy; +static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; +static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; +static PyObject *__pyx_n_s_out_array; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_shape; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; +static PyObject *__pyx_n_s_zeros_like; +static PyObject *__pyx_pf_4mmcv_4_ext_flow_warp_c(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_img_array, PyArrayObject *__pyx_v_flow_array, int __pyx_v_filling_value, int __pyx_v_interpolate_mode); /* proto */ +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_tuple__10; +static PyObject *__pyx_codeobj__11; + +/* "mmcv/video/optflow_warp/flow_warp_module.pyx":11 + * void FlowWarp(double* img, double* flow1, double* out, const int height, const int width, const int channels, const int filling_value, const int interpolateMode) + * + * def flow_warp_c(np.ndarray[double, ndim=3, mode="c"] img_array not None, # <<<<<<<<<<<<<< + * np.ndarray[double, ndim=3, mode="c"] flow_array not None, + * int filling_value=0, + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_4mmcv_4_ext_1flow_warp_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_4mmcv_4_ext_1flow_warp_c = {"flow_warp_c", (PyCFunction)__pyx_pw_4mmcv_4_ext_1flow_warp_c, 
METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_4mmcv_4_ext_1flow_warp_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_img_array = 0; + PyArrayObject *__pyx_v_flow_array = 0; + int __pyx_v_filling_value; + int __pyx_v_interpolate_mode; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("flow_warp_c (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_img_array,&__pyx_n_s_flow_array,&__pyx_n_s_filling_value,&__pyx_n_s_interpolate_mode,0}; + PyObject* values[4] = {0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_img_array)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flow_array)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("flow_warp_c", 0, 2, 4, 1); __PYX_ERR(0, 11, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (kw_args > 0) { + PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_filling_value); + if (value) { values[2] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 3: + if (kw_args > 0) { + PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_interpolate_mode); + if (value) { values[3] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "flow_warp_c") < 0)) __PYX_ERR(0, 11, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_img_array = ((PyArrayObject *)values[0]); + __pyx_v_flow_array = ((PyArrayObject *)values[1]); + if (values[2]) { + __pyx_v_filling_value = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_filling_value == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13, __pyx_L3_error) + } else { + __pyx_v_filling_value = ((int)0); + } + if (values[3]) { + __pyx_v_interpolate_mode = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_interpolate_mode == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14, __pyx_L3_error) + } else { + __pyx_v_interpolate_mode = ((int)1); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("flow_warp_c", 0, 2, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 11, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("mmcv._ext.flow_warp_c", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_img_array), __pyx_ptype_5numpy_ndarray, 0, "img_array", 0))) __PYX_ERR(0, 11, __pyx_L1_error) + if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_flow_array), __pyx_ptype_5numpy_ndarray, 0, "flow_array", 0))) __PYX_ERR(0, 12, __pyx_L1_error) + __pyx_r = __pyx_pf_4mmcv_4_ext_flow_warp_c(__pyx_self, __pyx_v_img_array, __pyx_v_flow_array, __pyx_v_filling_value, __pyx_v_interpolate_mode); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_4mmcv_4_ext_flow_warp_c(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_img_array, PyArrayObject *__pyx_v_flow_array, int __pyx_v_filling_value, int __pyx_v_interpolate_mode) { + PyObject *__pyx_v_out_array = NULL; + __Pyx_LocalBuf_ND __pyx_pybuffernd_flow_array; + __Pyx_Buffer __pyx_pybuffer_flow_array; + __Pyx_LocalBuf_ND __pyx_pybuffernd_img_array; + __Pyx_Buffer __pyx_pybuffer_img_array; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_t_7; + __Pyx_RefNannySetupContext("flow_warp_c", 0); + __pyx_pybuffer_img_array.pybuffer.buf = NULL; + __pyx_pybuffer_img_array.refcount = 0; + __pyx_pybuffernd_img_array.data = NULL; + __pyx_pybuffernd_img_array.rcbuffer = &__pyx_pybuffer_img_array; + __pyx_pybuffer_flow_array.pybuffer.buf = NULL; + __pyx_pybuffer_flow_array.refcount = 0; + __pyx_pybuffernd_flow_array.data = NULL; + __pyx_pybuffernd_flow_array.rcbuffer = &__pyx_pybuffer_flow_array; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_img_array.rcbuffer->pybuffer, (PyObject*)__pyx_v_img_array, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 11, __pyx_L1_error) + } + __pyx_pybuffernd_img_array.diminfo[0].strides = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_img_array.diminfo[0].shape = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_img_array.diminfo[1].strides = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_img_array.diminfo[1].shape = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_img_array.diminfo[2].strides = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_img_array.diminfo[2].shape = __pyx_pybuffernd_img_array.rcbuffer->pybuffer.shape[2]; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_flow_array.rcbuffer->pybuffer, (PyObject*)__pyx_v_flow_array, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_C_CONTIGUOUS, 3, 0, __pyx_stack) == -1)) __PYX_ERR(0, 11, __pyx_L1_error) + } + __pyx_pybuffernd_flow_array.diminfo[0].strides = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_flow_array.diminfo[0].shape = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_flow_array.diminfo[1].strides = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_flow_array.diminfo[1].shape = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.shape[1]; __pyx_pybuffernd_flow_array.diminfo[2].strides = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.strides[2]; __pyx_pybuffernd_flow_array.diminfo[2].shape = __pyx_pybuffernd_flow_array.rcbuffer->pybuffer.shape[2]; + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":16 + * int interpolate_mode=1): + * + * out_array = np.zeros_like(img_array) # <<<<<<<<<<<<<< + * + * 
FlowWarp( np.PyArray_DATA(img_array), + */ + __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros_like); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + } + } + if (!__pyx_t_2) { + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, ((PyObject *)__pyx_v_img_array)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + } else { + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_3)) { + PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_img_array)}; + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_GOTREF(__pyx_t_1); + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { + PyObject *__pyx_temp[2] = {__pyx_t_2, ((PyObject *)__pyx_v_img_array)}; + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_GOTREF(__pyx_t_1); + } else + #endif + { + __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = NULL; + __Pyx_INCREF(((PyObject *)__pyx_v_img_array)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_img_array)); + PyTuple_SET_ITEM(__pyx_t_4, 0+1, ((PyObject *)__pyx_v_img_array)); + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + } + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_out_array = __pyx_t_1; + __pyx_t_1 = 0; + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":20 + * FlowWarp( np.PyArray_DATA(img_array), + * np.PyArray_DATA(flow_array), + * np.PyArray_DATA(out_array), # <<<<<<<<<<<<<< + * out_array.shape[0], + * out_array.shape[1], + */ + if (!(likely(((__pyx_v_out_array) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_out_array, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 20, __pyx_L1_error) + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":21 + * np.PyArray_DATA(flow_array), + * np.PyArray_DATA(out_array), + * out_array.shape[0], # <<<<<<<<<<<<<< + * out_array.shape[1], + * out_array.shape[2], + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_out_array, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":22 + * np.PyArray_DATA(out_array), 
+ * out_array.shape[0], + * out_array.shape[1], # <<<<<<<<<<<<<< + * out_array.shape[2], + * filling_value, + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_out_array, __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_t_3, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":23 + * out_array.shape[0], + * out_array.shape[1], + * out_array.shape[2], # <<<<<<<<<<<<<< + * filling_value, + * interpolate_mode) + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_out_array, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_GetItemInt(__pyx_t_1, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_7 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 23, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":18 + * out_array = np.zeros_like(img_array) + * + * FlowWarp( np.PyArray_DATA(img_array), # <<<<<<<<<<<<<< + * np.PyArray_DATA(flow_array), + * np.PyArray_DATA(out_array), + */ + FlowWarp(((double *)PyArray_DATA(((PyArrayObject *)__pyx_v_img_array))), ((double *)PyArray_DATA(((PyArrayObject *)__pyx_v_flow_array))), ((double *)PyArray_DATA(((PyArrayObject *)__pyx_v_out_array))), __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_v_filling_value, __pyx_v_interpolate_mode); + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":27 + * interpolate_mode) + * + * return out_array # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_out_array); + __pyx_r = __pyx_v_out_array; + goto __pyx_L0; + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":11 + * void FlowWarp(double* img, double* flow1, double* out, const int height, const int width, const int channels, const int filling_value, const int interpolateMode) + * + * def flow_warp_c(np.ndarray[double, ndim=3, mode="c"] img_array not None, # <<<<<<<<<<<<<< + * np.ndarray[double, ndim=3, mode="c"] flow_array not None, + * int filling_value=0, + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_flow_array.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_img_array.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("mmcv._ext.flow_warp_c", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_flow_array.rcbuffer->pybuffer); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_img_array.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF(__pyx_v_out_array); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* 
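
[Note] The generated wrapper above is Cython's argument-parsing and buffer code for the `flow_warp_c` entry point; the `.pyx` source it was compiled from is quoted piece by piece in the embedded comments (mmcv/video/optflow_warp/flow_warp_module.pyx, lines 11-27). The function takes two C-contiguous, 3-D float64 arrays, optional `filling_value` (default 0) and `interpolate_mode` (default 1), allocates `out_array = np.zeros_like(img_array)`, and hands raw data pointers plus `shape[0]`/`shape[1]`/`shape[2]` (height, width, channels, per the quoted `FlowWarp` declaration) to the C routine before returning `out_array`. A minimal usage sketch, assuming the packaged extension imports as `mmcv._ext` and that the flow array carries two channels; the wrapper itself only enforces ndim=3, float64, C order:

    import numpy as np
    from mmcv import _ext  # compiled extension checked in by this patch

    img = np.zeros((8, 8, 3), dtype=np.float64)   # H x W x C, C-contiguous
    flow = np.zeros((8, 8, 2), dtype=np.float64)  # assumed H x W x 2 layout
    warped = _ext.flow_warp_c(img, flow, filling_value=0, interpolate_mode=1)
    assert warped.shape == img.shape              # output mirrors the input image
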
"../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fullfill the PEP. + */ + +/* Python wrapper */ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_v_copy_shape; + int __pyx_v_i; + int __pyx_v_ndim; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + int __pyx_v_t; + char *__pyx_v_f; + PyArray_Descr *__pyx_v_descr = 0; + int __pyx_v_offset; + int __pyx_v_hasfields; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + char *__pyx_t_7; + __Pyx_RefNannySetupContext("__getbuffer__", 0); + if (__pyx_v_info != NULL) { + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":220 + * # of flags + * + * if info == NULL: return # <<<<<<<<<<<<<< + * + * cdef int copy_shape, i, ndim + */ + __pyx_t_1 = ((__pyx_v_info == NULL) != 0); + if (__pyx_t_1) { + __pyx_r = 0; + goto __pyx_L0; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223 + * + * cdef int copy_shape, i, ndim + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + */ + __pyx_v_endian_detector = 1; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":224 + * cdef int copy_shape, i, ndim + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * + * ndim = PyArray_NDIM(self) + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":226 + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 + * ndim = PyArray_NDIM(self) + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * copy_shape = 1 + * else: + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229 + 
* + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * copy_shape = 1 # <<<<<<<<<<<<<< + * else: + * copy_shape = 0 + */ + __pyx_v_copy_shape = 1; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 + * ndim = PyArray_NDIM(self) + * + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * copy_shape = 1 + * else: + */ + goto __pyx_L4; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 + * copy_shape = 1 + * else: + * copy_shape = 0 # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + */ + /*else*/ { + __pyx_v_copy_shape = 0; + } + __pyx_L4:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L6_bool_binop_done; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":234 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not C contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L6_bool_binop_done:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + if (__pyx_t_1) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 235, __pyx_L1_error) + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * copy_shape = 0 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } 
else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L9_bool_binop_done; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":238 + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not Fortran contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L9_bool_binop_done:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + if (__pyx_t_1) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 239, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 239, __pyx_L1_error) + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241 + * raise ValueError(u"ndarray is not Fortran contiguous") + * + * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< + * info.ndim = ndim + * if copy_shape: + */ + __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242 + * + * info.buf = PyArray_DATA(self) + * info.ndim = ndim # <<<<<<<<<<<<<< + * if copy_shape: + * # Allocate new buffer for strides and shape info. + */ + __pyx_v_info->ndim = __pyx_v_ndim; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if copy_shape: # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. + */ + __pyx_t_1 = (__pyx_v_copy_shape != 0); + if (__pyx_t_1) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246 + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. 
+ * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<< + * info.shape = info.strides + ndim + * for i in range(ndim): + */ + __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247 + * # This is allocated as one block, strides first. + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim # <<<<<<<<<<<<<< + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + */ + __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248 + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim + * for i in range(ndim): # <<<<<<<<<<<<<< + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] + */ + __pyx_t_4 = __pyx_v_ndim; + for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { + __pyx_v_i = __pyx_t_5; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249 + * info.shape = info.strides + ndim + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + */ + (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250 + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< + * else: + * info.strides = PyArray_STRIDES(self) + */ + (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if copy_shape: # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. 
+ */ + goto __pyx_L11; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":252 + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + */ + /*else*/ { + __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253 + * else: + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + */ + __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); + } + __pyx_L11:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254 + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) + */ + __pyx_v_info->suboffsets = NULL; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":255 + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< + * info.readonly = not PyArray_ISWRITEABLE(self) + * + */ + __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":256 + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< + * + * cdef int t + */ + __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259 + * + * cdef int t + * cdef char* f = NULL # <<<<<<<<<<<<<< + * cdef dtype descr = self.descr + * cdef int offset + */ + __pyx_v_f = NULL; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260 + * cdef int t + * cdef char* f = NULL + * cdef dtype descr = self.descr # <<<<<<<<<<<<<< + * cdef int offset + * + */ + __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263 + * cdef int offset + * + * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< + * + * if not hasfields and not copy_shape: + */ + __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 + * cdef bint hasfields = PyDataType_HASFIELDS(descr) + * + * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< + * # do not call releasebuffer + * info.obj = None + */ + __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L15_bool_binop_done; + } + __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L15_bool_binop_done:; + if (__pyx_t_1) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267 + * if not hasfields and not copy_shape: + * # do not call releasebuffer + * info.obj 
= None # <<<<<<<<<<<<<< + * else: + * # need to call releasebuffer + */ + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = Py_None; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 + * cdef bint hasfields = PyDataType_HASFIELDS(descr) + * + * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< + * # do not call releasebuffer + * info.obj = None + */ + goto __pyx_L14; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 + * else: + * # need to call releasebuffer + * info.obj = self # <<<<<<<<<<<<<< + * + * if not hasfields: + */ + /*else*/ { + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + } + __pyx_L14:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 + * info.obj = self + * + * if not hasfields: # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); + if (__pyx_t_1) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273 + * + * if not hasfields: + * t = descr.type_num # <<<<<<<<<<<<<< + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + */ + __pyx_t_4 = __pyx_v_descr->type_num; + __pyx_v_t = __pyx_t_4; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); + if (!__pyx_t_2) { + goto __pyx_L20_next_or; + } else { + } + __pyx_t_2 = (__pyx_v_little_endian != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L19_bool_binop_done; + } + __pyx_L20_next_or:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L19_bool_binop_done; + } + __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L19_bool_binop_done:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (__pyx_t_1) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise 
ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 276, __pyx_L1_error) + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * if not hasfields: + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277 + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + */ + switch (__pyx_v_t) { + case NPY_BYTE: + __pyx_v_f = ((char *)"b"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + */ + case NPY_UBYTE: + __pyx_v_f = ((char *)"B"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + */ + case NPY_SHORT: + __pyx_v_f = ((char *)"h"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + */ + case NPY_USHORT: + __pyx_v_f = ((char *)"H"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":281 + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + */ + case NPY_INT: + __pyx_v_f = ((char *)"i"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282 + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + */ + case NPY_UINT: + __pyx_v_f = ((char *)"I"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + */ + case NPY_LONG: + __pyx_v_f = ((char *)"l"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: 
f = "L" # <<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + */ + case NPY_ULONG: + __pyx_v_f = ((char *)"L"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":285 + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + */ + case NPY_LONGLONG: + __pyx_v_f = ((char *)"q"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + */ + case NPY_ULONGLONG: + __pyx_v_f = ((char *)"Q"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + */ + case NPY_FLOAT: + __pyx_v_f = ((char *)"f"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288 + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + */ + case NPY_DOUBLE: + __pyx_v_f = ((char *)"d"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + */ + case NPY_LONGDOUBLE: + __pyx_v_f = ((char *)"g"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":290 + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + */ + case NPY_CFLOAT: + __pyx_v_f = ((char *)"Zf"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":291 + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" + */ + case NPY_CDOUBLE: + __pyx_v_f = ((char *)"Zd"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f = "O" + * else: + */ + case NPY_CLONGDOUBLE: + __pyx_v_f = ((char *)"Zg"); + break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":293 + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + case NPY_OBJECT: + __pyx_v_f = ((char *)"O"); + break; + default: + + /* 
"../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295 + * elif t == NPY_OBJECT: f = "O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * info.format = f + * return + */ + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 295, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 295, __pyx_L1_error) + break; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f # <<<<<<<<<<<<<< + * return + * else: + */ + __pyx_v_info->format = __pyx_v_f; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f + * return # <<<<<<<<<<<<<< + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 + * info.obj = self + * + * if not hasfields: # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":299 + * return + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + */ + /*else*/ { + __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":300 + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, + */ + (__pyx_v_info->format[0]) = '^'; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":301 + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 # <<<<<<<<<<<<<< + * f = _util_dtypestring(descr, info.format + 1, + * info.format + _buffer_format_string_len, + */ + __pyx_v_offset = 0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":302 + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< + * info.format + _buffer_format_string_len, + * &offset) + */ + __pyx_t_7 = 
__pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 302, __pyx_L1_error) + __pyx_v_f = __pyx_t_7; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":305 + * info.format + _buffer_format_string_len, + * &offset) + * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + */ + (__pyx_v_f[0]) = '\x00'; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":214 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fullfill the PEP. + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(Py_None); + __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; + } + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_descr); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + +/* Python wrapper */ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); + __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__releasebuffer__", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); + if (__pyx_t_1) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":309 + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) # <<<<<<<<<<<<<< + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) + */ + PyObject_Free(__pyx_v_info->format); + + /* 
"../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":308 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":311 + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) # <<<<<<<<<<<<<< + * # info.shape was stored after info.strides in the same block + * + */ + PyObject_Free(__pyx_v_info->strides); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":310 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":307 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":789 + * + * cdef inline object PyArray_MultiIterNew1(a): + * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew2(a, b): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 789, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object 
PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792 + * + * cdef inline object PyArray_MultiIterNew2(a, b): + * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 792, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":795 + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 795, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject 
*__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":798 + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 798, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":797 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 801, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":800 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + +static CYTHON_INLINE PyObject 
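
[Note] `PyArray_MultiIterNew1` through `PyArray_MultiIterNew5` above are thin inline forwarders to numpy's `PyArray_MultiIterNew` C API, which builds a broadcasting iterator over its arguments. The documented Python-level counterpart is `np.broadcast`, so the same behaviour can be sketched without touching the C API:

    import numpy as np

    # np.broadcast is the Python face of PyArray_MultiIterNew
    it = np.broadcast(np.arange(3).reshape(3, 1), np.arange(4))
    print(it.shape)       # (3, 4), the broadcast result shape
    print(next(it))       # (0, 0), element pairs in broadcast order
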
*__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); + if (__pyx_t_1) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape # <<<<<<<<<<<<<< + * else: + * return () + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); + __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); + goto __pyx_L0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":804 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807 + * return d.subarray.shape + * else: + * return () # <<<<<<<<<<<<<< + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_empty_tuple); + __pyx_r = __pyx_empty_tuple; + goto __pyx_L0; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":803 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. 
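
[Note] `PyDataType_SHAPE` above returns `d.subarray.shape` for subarray dtypes and the empty tuple otherwise; at the Python level the same logic is exposed as `dtype.shape`:

    import numpy as np

    print(np.dtype('f8').shape)        # (), no subarray
    print(np.dtype('(2,3)f8').shape)   # (2, 3), a subarray dtype
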
+ */ + +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { + PyArray_Descr *__pyx_v_child = 0; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + PyObject *__pyx_v_fields = 0; + PyObject *__pyx_v_childname = NULL; + PyObject *__pyx_v_new_offset = NULL; + PyObject *__pyx_v_t = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_t_7; + long __pyx_t_8; + char *__pyx_t_9; + __Pyx_RefNannySetupContext("_util_dtypestring", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 + * + * cdef dtype child + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * cdef tuple fields + */ + __pyx_v_endian_detector = 1; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":815 + * cdef dtype child + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * cdef tuple fields + * + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + if (unlikely(__pyx_v_descr->names == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); + __PYX_ERR(1, 818, __pyx_L1_error) + } + __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; + for (;;) { + if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 818, __pyx_L1_error) + #else + __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 818, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":819 + * + * for childname in descr.names: + * fields = descr.fields[childname] # <<<<<<<<<<<<<< + * child, new_offset = fields + * + */ + if (unlikely(__pyx_v_descr->fields == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 819, __pyx_L1_error) + } + __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 819, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); + __pyx_t_3 = 0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":820 + * for childname in descr.names: + * fields = descr.fields[childname] + * child, new_offset = fields # <<<<<<<<<<<<<< + * + * if (end - f) - (new_offset - offset[0]) < 15: + */ + if 
(likely(__pyx_v_fields != Py_None)) { + PyObject* sequence = __pyx_v_fields; + #if !CYTHON_COMPILING_IN_PYPY + Py_ssize_t size = Py_SIZE(sequence); + #else + Py_ssize_t size = PySequence_Size(sequence); + #endif + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 820, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 820, __pyx_L1_error) + } + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 820, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); + __pyx_t_3 = 0; + __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 822, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); + if (__pyx_t_6) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 823, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 823, __pyx_L1_error) + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":822 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_7 = ((__pyx_v_child->byteorder 
== '>') != 0); + if (!__pyx_t_7) { + goto __pyx_L8_next_or; + } else { + } + __pyx_t_7 = (__pyx_v_little_endian != 0); + if (!__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_L8_next_or:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 + * + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * # One could encode it in the format string and have Cython + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); + if (__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_6 = __pyx_t_7; + __pyx_L7_bool_binop_done:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (__pyx_t_6) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 827, __pyx_L1_error) + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 + * + * # Output padding bytes + * while offset[0] < new_offset: # <<<<<<<<<<<<<< + * f[0] = 120 # "x"; pad byte + * f += 1 + */ + while (1) { + __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!__pyx_t_6) break; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 + * # Output padding bytes + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< + * f += 1 + * offset[0] += 1 + */ + (__pyx_v_f[0]) = 0x78; + + /* 
"../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839 + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte + * f += 1 # <<<<<<<<<<<<<< + * offset[0] += 1 + * + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 + * f[0] = 120 # "x"; pad byte + * f += 1 + * offset[0] += 1 # <<<<<<<<<<<<<< + * + * offset[0] += child.itemsize + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 + * offset[0] += 1 + * + * offset[0] += child.itemsize # <<<<<<<<<<<<<< + * + * if not PyDataType_HASFIELDS(child): + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); + if (__pyx_t_6) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845 + * + * if not PyDataType_HASFIELDS(child): + * t = child.type_num # <<<<<<<<<<<<<< + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") + */ + __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); + if (__pyx_t_6) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 847, __pyx_L1_error) + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 + * + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); 
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 98; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 66; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x68; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 72; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":854 + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, 
__pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 854, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x69; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 73; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 856, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x6C; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":857 + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 857, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 76; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":858 + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + 
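+
+/* The integer literals stored into f[] along this chain are the ASCII codes
+ * of the single-character format codes shown in the trailing #"..." source
+ * comments, e.g. 98 == 'b', 0x68 == 'h', 0x71 == 'q', 0x66 == 'f',
+ * 0x64 == 'd'.  An illustrative check (Python):
+ *
+ *     >>> ord('q'), hex(ord('q'))
+ *     (113, '0x71')
+ */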
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 858, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x71; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":859 + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 859, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 81; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 860, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x66; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861 + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 861, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x64; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":862 + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + */ + 
__pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 862, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x67; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":863 + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 863, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x66; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":864 + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 864, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 864, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 864, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x64; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":865 + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 865, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x67; 
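+ /* Complex types emit a two-character code ('Z' plus the component code,
+    "Zg" here): the f += 1 below steps past 'Z', and the common f += 1
+    after the dispatch steps past the component character. */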
+ __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":866 + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 866, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 79; + goto __pyx_L15; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":868 + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * f += 1 + * else: + */ + /*else*/ { + __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); + __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 868, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 868, __pyx_L1_error) + } + __pyx_L15:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":869 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * f += 1 # <<<<<<<<<<<<<< + * else: + * # Cython ignores struct boundary information ("T{...}"), + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + goto __pyx_L13; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":873 + * # Cython ignores struct boundary information ("T{...}"), + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< + * return f + * + */ + /*else*/ { + __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 873, __pyx_L1_error) + __pyx_v_f = __pyx_t_9; + } + __pyx_L13:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":818 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":874 + * # 
so don't output it + * f = _util_dtypestring(child, f, end, offset) + * return f # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_f; + goto __pyx_L0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_child); + __Pyx_XDECREF(__pyx_v_fields); + __Pyx_XDECREF(__pyx_v_childname); + __Pyx_XDECREF(__pyx_v_new_offset); + __Pyx_XDECREF(__pyx_v_t); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + +static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { + PyObject *__pyx_v_baseptr; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + __Pyx_RefNannySetupContext("set_array_base", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + __pyx_t_1 = (__pyx_v_base == Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":993 + * cdef PyObject* baseptr + * if base is None: + * baseptr = NULL # <<<<<<<<<<<<<< + * else: + * Py_INCREF(base) # important to do this before decref below! + */ + __pyx_v_baseptr = NULL; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":992 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + goto __pyx_L3; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":995 + * baseptr = NULL + * else: + * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< + * baseptr = base + * Py_XDECREF(arr.base) + */ + /*else*/ { + Py_INCREF(__pyx_v_base); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996 + * else: + * Py_INCREF(base) # important to do this before decref below! + * baseptr = base # <<<<<<<<<<<<<< + * Py_XDECREF(arr.base) + * arr.base = baseptr + */ + __pyx_v_baseptr = ((PyObject *)__pyx_v_base); + } + __pyx_L3:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 + * Py_INCREF(base) # important to do this before decref below! 
+ * baseptr = base + * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< + * arr.base = baseptr + * + */ + Py_XDECREF(__pyx_v_arr->base); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998 + * baseptr = base + * Py_XDECREF(arr.base) + * arr.base = baseptr # <<<<<<<<<<<<<< + * + * cdef inline object get_array_base(ndarray arr): + */ + __pyx_v_arr->base = __pyx_v_baseptr; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":990 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("get_array_base", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); + if (__pyx_t_1) { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002 + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: + * return None # <<<<<<<<<<<<<< + * else: + * return arr.base + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(Py_None); + __pyx_r = Py_None; + goto __pyx_L0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1001 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004 + * return None + * else: + * return arr.base # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); + __pyx_r = ((PyObject *)__pyx_v_arr->base); + goto __pyx_L0; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * # Versions of the import_* functions which are more suitable for + * # Cython code. 
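+ *
+ * (Background, illustrative rather than generated: these wrappers convert
+ * the NumPy C-API init macros into functions that raise ImportError on
+ * failure.  A typical Cython caller, as a sketch:
+ *
+ *     cimport numpy as np
+ *     np.import_array()   # required before any NumPy C-API use
+ * )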
+ * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_array", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011 + * cdef inline int import_array() except -1: + * try: + * _import_array() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") + */ + __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1011, __pyx_L3_error) + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012 + * try: + * _import_array() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.multiarray failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1012, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1013, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1013, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * # Cython code. 
+ * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * # Versions of the import_* functions which are more suitable for + * # Cython code. + * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_umath", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1017 + * cdef inline int import_umath() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1017, __pyx_L3_error) + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1018 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1018, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + 
+ /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1019, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1019, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1016 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1015 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_ufunc", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1023 + * cdef inline int import_ufunc() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1023, __pyx_L3_error) + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 
= 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1024 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1024, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1025, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1025, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1022 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec__ext(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec__ext}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "_ext", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, + {&__pyx_kp_u_Format_string_allocated_too_shor_2, 
__pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, + {&__pyx_n_s_Hi, __pyx_k_Hi, sizeof(__pyx_k_Hi), 0, 0, 1, 1}, + {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, + {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, + {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, + {&__pyx_n_s_STUFF, __pyx_k_STUFF, sizeof(__pyx_k_STUFF), 0, 0, 1, 1}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_filling_value, __pyx_k_filling_value, sizeof(__pyx_k_filling_value), 0, 0, 1, 1}, + {&__pyx_n_s_flow_array, __pyx_k_flow_array, sizeof(__pyx_k_flow_array), 0, 0, 1, 1}, + {&__pyx_n_s_flow_warp_c, __pyx_k_flow_warp_c, sizeof(__pyx_k_flow_warp_c), 0, 0, 1, 1}, + {&__pyx_n_s_img_array, __pyx_k_img_array, sizeof(__pyx_k_img_array), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_interpolate_mode, __pyx_k_interpolate_mode, sizeof(__pyx_k_interpolate_mode), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_mmcv__ext, __pyx_k_mmcv__ext, sizeof(__pyx_k_mmcv__ext), 0, 0, 1, 1}, + {&__pyx_kp_s_mmcv_video_optflow_warp_flow_war, __pyx_k_mmcv_video_optflow_warp_flow_war, sizeof(__pyx_k_mmcv_video_optflow_warp_flow_war), 0, 0, 1, 0}, + {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, + {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, + {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, + {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, + {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, + {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, + {&__pyx_n_s_out_array, __pyx_k_out_array, sizeof(__pyx_k_out_array), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, + {&__pyx_n_s_zeros_like, __pyx_k_zeros_like, sizeof(__pyx_k_zeros_like), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 235, __pyx_L1_error) + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 248, __pyx_L1_error) + __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 823, __pyx_L1_error) + __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1013, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; 
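+
+/* The string table above interns, once at module init, every identifier and
+ * error message this extension uses; the lookups here likewise cache builtin
+ * exception types so raise sites avoid repeated name lookups.  A rough
+ * Python equivalent, as an illustrative sketch:
+ *
+ *     import builtins
+ *     _ValueError = builtins.ValueError   # fetched once, reused later
+ */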
+} + +static int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":239 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 239, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 276, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":823 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 823, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 827, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 847, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1013 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1019 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 1019, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1025 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 1025, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":11 + * void FlowWarp(double* img, double* flow1, double* out, const int height, const int width, const int channels, const int filling_value, const int interpolateMode) + * + * def flow_warp_c(np.ndarray[double, ndim=3, mode="c"] img_array not None, # <<<<<<<<<<<<<< + * np.ndarray[double, ndim=3, mode="c"] flow_array not None, + * int filling_value=0, + */ + __pyx_tuple__10 = PyTuple_Pack(5, __pyx_n_s_img_array, __pyx_n_s_flow_array, __pyx_n_s_filling_value, __pyx_n_s_interpolate_mode, __pyx_n_s_out_array); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + __pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(4, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_mmcv_video_optflow_warp_flow_war, __pyx_n_s_flow_warp_c, 11, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + return 0; + __pyx_L1_error:; + return -1; +} + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC init_ext(void); /*proto*/ +PyMODINIT_FUNC init_ext(void) +#else +PyMODINIT_FUNC PyInit__ext(void); /*proto*/ +PyMODINIT_FUNC PyInit__ext(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + result = PyDict_SetItemString(moddict, to_name, value); + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; 
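+
+/* PEP 489 multi-phase init: PyInit__ext returns the module definition, the
+ * interpreter invokes the Py_mod_create slot (which builds the module from
+ * its ModuleSpec via this helper), then the Py_mod_exec slot runs the module
+ * body.  The attribute copies made by the caller below correspond,
+ * illustratively, to this Python:
+ *
+ *     module.__loader__  = spec.loader
+ *     module.__file__    = spec.origin
+ *     module.__package__ = spec.parent
+ *     module.__path__    = spec.submodule_search_locations
+ */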
+} +static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static int __pyx_pymod_exec__ext(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; + #endif + #if CYTHON_REFNANNY + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); + if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); + } + #endif + __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__ext(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? 
*/ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("_ext", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + #if CYTHON_COMPILING_IN_PYPY + Py_INCREF(__pyx_b); + #endif + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. ---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_mmcv___ext) { + if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "mmcv._ext")) { + if (unlikely(PyDict_SetItemString(modules, "mmcv._ext", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global init code ---*/ + /*--- Variable export code ---*/ + /*--- Function export code ---*/ + /*--- Type init code ---*/ + /*--- Type import code ---*/ + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", + #if CYTHON_COMPILING_IN_PYPY + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) + __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 163, __pyx_L1_error) + __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 185, __pyx_L1_error) + __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 189, __pyx_L1_error) + __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 198, __pyx_L1_error) + __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 885, __pyx_L1_error) + /*--- Variable import code ---*/ + /*--- Function import code ---*/ + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* 
"mmcv/video/optflow_warp/flow_warp_module.pyx":1 + * STUFF = "Hi" # <<<<<<<<<<<<<< + * + * import numpy as np + */ + if (PyDict_SetItem(__pyx_d, __pyx_n_s_STUFF, __pyx_n_s_Hi) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":3 + * STUFF = "Hi" + * + * import numpy as np # <<<<<<<<<<<<<< + * cimport numpy as np + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 3, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":6 + * cimport numpy as np + * + * np.import_array() # <<<<<<<<<<<<<< + * + * cdef extern from "flow_warp.hpp": + */ + __pyx_t_2 = __pyx_f_5numpy_import_array(); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 6, __pyx_L1_error) + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":11 + * void FlowWarp(double* img, double* flow1, double* out, const int height, const int width, const int channels, const int filling_value, const int interpolateMode) + * + * def flow_warp_c(np.ndarray[double, ndim=3, mode="c"] img_array not None, # <<<<<<<<<<<<<< + * np.ndarray[double, ndim=3, mode="c"] flow_array not None, + * int filling_value=0, + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4mmcv_4_ext_1flow_warp_c, NULL, __pyx_n_s_mmcv__ext); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_flow_warp_c, __pyx_t_1) < 0) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "mmcv/video/optflow_warp/flow_warp_module.pyx":1 + * STUFF = "Hi" # <<<<<<<<<<<<<< + * + * import numpy as np + */ + __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "../../../anaconda3/envs/PySpark-2.3.2/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1021 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init mmcv._ext", 0, __pyx_lineno, __pyx_filename); + } + Py_DECREF(__pyx_m); __pyx_m = 0; + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init mmcv._ext"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 
0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule((char *)modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? "" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + type = type->fields->type; + } +} +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t < '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { + switch (ch) { + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': return (is_complex ? 
"'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparseable format string"; + } +} +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 2 : 1); + case 'O': case 'P': return sizeof(void*); + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +typedef struct { char c; short x; } __Pyx_st_short; +typedef struct { char c; int x; } __Pyx_st_int; +typedef struct { char c; long x; } __Pyx_st_long; +typedef struct { char c; float x; } __Pyx_st_float; +typedef struct { char c; double x; } __Pyx_st_double; +typedef struct { char c; long double x; } __Pyx_st_longdouble; +typedef struct { char c; void *x; } __Pyx_st_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_st_float) - sizeof(float); + case 'd': return sizeof(__Pyx_st_double) - sizeof(double); + case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +/* These are for computing the padding at the end of the struct to align + on the first member of the struct. This will probably the same as above, + but we don't have any guarantees. 
+ */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, 
ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if (field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; +} +static PyObject * +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number; + int ndim = ctx->head->field->type->ndim; +; + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + while (*ts && *ts != ')') { + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; + } + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) + return PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + if (*ts != ',' && *ts != ')') + return PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + if (*ts == ',') ts++; + i++; + } + if (i != ndim) + return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return NULL; + } + 
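+    /* Note (added for readability; not part of Cython's generated output):
+     * the loop above has just parsed a PEP 3118 parenthesized shape prefix.
+     * For example, a struct member declared in C as double[2][3] shows up
+     * in a buffer format string as the substring "(2,3)d"; each number in
+     * the parentheses must match the corresponding entry in the dtype's
+     * recorded arraysize, otherwise a ValueError is raised. */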
ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return Py_None; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if (struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if (ctx->enc_type == *ts && got_Z == ctx->is_complex && + ctx->enc_packmode == ctx->new_packmode) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* BufferGetAndValidate */ + static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { + if (unlikely(info->buf == NULL)) return; + if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; + __Pyx_ReleaseBuffer(info); +} +static void 
__Pyx_ZeroBuffer(Py_buffer* buf) { + buf->buf = NULL; + buf->obj = NULL; + buf->strides = __Pyx_zeros; + buf->shape = __Pyx_zeros; + buf->suboffsets = __Pyx_minusones; +} +static int __Pyx__GetBufferAndValidate( + Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, + int nd, int cast, __Pyx_BufFmt_StackElem* stack) +{ + buf->buf = NULL; + if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { + __Pyx_ZeroBuffer(buf); + return -1; + } + if (unlikely(buf->ndim != nd)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + nd, buf->ndim); + goto fail; + } + if (!cast) { + __Pyx_BufFmt_Context ctx; + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; + } + if (unlikely((unsigned)buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", + buf->itemsize, (buf->itemsize > 1) ? "s" : "", + dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); + goto fail; + } + if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; + return 0; +fail:; + __Pyx_SafeReleaseBuffer(buf); + return -1; +} + +/* GetBuiltinName */ + static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* GetModuleGlobalName */ + static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS + result = PyDict_GetItem(__pyx_d, name); + if (likely(result)) { + Py_INCREF(result); + } else { +#else + result = PyObject_GetItem(__pyx_d, name); + if (!result) { + PyErr_Clear(); +#endif + result = __Pyx_GetBuiltinName(name); + } + return result; +} + +/* PyCFunctionFastCall */ + #if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); + int flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs); + } +} +#endif + +/* PyFunctionFastCall */ + #if CYTHON_FAST_PYCALL +#include "frameobject.h" +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking 
them. + */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = f->f_localsplus; + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyObjectCall */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallMethO */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = 
PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallOneArg */ + #if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, &arg, 1); + } +#endif + if (likely(PyCFunction_Check(func))) { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, arg); +#if CYTHON_FAST_PYCCALL + } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif + } + } + return __Pyx__PyObject_CallOneArg(func, arg); +} +#else +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +#endif + +/* ExtTypeTest */ + static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", + Py_TYPE(obj)->tp_name, type->tp_name); + return 0; +} + +/* GetItemInt */ + static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + PyObject *r; + if (!j) return NULL; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyList_GET_SIZE(o); + } + if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyList_GET_SIZE(o)))) { + PyObject *r = PyList_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyTuple_GET_SIZE(o); + } + if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if 
CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); + if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { + PyObject *r = PyList_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } + else if (PyTuple_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); + if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } else { + PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; + if (likely(m && m->sq_item)) { + if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { + Py_ssize_t l = m->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return NULL; + PyErr_Clear(); + } + } + return m->sq_item(o, i); + } + } +#else + if (is_list || PySequence_Check(o)) { + return PySequence_GetItem(o, i); + } +#endif + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +} + +/* PyErrFetchRestore */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* RaiseException */ + #if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 
0;
+    if (PyExceptionInstance_Check(type)) {
+        if (value) {
+            PyErr_SetString(PyExc_TypeError,
+                "instance exception may not have a separate value");
+            goto bad;
+        }
+        value = type;
+        type = (PyObject*) Py_TYPE(value);
+    } else if (PyExceptionClass_Check(type)) {
+        PyObject *instance_class = NULL;
+        if (value && PyExceptionInstance_Check(value)) {
+            instance_class = (PyObject*) Py_TYPE(value);
+            if (instance_class != type) {
+                int is_subclass = PyObject_IsSubclass(instance_class, type);
+                if (!is_subclass) {
+                    instance_class = NULL;
+                } else if (unlikely(is_subclass == -1)) {
+                    goto bad;
+                } else {
+                    type = instance_class;
+                }
+            }
+        }
+        if (!instance_class) {
+            PyObject *args;
+            if (!value)
+                args = PyTuple_New(0);
+            else if (PyTuple_Check(value)) {
+                Py_INCREF(value);
+                args = value;
+            } else
+                args = PyTuple_Pack(1, value);
+            if (!args)
+                goto bad;
+            owned_instance = PyObject_Call(type, args, NULL);
+            Py_DECREF(args);
+            if (!owned_instance)
+                goto bad;
+            value = owned_instance;
+            if (!PyExceptionInstance_Check(value)) {
+                PyErr_Format(PyExc_TypeError,
+                             "calling %R should have returned an instance of "
+                             "BaseException, not %R",
+                             type, Py_TYPE(value));
+                goto bad;
+            }
+        }
+    } else {
+        PyErr_SetString(PyExc_TypeError,
+            "raise: exception class must be a subclass of BaseException");
+        goto bad;
+    }
+    if (cause) {
+        PyObject *fixed_cause;
+        if (cause == Py_None) {
+            fixed_cause = NULL;
+        } else if (PyExceptionClass_Check(cause)) {
+            fixed_cause = PyObject_CallObject(cause, NULL);
+            if (fixed_cause == NULL)
+                goto bad;
+        } else if (PyExceptionInstance_Check(cause)) {
+            fixed_cause = cause;
+            Py_INCREF(fixed_cause);
+        } else {
+            PyErr_SetString(PyExc_TypeError,
+                            "exception causes must derive from "
+                            "BaseException");
+            goto bad;
+        }
+        PyException_SetCause(value, fixed_cause);
+    }
+    PyErr_SetObject(type, value);
+    if (tb) {
+#if CYTHON_COMPILING_IN_PYPY
+        PyObject *tmp_type, *tmp_value, *tmp_tb;
+        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
+        Py_INCREF(tb);
+        PyErr_Restore(tmp_type, tmp_value, tb);
+        Py_XDECREF(tmp_tb);
+#else
+        PyThreadState *tstate = __Pyx_PyThreadState_Current;
+        PyObject* tmp_tb = tstate->curexc_traceback;
+        if (tb != tmp_tb) {
+            Py_INCREF(tb);
+            tstate->curexc_traceback = tb;
+            Py_XDECREF(tmp_tb);
+        }
+#endif
+    }
+bad:
+    Py_XDECREF(owned_instance);
+    return;
+}
+#endif
+
+/* RaiseTooManyValuesToUnpack */
+  static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
+    PyErr_Format(PyExc_ValueError,
+                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
+}
+
+/* RaiseNeedMoreValuesToUnpack */
+  static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
+    PyErr_Format(PyExc_ValueError,
+                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
+                 index, (index == 1) ? "" : "s");
+}
+
+/* RaiseNoneIterError */
+  static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
+    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+}
+
+/* SaveResetException */
+  #if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+    #if PY_VERSION_HEX >= 0x030700A2
+    *type = tstate->exc_state.exc_type;
+    *value = tstate->exc_state.exc_value;
+    *tb = tstate->exc_state.exc_traceback;
+    #else
+    *type = tstate->exc_type;
+    *value = tstate->exc_value;
+    *tb = tstate->exc_traceback;
+    #endif
+    Py_XINCREF(*type);
+    Py_XINCREF(*value);
+    Py_XINCREF(*tb);
+}
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+    PyObject *tmp_type, *tmp_value, *tmp_tb;
+    #if PY_VERSION_HEX >= 0x030700A2
+    tmp_type = tstate->exc_state.exc_type;
+    tmp_value = tstate->exc_state.exc_value;
+    tmp_tb = tstate->exc_state.exc_traceback;
+    tstate->exc_state.exc_type = type;
+    tstate->exc_state.exc_value = value;
+    tstate->exc_state.exc_traceback = tb;
+    #else
+    tmp_type = tstate->exc_type;
+    tmp_value = tstate->exc_value;
+    tmp_tb = tstate->exc_traceback;
+    tstate->exc_type = type;
+    tstate->exc_value = value;
+    tstate->exc_traceback = tb;
+    #endif
+    Py_XDECREF(tmp_type);
+    Py_XDECREF(tmp_value);
+    Py_XDECREF(tmp_tb);
+}
+#endif
+
+/* PyErrExceptionMatches */
+  #if CYTHON_FAST_THREAD_STATE
+static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+    Py_ssize_t i, n;
+    n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+    for (i=0; i<n; i++) {
+        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+    }
+#endif
+    for (i=0; i<n; i++) {
+        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
+    }
+    return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
+    PyObject *exc_type = tstate->curexc_type;
+    if (exc_type == err) return 1;
+    if (unlikely(!exc_type)) return 0;
+    if (unlikely(PyTuple_Check(err)))
+        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
+    return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
+}
+#endif
+
+/* GetException */
+    #if CYTHON_FAST_THREAD_STATE
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
+#endif
+    PyObject *local_type, *local_value, *local_tb;
+#if CYTHON_FAST_THREAD_STATE
+    PyObject *tmp_type, *tmp_value, *tmp_tb;
+    local_type = tstate->curexc_type;
+    local_value = tstate->curexc_value;
+    local_tb = tstate->curexc_traceback;
+    tstate->curexc_type = 0;
+    tstate->curexc_value = 0;
+    tstate->curexc_traceback = 0;
+#else
+    PyErr_Fetch(&local_type, &local_value, &local_tb);
+#endif
+    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
+#if CYTHON_FAST_THREAD_STATE
+    if (unlikely(tstate->curexc_type))
+#else
+    if (unlikely(PyErr_Occurred()))
+#endif
+        goto bad;
+    #if PY_MAJOR_VERSION >= 3
+    if (local_tb) {
+        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
+            goto bad;
+    }
+    #endif
+    Py_XINCREF(local_tb);
+    Py_XINCREF(local_type);
+    Py_XINCREF(local_value);
+    *type = local_type;
+    *value = local_value;
+    *tb = local_tb;
+#if CYTHON_FAST_THREAD_STATE
+    #if PY_VERSION_HEX >= 0x030700A2
+    tmp_type = tstate->exc_state.exc_type;
+    tmp_value = tstate->exc_state.exc_value;
+    tmp_tb = tstate->exc_state.exc_traceback;
+    tstate->exc_state.exc_type = local_type;
+    tstate->exc_state.exc_value = local_value;
+    tstate->exc_state.exc_traceback = local_tb;
+    #else
+    tmp_type = tstate->exc_type;
+    tmp_value = tstate->exc_value;
+    tmp_tb = tstate->exc_traceback;
+    tstate->exc_type = local_type;
+    tstate->exc_value = local_value;
+    tstate->exc_traceback =
local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* Import */ + static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if (strchr(__Pyx_MODULE_NAME, '.')) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* CLineInTraceback */ + #ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (PyObject_Not(use_cline) != 0) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ + static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ + #include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, 
c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + + /* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if 
(is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return ::std::complex< float >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return x + y*(__pyx_t_float_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + __pyx_t_float_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabsf(b.real) >= fabsf(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + float r = b.imag / b.real; + float s = 1.0 / (b.real + b.imag * r); + return __pyx_t_float_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + float r = b.real / b.imag; + float s = 1.0 / (b.imag + b.real * r); + return __pyx_t_float_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + float denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_float_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + 
z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrtf(z.real*z.real + z.imag*z.imag); + #else + return hypotf(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + float r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + float denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(a, a); + case 3: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, a); + case 4: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = powf(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2f(0, -1); + } + } else { + r = __Pyx_c_abs_float(a); + theta = atan2f(a.imag, a.real); + } + lnr = logf(r); + z_r = expf(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cosf(z_theta); + z.imag = z_r * sinf(z_theta); + return z; + } + #endif +#endif + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return ::std::complex< double >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return x + y*(__pyx_t_double_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + __pyx_t_double_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs(b.real) >= 
fabs(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + double r = b.imag / b.real; + double s = 1.0 / (b.real + b.imag * r); + return __pyx_t_double_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + double r = b.real / b.imag; + double s = 1.0 / (b.imag + b.real * r); + return __pyx_t_double_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + double denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_double_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt(z.real*z.real + z.imag*z.imag); + #else + return hypot(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + double r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + double denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(a, a); + case 3: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, a); + case 4: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = pow(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2(0, -1); + } + } else { + r = __Pyx_c_abs_double(a); + theta = atan2(a.imag, a.real); + } + lnr = log(r); + z_r = exp(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos(z_theta); + z.imag = z_r * sin(z_theta); + return z; + } + #endif +#endif + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) 
value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { + const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(enum NPY_TYPES) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(enum NPY_TYPES) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + 
} +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) 
((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << 
PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) 
<< PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* FastTypeChecks */ + #if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { + if (likely(err == exc_type)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } + return PyErr_GivenExceptionMatches(err, exc_type); +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { + if (likely(err == exc_type1 || err == exc_type2)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); + } + return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); +} +#endif + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* ModuleImport */ + #ifndef __PYX_HAVE_RT_ImportModule +#define __PYX_HAVE_RT_ImportModule +static PyObject *__Pyx_ImportModule(const char *name) { + PyObject *py_name = 0; + PyObject *py_module = 0; + py_name = __Pyx_PyIdentifier_FromString(name); + if (!py_name) + goto bad; + py_module = PyImport_Import(py_name); + Py_DECREF(py_name); + return py_module; +bad: + Py_XDECREF(py_name); + return 0; +} +#endif + +/* TypeImport */ + #ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, + size_t size, int strict) +{ + PyObject *py_module = 0; + PyObject *result = 0; + PyObject *py_name = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + py_module = __Pyx_ImportModule(module_name); + if (!py_module) + goto bad; + py_name = __Pyx_PyIdentifier_FromString(class_name); + if (!py_name) + goto bad; + result = PyObject_GetAttr(py_module, py_name); + Py_DECREF(py_name); + py_name = 0; + Py_DECREF(py_module); + py_module = 0; + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if (!strict && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", + module_name, class_name, basicsize, size); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + else if ((size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s has the wrong size, try recompiling. 
Expected %zd, got %zd", + module_name, class_name, basicsize, size); + goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(py_module); + Py_XDECREF(result); + return NULL; +} +#endif + +/* InitStrings */ + static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + PyErr_Clear(); + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { 
+ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(x); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? 
digits[0] : 0;
+            if (size == -1) ival = -ival;
+            return ival;
+        } else {
+            switch (size) {
+                case 2:
+                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+                        return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+                case -2:
+                    if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+                        return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+                case 3:
+                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+                        return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+                case -3:
+                    if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+                        return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+                case 4:
+                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+                        return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+                case -4:
+                    if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+                        return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                    }
+                    break;
+            }
+        }
+  #endif
+    return PyLong_AsSsize_t(b);
+  }
+  x = PyNumber_Index(b);
+  if (!x) return -1;
+  ival = PyInt_AsSsize_t(x);
+  Py_DECREF(x);
+  return ival;
+}
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+    return PyInt_FromSize_t(ival);
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/CDARTS_detection/mmcv/video/optflow_warp/flow_warp_module.pyx b/CDARTS_detection/mmcv/video/optflow_warp/flow_warp_module.pyx
new file mode 100644
index 0000000..0aca1b6
--- /dev/null
+++ b/CDARTS_detection/mmcv/video/optflow_warp/flow_warp_module.pyx
@@ -0,0 +1,27 @@
+STUFF = "Hi"
+
+import numpy as np
+cimport numpy as np
+
+np.import_array()
+
+cdef extern from "flow_warp.hpp":
+    void FlowWarp(double* img, double* flow1, double* out, const int height, const int width, const int channels, const int filling_value, const int interpolateMode)
+
+def flow_warp_c(np.ndarray[double, ndim=3, mode="c"] img_array not None,
+                np.ndarray[double, ndim=3, mode="c"] flow_array not None,
+                int filling_value=0,
+                int interpolate_mode=1):
+
+    out_array = np.zeros_like(img_array)
+
+    FlowWarp(<double*> np.PyArray_DATA(img_array),
+             <double*> np.PyArray_DATA(flow_array),
+             <double*> np.PyArray_DATA(out_array),
+             out_array.shape[0],
+             out_array.shape[1],
+             out_array.shape[2],
+             filling_value,
+             interpolate_mode)
+
+    return out_array
diff --git a/CDARTS_detection/mmcv/video/processing.py b/CDARTS_detection/mmcv/video/processing.py
new file mode 100644
index 0000000..ca32354
--- /dev/null
+++ b/CDARTS_detection/mmcv/video/processing.py
@@ -0,0 +1,159 @@
+import os
+import os.path as osp
+import subprocess
+import tempfile
+
+from mmcv.utils import requires_executable
+
+
+@requires_executable('ffmpeg')
+def convert_video(in_file, out_file, print_cmd=False, pre_options='',
+                  **kwargs):
+    """Convert a video with ffmpeg.
+
+    This provides a general api to ffmpeg, the executed command is::
+
+        `ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`
+
+    Options (kwargs) are mapped to ffmpeg commands with the following rules:
+
+    - key=val: "-key val"
+    - key=True: "-key"
+    - key=False: ""
+
+    Args:
+        in_file (str): Input video filename.
+        out_file (str): Output video filename.
+        pre_options (str): Options that appear before "-i <in_file>".
+        print_cmd (bool): Whether to print the final ffmpeg command.
+    """
+    options = []
+    for k, v in kwargs.items():
+        if isinstance(v, bool):
+            if v:
+                options.append('-{}'.format(k))
+        elif k == 'log_level':
+            assert v in [
+                'quiet', 'panic', 'fatal', 'error', 'warning', 'info',
+                'verbose', 'debug', 'trace'
+            ]
+            options.append('-loglevel {}'.format(v))
+        else:
+            options.append('-{} {}'.format(k, v))
+    cmd = 'ffmpeg -y {} -i {} {} {}'.format(pre_options, in_file,
+                                            ' '.join(options), out_file)
+    if print_cmd:
+        print(cmd)
+    subprocess.call(cmd, shell=True)
+
+
+@requires_executable('ffmpeg')
+def resize_video(in_file,
+                 out_file,
+                 size=None,
+                 ratio=None,
+                 keep_ar=False,
+                 log_level='info',
+                 print_cmd=False,
+                 **kwargs):
+    """Resize a video.
+
+    Args:
+        in_file (str): Input video filename.
+        out_file (str): Output video filename.
+        size (tuple): Expected size (w, h), e.g., (320, 240) or (320, -1).
+        ratio (tuple or float): Expected resize ratio, (2, 0.5) means
+            (w*2, h*0.5).
+        keep_ar (bool): Whether to keep original aspect ratio.
+        log_level (str): Logging level of ffmpeg.
+        print_cmd (bool): Whether to print the final ffmpeg command.
+    """
+    if size is None and ratio is None:
+        raise ValueError('expected size or ratio must be specified')
+    elif size is not None and ratio is not None:
+        raise ValueError('size and ratio cannot be specified at the same time')
+    options = {'log_level': log_level}
+    if size:
+        if not keep_ar:
+            options['vf'] = 'scale={}:{}'.format(size[0], size[1])
+        else:
+            options['vf'] = ('scale=w={}:h={}:force_original_aspect_ratio'
+                             '=decrease'.format(size[0], size[1]))
+    else:
+        if not isinstance(ratio, tuple):
+            ratio = (ratio, ratio)
+        options['vf'] = 'scale="trunc(iw*{}):trunc(ih*{})"'.format(
+            ratio[0], ratio[1])
+    convert_video(in_file, out_file, print_cmd, **options)
+
+
+@requires_executable('ffmpeg')
+def cut_video(in_file,
+              out_file,
+              start=None,
+              end=None,
+              vcodec=None,
+              acodec=None,
+              log_level='info',
+              print_cmd=False,
+              **kwargs):
+    """Cut a clip from a video.
+
+    Args:
+        in_file (str): Input video filename.
+        out_file (str): Output video filename.
+        start (None or float): Start time (in seconds).
+        end (None or float): End time (in seconds).
+        vcodec (None or str): Output video codec, None for unchanged.
+        acodec (None or str): Output audio codec, None for unchanged.
+        log_level (str): Logging level of ffmpeg.
+        print_cmd (bool): Whether to print the final ffmpeg command.
+    """
+    options = {'log_level': log_level}
+    if vcodec is None:
+        options['vcodec'] = 'copy'
+    if acodec is None:
+        options['acodec'] = 'copy'
+    if start:
+        options['ss'] = start
+    else:
+        start = 0
+    if end:
+        options['t'] = end - start
+    convert_video(in_file, out_file, print_cmd, **options)
+
+
+@requires_executable('ffmpeg')
+def concat_video(video_list,
+                 out_file,
+                 vcodec=None,
+                 acodec=None,
+                 log_level='info',
+                 print_cmd=False,
+                 **kwargs):
+    """Concatenate multiple videos into a single one.
+
+    Args:
+        video_list (list): A list of video filenames.
+        out_file (str): Output video filename.
+        vcodec (None or str): Output video codec, None for unchanged.
+        acodec (None or str): Output audio codec, None for unchanged.
+        log_level (str): Logging level of ffmpeg.
+        print_cmd (bool): Whether to print the final ffmpeg command.
+    """
+    _, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True)
+    with open(tmp_filename, 'w') as f:
+        for filename in video_list:
+            f.write('file {}\n'.format(osp.abspath(filename)))
+    options = {'log_level': log_level}
+    if vcodec is None:
+        options['vcodec'] = 'copy'
+    if acodec is None:
+        options['acodec'] = 'copy'
+    convert_video(
+        tmp_filename,
+        out_file,
+        print_cmd,
+        pre_options='-f concat -safe 0',
+        **options)
+    os.remove(tmp_filename)
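All four wrappers above funnel through `convert_video`, so a short usage sketch may be clearer than the docstrings alone. This is an illustrative snippet rather than part of the patch: the file names are invented, and it assumes `mmcv.video` re-exports these helpers the way upstream mmcv does.

```python
# Hedged usage sketch for the ffmpeg wrappers above (file names invented).
from mmcv.video import convert_video, cut_video, concat_video

# kwargs follow the documented mapping: key=val -> "-key val",
# key=True -> "-key", key=False -> dropped entirely.
convert_video('in.mp4', 'out.webm', print_cmd=True, log_level='warning')

# Keep seconds 5..15; vcodec/acodec default to stream copy ('copy').
cut_video('in.mp4', 'clip.mp4', start=5, end=15)

# Writes a temporary "file <abspath>" list, then calls convert_video with
# pre_options='-f concat -safe 0', as in concat_video above.
concat_video(['clip1.mp4', 'clip2.mp4'], 'joined.mp4')
```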
diff --git a/CDARTS_detection/mmcv/visualization/__init__.py b/CDARTS_detection/mmcv/visualization/__init__.py
new file mode 100644
index 0000000..f0c06c4
--- /dev/null
+++ b/CDARTS_detection/mmcv/visualization/__init__.py
@@ -0,0 +1,8 @@
+from .color import Color, color_val
+from .image import imshow, imshow_bboxes, imshow_det_bboxes
+from .optflow import flowshow, flow2rgb, make_color_wheel
+
+__all__ = [
+    'Color', 'color_val', 'imshow', 'imshow_bboxes', 'imshow_det_bboxes',
+    'flowshow', 'flow2rgb', 'make_color_wheel'
+]
zZP+!YC|RK+d_US~m57QmkB{1PG0EGwI%&{V3=OHSrI$M}#o3f~*pNwfXvqA5nOu8e zv>9jId~VBvv{HtvW4AEtW3nsrxp8Rdo*t9dA;z}MD?3i5o{>kfn~~FZY8)9yjwFuBNre zrcYLxbiDy=b?dx)xjwaG<&cT1g#38qFLHa(han61CVA^)nD$hl=AX`&@u0ai_J9Zd zSh^BE)(!-8+L|6gf6on|f-hazYaoN28Ap=cptTkG3AE6Tv@sz-?S6ov>3ZZm?($|F z_o3FKu8-9OIkTeE(xZ?-F@Vb0qyD$r}=g5|0)O|vYIOwGS);Wwe(FWx~O8> zOXn)xdMgeG*H%du3AT*=H+95mYI9k++K*daKe>j+nERM26dKlPI&z6tr3^PW$USG& zsWVZ)c-<7FamHF&4~kSu89}?bfN!LQpxp|ReMD*<(S|7>OzwWIzMx~G6C^>DNH1#n znHdeT;wXA&dD1s42rugRSq`V0Rc|DTFX-5B3lWQ~{CnXlqX%v)!i`Qr%uOjyxnxag7du#$3 z3$o(qfE!j*P03gLy*N@cMy0`%zhwf}7*ofT@ zd|Rg-_&V~c45F4CF`dWgXfj+hbc4vfyZ*kL47`?4ei5k!T5+_4>m+Y}`0hK}#UXOkq47Ep%pq&%w8_F+=`=<@q)b*Ug8+YGYnijD zE{7t})SHw6-viIYjr6vgqiz2uHzHf7n^0~DnrFYuMVO4E? zW5@d)yCZYwW- zbBtq0O8{=Qeroax<#=TNN}Auw534Clt9nGyp-)=HxGD3z2#YU_{;g;K0G`0;feB3V z7hsb4VJ)5SS+dx(!MBhuD7_a*FCdFW^cnWCz-`q(krvV#J=)2% zhu!O5I|ptc9H5EC^hCOp7P&(h1n9?7NaG<84`fC*pX-&=8niemE9v|J!ztx~MAGym zFJok$V@C(6ivYpjp=|idFxED z+m(2;cjd`VLU2zYS2f1+UIkUse%t$KqBfdsm zs~wBblj2L%DGs*f25vN4y7S)JM)U6NhwFE5|EPKMz55UD-Tq((Bb`zhNqk*N&_2?b zo3Z~!Nx3~4b;Ft3XTzD=nA%yxnHqt33(~{{SBY;Z;W~U|dAz-oMxI1v1+`$qR&r@K z-=vA>Q8lVE94G{)?YQA6)+Vj`YDQfcfi1k5N0{x0kdRzOWxP>U(8^`DX(Q9s0Jrd= zZJSE~S~W#k0r0As=fJrLU{)JLuv?gqTx#J@!(DYtMM2MLbz|z{H>M10b#|xjtZ8P& zr7AK4aT(Py)`|wM1wdT9d+Y$-?)0y8}G&6zWYb9lZv4|~1#N8it6 z(qrsj?8@Un{sUSbqEk%qm?b>qoMaJ;(81e@-NXw$i@9+lX@*V6o@&H?(h6IU*OjkY zU-Pi7`l_S4kUDBZ_0$@qt`aZVVDlXI*??ERie5!|l%+=)G|hDMU^F#^PvY#)DXCddmjcocjx)7RDpMHY;7prU*p%cWNP zZw;JA=`fXv4tC$I1g2n?;46wUDuzWwno(L{k56U1z%G;E6f=L)%U~{zOzH(k1J}@( z*C+GX(61RJ-GR@HcO#t!)8vtFyH4sH|vo8O~b!^ zz5nEG)HVQ+l8u@x7-Dve$eakX4T&p%2Y=X!$nO;5D@0DYC%(B`IA?5y5i~Jp4j^$Q zpow+)$F(L9Z-=d%k||fY--wIen>ah{*u{GH2Q%XBj8}NajnX~i_U)bV0|u!{NGVR- zixKd8v}!1?bj!wL{Pm?rZ)p3atDGCG^r*i@|6vnotE9eNbk6v3_XPI7<)#ayS2m%6 zHKqS^^31buSUu*Si&bmY{=XL2O0UC8x5Lf^bk=XPRj2GIaSJDNo0X0d+iZ)iXper` z#jYF0CU)wTJ<=(in{2gKcFP`c@A$?U-)5?XQ)??xwznBFrB|QZ1gE>j$~KU-tH>?3 zS}&VQobXj2Yno+Wxugquz3lHW+8uTvKSqx|w#xn)_ZT#qzh@4!XSQB8E^U$)va&6| zL!KXI=__rDI`tAcr`&q+>>!8|IYqwwRFWcdp47=Yxt5~uGHsm2nHiqWw2rT(_EDB< ztUjdapq^gjTG9UGhNXf@q+>$f<~NZ9d1sC0b2--g!Bahvi@2z<3rnX_91}%Yi_s)V zvm(gndK|&x6}niYCuw$?4vU$&WDLs#c_2sGD;*TGC`V^Bat)4LT1})mfu}OR=GTR> zK&Ehcw#b8Mnr4Q!vuZDNu`uZsvb&(UNAHds%Ot)-rsVo%@?ZLUPHba5bmz!mc4Ua8 z_owLa{O6;GjNJX z=am=bsZ1+Jj`GUQzcdBJbXj?mD4k?@#hC=8=XvEM@^#geW?abhK_%vqorU>^%!?(q zbwI0C)2?{M%eT1TcYy{#`odZoQ-J8X zfkt0!>GxBhAVm&}-Bv<5NJ?De%9%EBiQ3E5Qt@uVzv6|UOZD@{im$}VS-C3@G}tVh zW4GjV#olJhpE|4N1ty3rZ-WX{)8al#f?As{WKb1!TW>KUTDR5)-nLeWZauQ9n!q|_ z=fXk*GY!%P-lfZ(hz-p}^UxX~sUB>2s#W$TC<2Zfs{MDdT2uaN{qgZM|efaJSeuVj>>1Z~2$oe_h1nrRpnJW_)!|8|7$mn@`CD&Vlj{Po@KBsl`(kfrDcmH zQc?%_9Y@+waX^cL2^MHMo?Xj+`!fdyzai|5GXu_;XUM~JN{p6Auj{$|Q2vXSv(RjR+7*bE85o(n^+tY}cIxXaNQx_o369)xzDA+Bs0=Z9z zw--Ew5u)*= 0 and channel <= 255 + return color + elif isinstance(color, int): + assert color >= 0 and color <= 255 + return color, color, color + elif isinstance(color, np.ndarray): + assert color.ndim == 1 and color.size == 3 + assert np.all((color >= 0) & (color <= 255)) + color = color.astype(np.uint8) + return tuple(color) + else: + raise TypeError('Invalid type for color: {}'.format(type(color))) diff --git a/CDARTS_detection/mmcv/visualization/image.py b/CDARTS_detection/mmcv/visualization/image.py new file mode 100644 index 0000000..d999662 --- /dev/null +++ b/CDARTS_detection/mmcv/visualization/image.py @@ -0,0 +1,146 @@ +import cv2 +import numpy as np + +from mmcv.image import imread, imwrite +from .color import color_val +import colorsys + +def imshow(img, win_name='', wait_time=0): + """Show an image. 
diff --git a/CDARTS_detection/mmcv/visualization/image.py b/CDARTS_detection/mmcv/visualization/image.py
new file mode 100644
index 0000000..d999662
--- /dev/null
+++ b/CDARTS_detection/mmcv/visualization/image.py
@@ -0,0 +1,146 @@
+import cv2
+import numpy as np
+
+from mmcv.image import imread, imwrite
+from .color import color_val
+import colorsys
+
+def imshow(img, win_name='', wait_time=0):
+    """Show an image.
+
+    Args:
+        img (str or ndarray): The image to be displayed.
+        win_name (str): The window name.
+        wait_time (int): Value of waitKey param.
+    """
+    cv2.imshow(win_name, imread(img))
+    cv2.waitKey(wait_time)
+
+
+def imshow_bboxes(img,
+                  bboxes,
+                  colors='green',
+                  top_k=-1,
+                  thickness=1,
+                  show=True,
+                  win_name='',
+                  wait_time=0,
+                  out_file=None):
+    """Draw bboxes on an image.
+
+    Args:
+        img (str or ndarray): The image to be displayed.
+        bboxes (list or ndarray): A list of ndarray of shape (k, 4).
+        colors (list[str or tuple or Color]): A list of colors.
+        top_k (int): Plot the first k bboxes only if set positive.
+        thickness (int): Thickness of lines.
+        show (bool): Whether to show the image.
+        win_name (str): The window name.
+        wait_time (int): Value of waitKey param.
+        out_file (str, optional): The filename to write the image.
+    """
+    img = imread(img)
+
+    if isinstance(bboxes, np.ndarray):
+        bboxes = [bboxes]
+    if not isinstance(colors, list):
+        colors = [colors for _ in range(len(bboxes))]
+    colors = [color_val(c) for c in colors]
+    assert len(bboxes) == len(colors)
+
+    for i, _bboxes in enumerate(bboxes):
+        _bboxes = _bboxes.astype(np.int32)
+        if top_k <= 0:
+            _top_k = _bboxes.shape[0]
+        else:
+            _top_k = min(top_k, _bboxes.shape[0])
+        for j in range(_top_k):
+            left_top = (_bboxes[j, 0], _bboxes[j, 1])
+            right_bottom = (_bboxes[j, 2], _bboxes[j, 3])
+            cv2.rectangle(
+                img, left_top, right_bottom, colors[i], thickness=thickness)
+
+    if show:
+        imshow(img, win_name, wait_time)
+    if out_file is not None:
+        imwrite(img, out_file)
+
+
+def random_colors(N, bright=False):
+    """
+    Generate random colors.
+    To get visually distinct colors, generate them in HSV space then
+    convert to RGB.
+    """
+    brightness = 1.0 if bright else 0.7
+    hsv = [(i / N, 1, brightness) for i in range(N)]
+    colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
+    return colors
+
+
+def imshow_det_bboxes(img,
+                      bboxes,
+                      labels,
+                      class_names=None,
+                      score_thr=0,
+                      bbox_color='green',
+                      text_color='green',
+                      thickness=3,
+                      font_scale=0.8,
+                      show=True,
+                      win_name='',
+                      wait_time=0,
+                      out_file=None):
+    """Draw bboxes and class labels (with scores) on an image.
+
+    Args:
+        img (str or ndarray): The image to be displayed.
+        bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
+            (n, 5).
+        labels (ndarray): Labels of bboxes.
+        class_names (list[str]): Names of each class.
+        score_thr (float): Minimum score of bboxes to be shown.
+        bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
+        text_color (str or tuple or :obj:`Color`): Color of texts.
+        thickness (int): Thickness of lines.
+        font_scale (float): Font scale of texts.
+        show (bool): Whether to show the image.
+        win_name (str): The window name.
+        wait_time (int): Value of waitKey param.
+        out_file (str or None): The filename to write the image.
+    """
+    assert bboxes.ndim == 2
+    assert labels.ndim == 1
+    assert bboxes.shape[0] == labels.shape[0]
+    assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
+    img = imread(img)
+
+    if score_thr > 0:
+        assert bboxes.shape[1] == 5
+        scores = bboxes[:, -1]
+        inds = scores > score_thr
+        bboxes = bboxes[inds, :]
+        labels = labels[inds]
+
+    bbox_color = random_colors(80)  # color_val(bbox_color)
+    text_color = random_colors(80)  # color_val(text_color)
+
+    for bbox, label in zip(bboxes, labels):
+        bbox_int = bbox.astype(np.int32)
+        left_top = (bbox_int[0], bbox_int[1])
+        right_bottom = (bbox_int[2], bbox_int[3])
+        tmp_bbox_color = bbox_color[label]
+        tmp_bbox_color = (int(tmp_bbox_color[2]*255), int(tmp_bbox_color[1]*255), int(tmp_bbox_color[0]*255))
+        cv2.rectangle(
+            img, left_top, right_bottom, tmp_bbox_color, thickness=thickness)
+        label_text = class_names[
+            label] if class_names is not None else 'cls {}'.format(label)
+        if len(bbox) > 4:
+            label_text += '|{:.02f}'.format(bbox[-1])
+        cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2),
+                    cv2.FONT_HERSHEY_COMPLEX, font_scale, tmp_bbox_color)
+
+    if show:
+        imshow(img, win_name, wait_time)
+    if out_file is not None:
+        imwrite(img, out_file)
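Since `imshow_det_bboxes` expects `(n, 5)` boxes whenever `score_thr` is set, a minimal smoke test may help; the image and detections below are synthetic, invented purely for illustration.

```python
# Hypothetical smoke test for imshow_det_bboxes (synthetic inputs).
import numpy as np
from mmcv.visualization import imshow_det_bboxes

img = np.zeros((240, 320, 3), dtype=np.uint8)
# Two (x1, y1, x2, y2, score) rows; score_thr filters on the 5th column.
bboxes = np.array([[20, 20, 100, 120, 0.9],
                   [150, 40, 300, 200, 0.3]], dtype=np.float32)
labels = np.array([0, 1])
imshow_det_bboxes(img, bboxes, labels,
                  class_names=['cat', 'dog'],
                  score_thr=0.5,           # drops the 0.3 detection
                  show=False,              # skip the cv2 window
                  out_file='det_vis.png')  # written via mmcv's imwrite
```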
diff --git a/CDARTS_detection/mmcv/visualization/optflow.py b/CDARTS_detection/mmcv/visualization/optflow.py
new file mode 100644
index 0000000..0068a74
--- /dev/null
+++ b/CDARTS_detection/mmcv/visualization/optflow.py
@@ -0,0 +1,113 @@
+from __future__ import division
+
+import numpy as np
+
+from mmcv.image import rgb2bgr
+from mmcv.video import flowread
+from .image import imshow
+
+
+def flowshow(flow, win_name='', wait_time=0):
+    """Show optical flow.
+
+    Args:
+        flow (ndarray or str): The optical flow to be displayed.
+        win_name (str): The window name.
+        wait_time (int): Value of waitKey param.
+    """
+    flow = flowread(flow)
+    flow_img = flow2rgb(flow)
+    imshow(rgb2bgr(flow_img), win_name, wait_time)
+
+
+def flow2rgb(flow, color_wheel=None, unknown_thr=1e6):
+    """Convert flow map to RGB image.
+
+    Args:
+        flow (ndarray): Array of optical flow.
+        color_wheel (ndarray or None): Color wheel used to map flow field to
+            RGB colorspace. Default color wheel will be used if not specified.
+        unknown_thr (float): Values above this threshold will be marked as
+            unknown and thus ignored.
+
+    Returns:
+        ndarray: RGB image that can be visualized.
+    """
+    assert flow.ndim == 3 and flow.shape[-1] == 2
+    if color_wheel is None:
+        color_wheel = make_color_wheel()
+    assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3
+    num_bins = color_wheel.shape[0]
+
+    dx = flow[:, :, 0].copy()
+    dy = flow[:, :, 1].copy()
+
+    ignore_inds = (
+        np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) |
+        (np.abs(dy) > unknown_thr))
+    dx[ignore_inds] = 0
+    dy[ignore_inds] = 0
+
+    rad = np.sqrt(dx**2 + dy**2)
+    if np.any(rad > np.finfo(float).eps):
+        max_rad = np.max(rad)
+        dx /= max_rad
+        dy /= max_rad
+
+    [h, w] = dx.shape
+
+    rad = np.sqrt(dx**2 + dy**2)
+    angle = np.arctan2(-dy, -dx) / np.pi
+
+    bin_real = (angle + 1) / 2 * (num_bins - 1)
+    bin_left = np.floor(bin_real).astype(int)
+    bin_right = (bin_left + 1) % num_bins
+    w = (bin_real - bin_left.astype(np.float32))[..., None]
+    flow_img = (1 -
+                w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :]
+    small_ind = rad <= 1
+    flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind])
+    flow_img[np.logical_not(small_ind)] *= 0.75
+
+    flow_img[ignore_inds, :] = 0
+
+    return flow_img
+
+
+def make_color_wheel(bins=None):
+    """Build a color wheel.
+
+    Args:
+        bins (list or tuple, optional): Specify the number of bins for each
+            color range, corresponding to six ranges: red -> yellow,
+            yellow -> green, green -> cyan, cyan -> blue, blue -> magenta,
+            magenta -> red. [15, 6, 4, 11, 13, 6] is used by default
+            (see Middlebury).
+
+    Returns:
+        ndarray: Color wheel of shape (total_bins, 3).
+    """
+    if bins is None:
+        bins = [15, 6, 4, 11, 13, 6]
+    assert len(bins) == 6
+
+    RY, YG, GC, CB, BM, MR = tuple(bins)
+
+    ry = [1, np.arange(RY) / RY, 0]
+    yg = [1 - np.arange(YG) / YG, 1, 0]
+    gc = [0, 1, np.arange(GC) / GC]
+    cb = [0, 1 - np.arange(CB) / CB, 1]
+    bm = [np.arange(BM) / BM, 0, 1]
+    mr = [1, 0, 1 - np.arange(MR) / MR]
+
+    num_bins = RY + YG + GC + CB + BM + MR
+
+    color_wheel = np.zeros((3, num_bins), dtype=np.float32)
+
+    col = 0
+    for i, color in enumerate([ry, yg, gc, cb, bm, mr]):
+        for j in range(3):
+            color_wheel[j, col:col + bins[i]] = color[j]
+        col += bins[i]
+
+    return color_wheel.T
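A minimal sketch of `flow2rgb` on a synthetic flow field; the shapes follow the assertions in the function above, and the bin count matches `make_color_wheel`'s default `[15, 6, 4, 11, 13, 6]`.

```python
# Synthetic (h, w, 2) flow field: dx grows left-to-right, dy top-to-bottom.
import numpy as np
from mmcv.visualization import flow2rgb, make_color_wheel

h, w = 64, 64
flow = np.dstack([np.linspace(-1, 1, w)[None, :].repeat(h, axis=0),
                  np.linspace(-1, 1, h)[:, None].repeat(w, axis=1)])

rgb = flow2rgb(flow)          # float image in [0, 1], shape (64, 64, 3)
assert rgb.shape == (h, w, 3)

wheel = make_color_wheel()    # default bins: 15+6+4+11+13+6 = 55
assert wheel.shape == (55, 3)
```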
+
+
+def make_color_wheel(bins=None):
+    """Build a color wheel.
+
+    Args:
+        bins (list or tuple, optional): Specify the number of bins for each
+            color range, corresponding to six ranges: red -> yellow,
+            yellow -> green, green -> cyan, cyan -> blue, blue -> magenta,
+            magenta -> red. [15, 6, 4, 11, 13, 6] is used by default
+            (see Middlebury).
+
+    Returns:
+        ndarray: Color wheel of shape (total_bins, 3).
+    """
+    if bins is None:
+        bins = [15, 6, 4, 11, 13, 6]
+    assert len(bins) == 6
+
+    RY, YG, GC, CB, BM, MR = tuple(bins)
+
+    ry = [1, np.arange(RY) / RY, 0]
+    yg = [1 - np.arange(YG) / YG, 1, 0]
+    gc = [0, 1, np.arange(GC) / GC]
+    cb = [0, 1 - np.arange(CB) / CB, 1]
+    bm = [np.arange(BM) / BM, 0, 1]
+    mr = [1, 0, 1 - np.arange(MR) / MR]
+
+    num_bins = RY + YG + GC + CB + BM + MR
+
+    color_wheel = np.zeros((3, num_bins), dtype=np.float32)
+
+    col = 0
+    for i, color in enumerate([ry, yg, gc, cb, bm, mr]):
+        for j in range(3):
+            color_wheel[j, col:col + bins[i]] = color[j]
+        col += bins[i]
+
+    return color_wheel.T
diff --git a/CDARTS_detection/mmdet.egg-info/PKG-INFO b/CDARTS_detection/mmdet.egg-info/PKG-INFO
new file mode 100644
index 0000000..0c0bc11
--- /dev/null
+++ b/CDARTS_detection/mmdet.egg-info/PKG-INFO
@@ -0,0 +1,89 @@
+Metadata-Version: 2.1
+Name: mmdet
+Version: 0.6.0+889383
+Summary: Open MMLab Detection Toolbox
+Home-page: https://github.com/open-mmlab/mmdetection
+License: Apache License 2.0
+Keywords: computer vision,object detection
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+
+# Hit-Detector Code Base
+
+Implementation of our CVPR2020 paper [Hit-Detector: Hierarchical Trinity Architecture Search for Object Detection](https://arxiv.org/pdf/2003.11818.pdf)
+
+We have released the searched Hit-Detector architecture.
+
+### Environments
+- Python 3.6
+- Pytorch>=1.1.0
+- Torchvision == 0.3.0
+
+You can run ```sh env.sh``` directly to set up the running environment.
+We use 8 GPUs (32GB V100) to train our detector; you can adjust the batch size in the configs yourself.
+
+### Data Preparation
+
+Your directory tree should look like this:
+
+````bash
+$HitDet.pytorch/data
+├── coco
+│   ├── annotations
+│   ├── train2017
+│   └── val2017
+│
+├── VOCdevkit
+│   ├── VOC2007
+│   │   ├── Annotations
+│   │   ├── ImageSets
+│   │   ├── JPEGImages
+│   │   ├── SegmentationClass
+│   │   └── SegmentationObject
+│   └── VOC2012
+│       ├── Annotations
+│       ├── ImageSets
+│       ├── JPEGImages
+│       ├── SegmentationClass
+│       └── SegmentationObject
+````
+
+### Getting Started
+
+Our pretrained backbone params can be found in [BaiduCloud](https://pan.baidu.com/s/1mH4-qowzqlydhQ5VIaK--g).
pwd: jbsm or [GoogleDrive](https://drive.google.com/open?id=1nFtzqsroOpMEpjc8Go1GKvope55UaxrC) + +Train the searched model: +``` +cd scripts +sh train_hit_det.sh +``` + +### Results on COCO minival + +| Model | Params | mAP | +| :---- | :----: | :----:| +| FPN | 41.8M | 36.6 | +| Hit-Det | 27.6M | 41.3 | + +## Citation +``` +@InProceedings{guo2020hit, +author = {Guo, Jianyuan and Han, Kai and Wang, Yunhe and Zhang, Chao and Yang, Zhaohui and Wu, Han and Chen, Xinghao and Xu, Chang}, +title = {Hit-Detector: Hierarchical Trinity Architecture Search for Object Detection}, +booktitle = {arXiv preprint arXiv:2003.11818}, +year = {2020} +} +``` + +## Acknowledgement +Our code is based on the open source project [MMDetection](https://github.com/open-mmlab/mmdetection). + + diff --git a/CDARTS_detection/mmdet.egg-info/SOURCES.txt b/CDARTS_detection/mmdet.egg-info/SOURCES.txt new file mode 100644 index 0000000..2959b1f --- /dev/null +++ b/CDARTS_detection/mmdet.egg-info/SOURCES.txt @@ -0,0 +1,301 @@ +README.md +setup.py +mmcv/__init__.py +mmcv/opencv_info.py +mmcv/version.py +mmcv/arraymisc/__init__.py +mmcv/arraymisc/quantization.py +mmcv/cnn/__init__.py +mmcv/cnn/alexnet.py +mmcv/cnn/resnet.py +mmcv/cnn/vgg.py +mmcv/cnn/weight_init.py +mmcv/fileio/__init__.py +mmcv/fileio/io.py +mmcv/fileio/parse.py +mmcv/fileio/handlers/__init__.py +mmcv/fileio/handlers/base.py +mmcv/fileio/handlers/json_handler.py +mmcv/fileio/handlers/pickle_handler.py +mmcv/fileio/handlers/yaml_handler.py +mmcv/image/__init__.py +mmcv/image/io.py +mmcv/image/transforms/__init__.py +mmcv/image/transforms/colorspace.py +mmcv/image/transforms/geometry.py +mmcv/image/transforms/normalize.py +mmcv/image/transforms/resize.py +mmcv/parallel/__init__.py +mmcv/parallel/_functions.py +mmcv/parallel/collate.py +mmcv/parallel/data_container.py +mmcv/parallel/data_parallel.py +mmcv/parallel/distributed.py +mmcv/parallel/scatter_gather.py +mmcv/runner/__init__.py +mmcv/runner/checkpoint.py +mmcv/runner/dist_utils.py +mmcv/runner/log_buffer.py +mmcv/runner/parallel_test.py +mmcv/runner/priority.py +mmcv/runner/runner.py +mmcv/runner/utils.py +mmcv/runner/hooks/__init__.py +mmcv/runner/hooks/checkpoint.py +mmcv/runner/hooks/closure.py +mmcv/runner/hooks/hook.py +mmcv/runner/hooks/iter_timer.py +mmcv/runner/hooks/lr_updater.py +mmcv/runner/hooks/memory.py +mmcv/runner/hooks/optimizer.py +mmcv/runner/hooks/sampler_seed.py +mmcv/runner/hooks/logger/__init__.py +mmcv/runner/hooks/logger/base.py +mmcv/runner/hooks/logger/pavi.py +mmcv/runner/hooks/logger/tensorboard.py +mmcv/runner/hooks/logger/text.py +mmcv/utils/__init__.py +mmcv/utils/config.py +mmcv/utils/misc.py +mmcv/utils/path.py +mmcv/utils/progressbar.py +mmcv/utils/timer.py +mmcv/video/__init__.py +mmcv/video/io.py +mmcv/video/optflow.py +mmcv/video/processing.py +mmcv/video/optflow_warp/__init__.py +mmcv/visualization/__init__.py +mmcv/visualization/color.py +mmcv/visualization/image.py +mmcv/visualization/optflow.py +mmdet/__init__.py +mmdet/version.py +mmdet.egg-info/PKG-INFO +mmdet.egg-info/SOURCES.txt +mmdet.egg-info/dependency_links.txt +mmdet.egg-info/not-zip-safe +mmdet.egg-info/requires.txt +mmdet.egg-info/top_level.txt +mmdet/apis/__init__.py +mmdet/apis/env.py +mmdet/apis/inference.py +mmdet/apis/train.py +mmdet/core/__init__.py +mmdet/core/anchor/__init__.py +mmdet/core/anchor/anchor_generator.py +mmdet/core/anchor/anchor_target.py +mmdet/core/anchor/guided_anchor_target.py +mmdet/core/bbox/__init__.py +mmdet/core/bbox/assign_sampling.py +mmdet/core/bbox/bbox_target.py 
+mmdet/core/bbox/geometry.py +mmdet/core/bbox/transforms.py +mmdet/core/bbox/assigners/__init__.py +mmdet/core/bbox/assigners/approx_max_iou_assigner.py +mmdet/core/bbox/assigners/assign_result.py +mmdet/core/bbox/assigners/base_assigner.py +mmdet/core/bbox/assigners/max_iou_assigner.py +mmdet/core/bbox/samplers/__init__.py +mmdet/core/bbox/samplers/base_sampler.py +mmdet/core/bbox/samplers/combined_sampler.py +mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py +mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py +mmdet/core/bbox/samplers/ohem_sampler.py +mmdet/core/bbox/samplers/pseudo_sampler.py +mmdet/core/bbox/samplers/random_sampler.py +mmdet/core/bbox/samplers/sampling_result.py +mmdet/core/evaluation/__init__.py +mmdet/core/evaluation/bbox_overlaps.py +mmdet/core/evaluation/class_names.py +mmdet/core/evaluation/coco_utils.py +mmdet/core/evaluation/eval_hooks.py +mmdet/core/evaluation/mean_ap.py +mmdet/core/evaluation/recall.py +mmdet/core/fp16/__init__.py +mmdet/core/fp16/decorators.py +mmdet/core/fp16/hooks.py +mmdet/core/fp16/utils.py +mmdet/core/mask/__init__.py +mmdet/core/mask/mask_target.py +mmdet/core/mask/utils.py +mmdet/core/post_processing/__init__.py +mmdet/core/post_processing/bbox_nms.py +mmdet/core/post_processing/merge_augs.py +mmdet/core/utils/__init__.py +mmdet/core/utils/dist_utils.py +mmdet/core/utils/misc.py +mmdet/datasets/__init__.py +mmdet/datasets/builder.py +mmdet/datasets/cityscapes.py +mmdet/datasets/coco.py +mmdet/datasets/custom.py +mmdet/datasets/dataset_wrappers.py +mmdet/datasets/registry.py +mmdet/datasets/transforms.py +mmdet/datasets/utils.py +mmdet/datasets/voc.py +mmdet/datasets/wider_face.py +mmdet/datasets/xml_style.py +mmdet/datasets/loader/__init__.py +mmdet/datasets/loader/build_loader.py +mmdet/datasets/loader/sampler.py +mmdet/datasets/pipelines/__init__.py +mmdet/datasets/pipelines/compose.py +mmdet/datasets/pipelines/formating.py +mmdet/datasets/pipelines/loading.py +mmdet/datasets/pipelines/test_aug.py +mmdet/datasets/pipelines/transforms.py +mmdet/models/__init__.py +mmdet/models/builder.py +mmdet/models/registry.py +mmdet/models/anchor_heads/__init__.py +mmdet/models/anchor_heads/anchor_head.py +mmdet/models/anchor_heads/fcos_head.py +mmdet/models/anchor_heads/ga_retina_head.py +mmdet/models/anchor_heads/ga_rpn_head.py +mmdet/models/anchor_heads/guided_anchor_head.py +mmdet/models/anchor_heads/retina_head.py +mmdet/models/anchor_heads/rpn_head.py +mmdet/models/anchor_heads/ssd_head.py +mmdet/models/backbones/__init__.py +mmdet/models/backbones/builder.py +mmdet/models/backbones/detnas.py +mmdet/models/backbones/dropblock.py +mmdet/models/backbones/efficientnet.py +mmdet/models/backbones/efficientnet_builder.py +mmdet/models/backbones/fbnet.py +mmdet/models/backbones/fbnet_arch.py +mmdet/models/backbones/fbnet_blocks.py +mmdet/models/backbones/feature_hooks.py +mmdet/models/backbones/hrnet.py +mmdet/models/backbones/mnasnet.py +mmdet/models/backbones/mobilenetv2.py +mmdet/models/backbones/mobilenetv3.py +mmdet/models/backbones/resnet.py +mmdet/models/backbones/resnext.py +mmdet/models/backbones/ssd_vgg.py +mmdet/models/backbones/utils.py +mmdet/models/bbox_heads/__init__.py +mmdet/models/bbox_heads/bbox_head.py +mmdet/models/bbox_heads/convfc_bbox_head.py +mmdet/models/bbox_heads/double_bbox_head.py +mmdet/models/bbox_heads/auto_head/__init__.py +mmdet/models/bbox_heads/auto_head/build_head.py +mmdet/models/bbox_heads/auto_head/mbblock_head_search.py +mmdet/models/bbox_heads/auto_head/mbblock_ops.py 
+mmdet/models/detectors/__init__.py +mmdet/models/detectors/base.py +mmdet/models/detectors/cascade_rcnn.py +mmdet/models/detectors/double_head_rcnn.py +mmdet/models/detectors/fast_rcnn.py +mmdet/models/detectors/faster_rcnn.py +mmdet/models/detectors/fcos.py +mmdet/models/detectors/grid_rcnn.py +mmdet/models/detectors/htc.py +mmdet/models/detectors/mask_rcnn.py +mmdet/models/detectors/mask_scoring_rcnn.py +mmdet/models/detectors/retinanet.py +mmdet/models/detectors/rpn.py +mmdet/models/detectors/single_stage.py +mmdet/models/detectors/test_mixins.py +mmdet/models/detectors/two_stage.py +mmdet/models/losses/__init__.py +mmdet/models/losses/accuracy.py +mmdet/models/losses/balanced_l1_loss.py +mmdet/models/losses/cross_entropy_loss.py +mmdet/models/losses/focal_loss.py +mmdet/models/losses/ghm_loss.py +mmdet/models/losses/iou_loss.py +mmdet/models/losses/mse_loss.py +mmdet/models/losses/smooth_l1_loss.py +mmdet/models/losses/utils.py +mmdet/models/mask_heads/__init__.py +mmdet/models/mask_heads/fcn_mask_head.py +mmdet/models/mask_heads/fused_semantic_head.py +mmdet/models/mask_heads/grid_head.py +mmdet/models/mask_heads/htc_mask_head.py +mmdet/models/mask_heads/maskiou_head.py +mmdet/models/necks/__init__.py +mmdet/models/necks/bfp.py +mmdet/models/necks/fpn.py +mmdet/models/necks/fpn_panet.py +mmdet/models/necks/hrfpn.py +mmdet/models/necks/nas_fpn.py +mmdet/models/necks/search_pafpn.py +mmdet/models/necks/auto_neck/__init__.py +mmdet/models/necks/auto_neck/build_neck.py +mmdet/models/necks/auto_neck/hit_neck_search.py +mmdet/models/necks/auto_neck/hit_ops.py +mmdet/models/plugins/__init__.py +mmdet/models/plugins/generalized_attention.py +mmdet/models/plugins/non_local.py +mmdet/models/roi_extractors/__init__.py +mmdet/models/roi_extractors/single_level.py +mmdet/models/shared_heads/__init__.py +mmdet/models/shared_heads/res_layer.py +mmdet/models/utils/__init__.py +mmdet/models/utils/conv_module.py +mmdet/models/utils/conv_ws.py +mmdet/models/utils/norm.py +mmdet/models/utils/quant_conv.py +mmdet/models/utils/scale.py +mmdet/models/utils/weight_init.py +mmdet/ops/__init__.py +mmdet/ops/dcn/__init__.py +mmdet/ops/dcn/deform_conv_cuda.cpython-36m-x86_64-linux-gnu.so +mmdet/ops/dcn/deform_pool_cuda.cpython-36m-x86_64-linux-gnu.so +mmdet/ops/dcn/setup.py +mmdet/ops/dcn/functions/__init__.py +mmdet/ops/dcn/functions/deform_conv.py +mmdet/ops/dcn/functions/deform_pool.py +mmdet/ops/dcn/modules/__init__.py +mmdet/ops/dcn/modules/deform_conv.py +mmdet/ops/dcn/modules/deform_pool.py +mmdet/ops/gcb/__init__.py +mmdet/ops/gcb/context_block.py +mmdet/ops/masked_conv/__init__.py +mmdet/ops/masked_conv/masked_conv2d_cuda.cpython-36m-x86_64-linux-gnu.so +mmdet/ops/masked_conv/setup.py +mmdet/ops/masked_conv/functions/__init__.py +mmdet/ops/masked_conv/functions/masked_conv.py +mmdet/ops/masked_conv/modules/__init__.py +mmdet/ops/masked_conv/modules/masked_conv.py +mmdet/ops/nms/__init__.py +mmdet/ops/nms/nms_cpu.cpython-36m-x86_64-linux-gnu.so +mmdet/ops/nms/nms_cuda.cpython-36m-x86_64-linux-gnu.so +mmdet/ops/nms/nms_wrapper.py +mmdet/ops/nms/setup.py +mmdet/ops/nms/soft_nms_cpu.cpython-36m-x86_64-linux-gnu.so +mmdet/ops/roi_align/__init__.py +mmdet/ops/roi_align/gradcheck.py +mmdet/ops/roi_align/roi_align.py +mmdet/ops/roi_align/roi_align_cuda.cpython-36m-x86_64-linux-gnu.so +mmdet/ops/roi_align/setup.py +mmdet/ops/roi_align/functions/__init__.py +mmdet/ops/roi_align/functions/roi_align.py +mmdet/ops/roi_align/modules/__init__.py +mmdet/ops/roi_align/modules/roi_align.py 
+mmdet/ops/roi_pool/__init__.py +mmdet/ops/roi_pool/gradcheck.py +mmdet/ops/roi_pool/roi_pool_cuda.cpython-36m-x86_64-linux-gnu.so +mmdet/ops/roi_pool/setup.py +mmdet/ops/roi_pool/functions/__init__.py +mmdet/ops/roi_pool/functions/roi_pool.py +mmdet/ops/roi_pool/modules/__init__.py +mmdet/ops/roi_pool/modules/roi_pool.py +mmdet/ops/sigmoid_focal_loss/__init__.py +mmdet/ops/sigmoid_focal_loss/setup.py +mmdet/ops/sigmoid_focal_loss/sigmoid_focal_loss_cuda.cpython-36m-x86_64-linux-gnu.so +mmdet/ops/sigmoid_focal_loss/functions/__init__.py +mmdet/ops/sigmoid_focal_loss/functions/sigmoid_focal_loss.py +mmdet/ops/sigmoid_focal_loss/modules/__init__.py +mmdet/ops/sigmoid_focal_loss/modules/sigmoid_focal_loss.py +mmdet/utils/__init__.py +mmdet/utils/collect_env.py +mmdet/utils/contextmanagers.py +mmdet/utils/flops_counter.py +mmdet/utils/logger.py +mmdet/utils/profiling.py +mmdet/utils/registry.py +mmdet/utils/util_mixins.py \ No newline at end of file diff --git a/CDARTS_detection/mmdet.egg-info/dependency_links.txt b/CDARTS_detection/mmdet.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/CDARTS_detection/mmdet.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/CDARTS_detection/mmdet.egg-info/not-zip-safe b/CDARTS_detection/mmdet.egg-info/not-zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/CDARTS_detection/mmdet.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/CDARTS_detection/mmdet.egg-info/requires.txt b/CDARTS_detection/mmdet.egg-info/requires.txt new file mode 100644 index 0000000..40e8bb0 --- /dev/null +++ b/CDARTS_detection/mmdet.egg-info/requires.txt @@ -0,0 +1,6 @@ +mmcv>=0.2.6 +numpy +matplotlib +six +terminaltables +pycocotools diff --git a/CDARTS_detection/mmdet.egg-info/top_level.txt b/CDARTS_detection/mmdet.egg-info/top_level.txt new file mode 100644 index 0000000..609f687 --- /dev/null +++ b/CDARTS_detection/mmdet.egg-info/top_level.txt @@ -0,0 +1,2 @@ +mmcv +mmdet diff --git a/CDARTS_detection/mmdet/__init__.py b/CDARTS_detection/mmdet/__init__.py new file mode 100644 index 0000000..1c4f7e8 --- /dev/null +++ b/CDARTS_detection/mmdet/__init__.py @@ -0,0 +1,3 @@ +from .version import __version__, short_version + +__all__ = ['__version__', 'short_version'] diff --git a/CDARTS_detection/mmdet/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ce5fa4d9c150537ba4e78ef7b1e672719a127bc GIT binary patch literal 235 zcmYL@O$x#=5QUSpT2TrfzzcL^KsPQ#J%Jl{-3-NqwveBaq(YD6m9ll^6}mFPzk&Dh zcsyoio9FZ6*i?WJdIbH31a}Ius{n==7D(a}OHvYy7-30G%7mpCR7{>YR0`r305@)I zs}+FHZKI9*1}3gENx->CefFvSQ=b7;LIB*6nZ|0wxTB4hd=seZp|2FZ4y>}&cG^-Y g8F&2Z!nL=j0Ig~rz`7g!Y%ewI1wX{sY&^asFQ3aj2LJ#7 literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/__pycache__/version.cpython-36.pyc b/CDARTS_detection/mmdet/__pycache__/version.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b4652194a781c7e570e8e7936a23570c3af923b GIT binary patch literal 195 zcmXr!<>k7wIWw`4fq~&M5W@i@kmUfx#XLYFg&~R|g)xdTg(;Xplevo5K+jCiK-<8= z!qV8nxQZ3b_0wd!#T_4CmReMtnV%OQe~Y&`BfqEwCa{vBh#6=SnD}L`pOK%NY6K+n z(kn|7^YoJ|Qxc2yiwp9L^>cGmQcF^kOMr$z*!oZ-^$IF)aoFVMrDreGfB*|5F~Kq-k&H=I$-g^($)XpQ#^eV!!FQqeg2-h3-0TeA%nsQRuDn z8ynu30xhZK9Gb*$d-cW*S36FDJHB(;_S;c;D`|ri^QY literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/apis/__pycache__/env.cpython-36.pyc b/CDARTS_detection/mmdet/apis/__pycache__/env.cpython-36.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..4b1101686feb544a9285f4cd2747b82e43298102 GIT binary patch literal 2319 zcmZWqTW{Mo6c%-{ZysZ-e51{ehdMPzK*5+#sS zf^^Qy(%1HBz<$I25?}Y!ztE?hLp!dQ+2A41M~dY6&V~0Ijp|SDN8uyivi`A_eh!Qu zLz8boP>Yhp8q9h!AV#+bw&|ULV|sVsnZ7hAnZ7*s2Ng4_j%$NDjO?V5HU~{&(JR!Y z-fJ>gp(R>=Z4Fw~rxob0&{bNab(mSD4cY``jalQXM})4>7NFNC>06z(cW@`G+abC! zWGbMsR6)#-GVSsxN+KI);fE$*dj?H%5Xu@8N~rw?44C-h2Iw|*&Iz%m7Ii1KxTl;c zIU-a0Enxg1pdO%4mAi!A1hfR`OXV%0O$COG%Tw!|oRdFbf4HXKJ%B4#!z9U00-kZE zJ(-6mT$l4xm5FGioivXDObU^{THoczVG`4J5*9ofG0|41IeXat{jB3_JI!?qZIvn% zDoB|cWmLE1Wn>k2i06!49MW9E@VwTNZjRQ~U&EweJ3?eamyR-#hN|Oe|KcLLd>BR( z#;Gn%PC_x1e_P@z(E0ws#wbhKcOd!jvJMJmqtY`hWXvD{$% z_-=lxD~oG3&^fChEYc!X;*;h@v&D5lBZ71AbLY)?^D!vOQUv@*)`9agTRCHQ?44U@ zjv1Av_SCr!GsIF5h({27Q!6G@@|W?g@sHEhZf~pmOyKdIigseIw4F&^hFiuWN;h|am0csD_~s}xt}50#R$}^#-?0k5tA|)imb>xuGw3x!2#M8A)jcU7iloe z3u!2viEA(|>L6}FlaPs)WjBG07BT380t{@V!%%?Blr;}Eg2}Mf&D3t1CoE-LF>3tT zu|M44@`*_OSJ0$M7$Q)Aw<&@1iwwdu78&#>DF5NynQON3fNQ+rtXpT^!WX5vFPv%V zgDJ4QoA9g;b9CgOtxHKCZpFlvCbqOM#W~p{1V^rHbHgmoT;IE42BpB=CJs&nghD50pOtRvfZ+P7{e?3g7O zK65>V+z*GKLE;&wdS-<{Mzy!o>+SbCC0$Ok;V|Yy69RiMW_SGfai3#x~t|7ty7{fP1WSI(P>o#H8yxRIuXI)~0 z#HtJ2Ax%I$<~wvZvAZsQgl-nn_i^+M2<<@$%TEQeZyY+fV#?%Qtb!swETH!2+LB^o zT3?$jN{d9r^9(=+RxSaef=ATSG&;U33eLecwBNi1rW%?uWx~~XTf4j6r~BseOUC2# lB(SV%e>2T!k+4VT0g2g&hQr@RiHT5mstw=w-Rg>8{ui!yHJq36i>5jx9HnBF#)APCZg;Pcp8m+VLoM<0e(o*trZ74-8?K6lh$S z-K7+{;7gSB&1w3Sm;Qmwbox{J(lm^5r!EKb>v-f}kuZaqiP5Xx=FsZdI>+fb`d#a}`duG3dQB~PJr8dy zX{GI6JMHv3CbhMbg>ABuHExXBTdcJoa@^h@gnqQc`=YDN4 zkF~xqSc_jcwtE*@n{~c0dTZVCRo0_CNkYZT zx{N}lxagYYVv>g}i1vB(aFoZHLRM?QRS+eilw9JozLTe;TylsDd*e7^0pp5CDi=@O zXw1T`S0bB6z(s1JX9th`3le1%hBA*WaZy=Ad*~F#u{kla7j!;yjxG6`sxfousyda? z{kh4k!WcG!rt;{Gr^W2Y*2FGQ@~|nsR_y}vu{AN7gS^_B@z51TB{65sDD0q4iu=6O#y$)_PM`}s)4c@`$r?R*1uuN(fIzMt^E@7N(rmJk>+B#>@WRSos4?@iynFZ7 z-O@?93Lm|{LuTnyC(;iE*Hf3_0Y;CrN{@3zyexfceBf*sjF@(=s#Cg^ z221bh&X+FZ2XVy3d9?V}5I@H2$@R_sJmpuAW`m<~m~BQ!ER>rPUb~s5l?}sCDtR*; z#d0&w`do0Vb7OQ=c4FB4+^d(+rThsJqwSdsX4`b}G|VMD9;7A9vpnQ^_!cjsRzy{2 zmj&TCrPEt<2T1F{``_?Vu%@cMz0b5Wt$?YLMGO@aZ^AsuJ-tba!e*J+@DI zv~54fBT2ChOMlOmOorITqm}#>1R$$1v6P9obqw?!+7nwj$Hu^f5^FG&PraaXuY6GaBIghQv9srvOl~K|jc8-ldUIM}zeS^8I z&KiHVirP2USLVe15JB;G#(Ty`M&V+En&EAYzf;TfV=|F-*#$@eCkZ5HaQwaKt3A(O zp92(7kr0Z2?K|K^(1*>J8b~)Y777s_5iBLKR3F3hXLefjP9U-#G6r(_8oxt-z%wpFgwhI#fs()t04`@hcN+CD25wb(q=ETxbEih3wnCr9 z0?{Hls?hfYnncVuwQ&Mb7iE}^5-x)g7r|gO7F2%w9!No}>ODwuDYZ?*Zh3b>tf4~b zWc)J)Gz$ex5`;mD7m%&Zs&oig0egVzvN4KBJOSJZS2tsEq=ZdX$q>sLRv7J<^(+qt zB4pjVhOf$TXdK;ZucJ|a-qWkp(yf+v%GiC%UYX<99JX#CU!#uEcHwB3%m!epVO~P| zf+e<5`md|{`umN05xD|gXy5VVl{^VI+5$``DRuh|m;X(Ox50T|)jc`dlcCVqwi-^;M(j zP26`Ic*Xf;_xt8>MomYm@#YBq~SAg5iBGrP=t+<>drzBWhg z!u{HmsBsr;7tYxUU&VfY5KEvCaU&Sx)EyC7=0*sS3bZ49uam+9w9ktrKD2@;?ZZF-TQkm%PP~ zKmp;ePa9x_Lo7AVlQW$0!+lN=LqqEt^PQ^zHy?o?9O#Y8PqEN^tOAOl-{qhHI62}` z+>Z&qsJGdQLgJ~jts}}gUkj}VTXQI4oJU|hxo(FsDuzsK4563SMpMj)<%O7D#`41- zWTmZs*L5Zq=A5ha^r^~7> z*cF?A7>&3mIBOUu1dPDs8Y9X$zz42>+=vL0>7rr+sJnRNI+6ig`o0gS0nJgykfO82 z41kA;FPoWZYv0T&`XLfw4ATDb2Wa$i|+5 z)+gRJ8F_>=-}OJBk!BLE`RK=_wxII9jqM~yl#tgU%<6V)WlK}*%2mqd+~Pr((6nsM z&9-dJ^nUVhT$r`i4*_`hye3}=806liL*17PH*pdbWI~#8Yf>g&rUikwkB-Dm65Rvv z_UIudk!LvE2D)YK5cbUxMBqhvis*(nbWqmDNUxPma;*1NoJgY85e`SC8%oXN9rBj4 z$+K}v0h-VH#rXvv%WoJ)!s*(E)`ndXsJL;E`95&`G|4MHa9_ zW<9PUJmQx~;SfW_$s3$<7C`B|&B#h?;k#f3{|_Y<)BLZ768z2xB!%-To+6gqjt66$ zt92$&mr#6kG)izZM_rUWjVd!I{vQYg6h&?WN3_q8y8&J6m@lnjw+jx5tkNDO3a53w z3Odt~yHp3f`rbfX5raJ({a!*%an^}@I51XPde-AsPI+8VY3UoZmklJwvV|iv94Up| zwRQz%b{+96yfjMR+x^xOefpvZfXVBUjO6FV1-!~-{8k{%C-ELm=_HkI^%gO;(}&X< 
z5Nd{jOQR$Dk)Z%xa)Y2kL{#r1N`65-cJ$9EVh2gW6V1&vOD|ixK#Vgmc>SBA^z@H| z4Wcdj??mrnl|iIma{M2f@pM^H{&rQY4`H(U=kX#9=~jPu^pQccaJ?B?E2q>sKf#ub o?dr^XouZ-qDUyoE6L8T_h_=;+R@=7s;?hFHM>xgpT{F!80A`g9YybcN literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/apis/__pycache__/train.cpython-36.pyc b/CDARTS_detection/mmdet/apis/__pycache__/train.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f81b5d3ee1a0170a9002fc187f32e6ddc09e09f GIT binary patch literal 6498 zcma)A&2JmYmG3V$$tFcf)Q2s9%yc#%BW5+UXC|8uLH5Kuwr7%Hl!5Ie2oZ;Fu}V}^ z{XthXElX+UFq+FQ;KAe+U@r^oAwU+3{2hDV!yf9IO9CtwInOD-S51+WMuUV>Rad{R zdiCnn`@MRv-dR}i-u?U_c8xmaC0dQwI&1NIXFa~sxuR*+=xV&t*+AQ4wdh)Wy>lJyI$QX}=-glpwut9Vw!xNJ z^Aof48e3tjpuEnmu{E|1iqEdIE9@#-Ut!i`qjlrYSewx@RfUBwLK&vXE?#Os7K{td z?uWgsWvRx8AKnkL;G;kUQN$y)f%i~mBHSBg=rs4KWh?j5C`q_bE7aw25Dz0R9&^q< zNYg{LknRn-`y!3Ij9AmO7%h1wTaK#JhzBo%NOxVPw;vC)Fb+?+(4@6op34b{hcdw8TB#mR7ygm$s9J?(q)^m9iA(rral5bg@d^9FktyauloTlj9+OAVeyZyPt|U$dp4&TCQ?Q zFfLUECJ9l5>{N-;O@f$rJl!k=Bngy5c-s8BL{aYHQ6TzKd>tP;#mCo5dx61mUu?AKf*j{2_Wd`0gZ4xW^J4sX>mGoaxxf+}zB{gGyF~3f))csr}f1{s%)(IEO#LknO{&{DbaqqhF2H(XR}bUT0O^ zuIRR>+Za8idwodu@^@!4J^eSBe{GD@IZ7ASoqA8&D=^HT1u8uDMuiAIQS>O#La z{S2;n+wVnz#CZeJ{s!M-P$ahfJp?s=!~>CpN#7R$=6MMyT=_fZ50Ybu)*r<@$>iDO zy;KN}12$pAd%8p|?3T~l{r1*WTY4S3I^tpfAnP*T3y!y-bYeM*(Ld{*7p+)?-io6T z2Qf{MCUV;k3f1bJddClXJw8PE2MtD?$xCM3+zSKQjYKydMcJ0G-}LkoLW0AskVg|* zUlhalpUv}l<_8IzkYLBATMFJ$zPHR^ax^2>wy6iM-7AE(vR)wxi#ES%i>jrH82xLcKiXm}9l6Zb3(H4|kHFPy=j zdpi_7_R@HGHzC#(N4v(!S1*V^7fMh2V-{CSER_`rRi9UnTD*W*wRpj1Rec_veN2nr zU)~jTxGOi^-$!D1a(#0z$@@ub^B3$NTkp0viz#v%**Yn;>8Tvr+SZCH9|SVUG9m5| z2d9?~5%Uexq4H)YgIXd-%(^p*sGzr?$X=E7W{E8#+$2Kd9s*`TfGi?v8(E!N5d8Tl z1nRqe5isQj!y!+YvIHVR37?JNyaIwGPp8TlK{B-#b)eK)nA!u`rdpr9!W~2-#&t3& z?ve;v5oJ0H;08NI z%JTqh1poN65V4_W#qmtb^31wJ2|+`D#hcx* znx=RPjy}e-h78f{OUd&;hJ9uo2WEghfnYj~#{-0mWrVx)5D`D=U3$ zB2GhkizL-W&k9PFQifM$#8@;EV-Ss0@`_F zFE1mivIdKJHTUpbLRmg7nFi?1Y=vY*+ZGD+)xp}hh9_dkkY=mvKI?f6@lX9z$c~=Y z;EMQv(63U9D9I{X8;G?mP_I#&sKv@aZPMD3c6ObTddzS`*EgBV%CDS0MStbNYl!ZM z^Xn-7slG=7_A9&M=TN9>OgHoFhN4Q=sq;PM?Bm))d7LsnedIi$M4bZn)>j0=Gy>3w z1$w_!b;?Ol+NM@^)9eRyH$!0nXBr%W0&|_JsLD|)4v{4Y9np&g8nQHH(Jdk^QKSBK z`gTR|J|^a($TAR@bPsUfBaw!PNpH2>JK(*;VHzgc83`$C|EvvAPFT=LQ6}CxVY9J7 zK{)nsi zBUR5*+!J;AFzp@4RzqW6e3yjz9#xbJ2}V_ys$ZglB*l$ag*Jqof*@C@)@J(Bn^q{T zvxw#B%P2TU>fEW&4r*UgryL9yA9P7|5#Hk4;Q8+;^82U^t8T9XtPavl$GT?Mb-L*h zP|+@c3|J%ma*@*$zyW{TGA%d)xB^Ran}V3Oa0=qr0Ahfmo>5wcyC>x81Z2P%S-SNK z5J9?SWF=&x)_?$8M%r{9+Bn1j?H^_aMprNz>6eB!fv^T>mnP6I0}Aud{%iu|1wg&b zDxvvXTLZj~aSON|p!&-}Z4H{@zX7Qg^lNH(HqlxIyvu_%YH4s5vi*rA?f(RM!ew9RMl0ho!#$Y}SiebDLNBr>!Xc6Yx=!K&YXkHi<1fM< z?}kijzoE^tWfms-44W&qQ9T4@56{+3!HGsLT$MzS^to~X4Jmp=Cih-gI5~=$PZ0a` zN(sJp2inHP0lkJ)4X8S#ieeCvQZ=ONIaLHijbTK2<- zxMYC>AGZEMdEM^*2mv$ib_Io6;wPw7t(W3%T;Fv{ee+iYBnxred(oytoNmsw&I{jM z(;o`YQ7Bn^JPKuST}2nCf*`3p-EJf2 score_thr)[0] + for i in inds: + color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8) + mask = maskUtils.decode(segms[i]).astype(np.bool) + img[mask] = img[mask] * 0.5 + color_mask * 0.5 + # draw bounding boxes + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) + mmcv.imshow_det_bboxes( + img, + bboxes, + labels, + class_names=class_names, + score_thr=score_thr, + show=show, + wait_time=wait_time, + out_file=out_file) + if not (show or out_file): + return img + + +def show_result_pyplot(img, + result, + class_names, + score_thr=0.3, + fig_size=(15, 10)): + """Visualize the detection results on the image. 
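+
+    A thin convenience wrapper around show_result: it calls it with
+    show=False and renders the returned image via matplotlib instead of an
+    OpenCV window.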
+
+    Args:
+        img (str or np.ndarray): Image filename or loaded image.
+        result (tuple[list] or list): The detection result, can be either
+            (bbox, segm) or just bbox.
+        class_names (list[str] or tuple[str]): A list of class names.
+        score_thr (float): The threshold to visualize the bboxes and masks.
+        fig_size (tuple): Figure size of the pyplot figure.
+    """
+    img = show_result(
+        img, result, class_names, score_thr=score_thr, show=False)
+    plt.figure(figsize=fig_size)
+    plt.imshow(mmcv.bgr2rgb(img))
diff --git a/CDARTS_detection/mmdet/apis/train.py b/CDARTS_detection/mmdet/apis/train.py
new file mode 100644
index 0000000..8fcba8f
--- /dev/null
+++ b/CDARTS_detection/mmdet/apis/train.py
@@ -0,0 +1,256 @@
+from __future__ import division
+import re
+from collections import OrderedDict
+
+import torch
+from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
+from mmcv.runner import Runner, DistSamplerSeedHook, obj_from_dict
+
+from mmdet import datasets
+from mmdet.core import (DistEvalHook, DistOptimizerHook,
+                        DistOptimizerArchHook, Fp16OptimizerHook)
+from mmdet.datasets import DATASETS, build_dataloader, build_dataloader_arch
+from mmdet.models import RPN
+from .env import get_root_logger
+
+
+def parse_losses(losses):
+    log_vars = OrderedDict()
+    for loss_name, loss_value in losses.items():
+        if isinstance(loss_value, torch.Tensor):
+            log_vars[loss_name] = loss_value.mean()
+        elif isinstance(loss_value, list):
+            log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
+        else:
+            raise TypeError(
+                '{} is not a tensor or list of tensors'.format(loss_name))
+
+    loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key)
+
+    log_vars['loss'] = loss
+    for name in log_vars:
+        log_vars[name] = log_vars[name].item()
+
+    return loss, log_vars
+
+
+def batch_processor(model, data, train_mode, **kwargs):
+    losses = model(**data)
+
+    losses_ = losses[0]
+    loss_latency = losses[1]
+    if loss_latency is not None:
+        losses_['loss_latency'] = loss_latency
+
+    loss, log_vars = parse_losses(losses_)
+
+    outputs = dict(
+        loss=loss, log_vars=log_vars, num_samples=len(data['img'].data))
+
+    return outputs
+
+
+def train_detector(model,
+                   dataset,
+                   cfg,
+                   distributed=False,
+                   validate=False,
+                   logger=None):
+    if logger is None:
+        logger = get_root_logger(cfg.log_level)
+
+    # start training
+    if distributed:
+        _dist_train(model, dataset, cfg, validate=validate)
+    else:
+        _non_dist_train(model, dataset, cfg, validate=validate)
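A worked sketch of the `parse_losses` contract above, with illustrative values: every key containing 'loss' is summed into the total, a list entry is treated as per-level partial losses, and everything is logged as plain floats. It assumes the repo's dependencies are importable so that `from mmdet.apis.train import parse_losses` works:

```python
import torch
from mmdet.apis.train import parse_losses

losses = {
    'loss_cls': torch.tensor(0.9),
    'loss_bbox': [torch.tensor(0.2), torch.tensor(0.3)],  # per-level losses
    'acc': torch.tensor(87.5),  # logged, but not part of the total
}
total, log_vars = parse_losses(losses)
print(float(total))        # ~1.4  (0.9 + 0.2 + 0.3)
print(log_vars['loss'])    # ~1.4
print(log_vars['acc'])     # 87.5
```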
+
+
+def build_optimizer(model, optimizer_cfg, optimizer_exclude_arch=False):
+    """Build optimizer from configs.
+
+    Args:
+        model (:obj:`nn.Module`): The model with parameters to be optimized.
+        optimizer_cfg (dict): The config dict of the optimizer.
+            Positional fields are:
+                - type: class name of the optimizer.
+                - lr: base learning rate.
+            Optional fields are:
+                - any arguments of the corresponding optimizer type, e.g.,
+                  weight_decay, momentum, etc.
+                - paramwise_options: a dict with 4 accepted fields
+                  (bias_lr_mult, bias_decay_mult, norm_decay_mult,
+                  offset_lr_mult).
+                  `bias_lr_mult` and `bias_decay_mult` will be multiplied to
+                  the lr and weight decay respectively for all bias parameters
+                  (except for those of the normalization layers),
+                  `norm_decay_mult` will be multiplied to the weight decay
+                  for all weight and bias parameters of normalization layers,
+                  and `offset_lr_mult` will be multiplied to the lr of
+                  deformable-conv offset parameters.
+
+    Returns:
+        torch.optim.Optimizer: The initialized optimizer.
+
+    Example:
+        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
+        >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
+        >>>                      weight_decay=0.0001)
+        >>> optimizer = build_optimizer(model, optimizer_cfg)
+    """
+    # unwrap (possibly nested) DataParallel/DistributedDataParallel wrappers
+    if hasattr(model, 'module'):
+        model = model.module
+    if hasattr(model, 'module'):
+        model = model.module
+
+    optimizer_cfg = optimizer_cfg.copy()
+    paramwise_options = optimizer_cfg.pop('paramwise_options', None)
+    # if no paramwise option is specified, just use the global setting
+    if paramwise_options is None:
+        if not optimizer_exclude_arch:
+            params = model.parameters()
+        else:
+            # exclude architecture parameters (they get their own optimizer)
+            params = [p for n, p in model.named_parameters() if 'alpha' not in n]
+
+        return obj_from_dict(optimizer_cfg, torch.optim, dict(params=params))
+    else:
+        assert isinstance(paramwise_options, dict)
+        # get base lr and weight decay
+        base_lr = optimizer_cfg['lr']
+        base_wd = optimizer_cfg.get('weight_decay', None)
+        # weight_decay must be explicitly specified if mult is specified
+        if ('bias_decay_mult' in paramwise_options
+                or 'norm_decay_mult' in paramwise_options):
+            assert base_wd is not None
+        # get param-wise options
+        bias_lr_mult = paramwise_options.get('bias_lr_mult', 1.)
+        bias_decay_mult = paramwise_options.get('bias_decay_mult', 1.)
+        norm_decay_mult = paramwise_options.get('norm_decay_mult', 1.)
+        offset_lr_mult = paramwise_options.get('offset_lr_mult', 1.)  # Noted by Jianyuan, for offset lr
+        # set param-wise lr and weight decay
+        params = []
+        for name, param in model.named_parameters():
+            param_group = {'params': [param]}
+            if not param.requires_grad:
+                # FP16 training needs to copy gradient/weight between master
+                # weight copy and model weight, it is convenient to keep all
+                # parameters here to align with model.parameters()
+                params.append(param_group)
+                continue
+            # Noted by Jianyuan, for huang lang offset
+            if 'offset' in name:
+                param_group['lr'] = base_lr * offset_lr_mult
+
+            # for norm layers, overwrite the weight decay of weight and bias
+            # TODO: obtain the norm layer prefixes dynamically
+            if re.search(r'(bn|gn)(\d+)?.(weight|bias)', name):
+                if base_wd is not None:
+                    param_group['weight_decay'] = base_wd * norm_decay_mult
+            # for other layers, overwrite both lr and weight decay of bias
+            elif name.endswith('.bias'):
+                param_group['lr'] = base_lr * bias_lr_mult
+                if base_wd is not None:
+                    param_group['weight_decay'] = base_wd * bias_decay_mult
+            # otherwise use the global settings
+
+            params.append(param_group)
+
+        optimizer_cls = getattr(torch.optim, optimizer_cfg.pop('type'))
+        return optimizer_cls(params, **optimizer_cfg)
+
+
+def _dist_train(model, dataset, cfg, validate=False):
+    # put model on gpus
+    model = MMDistributedDataParallel(model.cuda())
+
+    # build runner
+    optimizer = build_optimizer(model, cfg.optimizer, cfg.get('optimizer_exclude_arch'))
+
+    arch_name = None
+    optimizer_arch = None
+    if 'optimizer_arch' in cfg:
+        raise NotImplementedError
+
+    runner = Runner(model, batch_processor, optimizer, optimizer_arch, cfg.work_dir, cfg.log_level, arch_name=arch_name)
+
+    # fp16 setting
+    fp16_cfg = cfg.get('fp16', None)
+    if fp16_cfg is not None:
+        optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config,
+                                             **fp16_cfg)
+    else:
+        optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
+    optimizer_arch_config = DistOptimizerArchHook(**cfg.optimizer_config)
+
+    # register hooks
+    runner.register_training_hooks(cfg.lr_config, optimizer_config, optimizer_arch_config,
+                                   cfg.checkpoint_config, cfg.log_config)
+
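+    # DistSamplerSeedHook calls set_epoch on the distributed sampler before
+    # each epoch, so every epoch gets a fresh, rank-consistent shuffle.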
runner.register_hook(DistSamplerSeedHook()) + # register eval hooks + if validate: + val_dataset_cfg = cfg.data.val + eval_cfg = cfg.get('evaluation', {}) + runner.register_hook(DistEvalHook(val_dataset_cfg, **eval_cfg)) + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + + if 'optimizer_arch' in cfg: + raise NotImplementedError + else: + data_loaders = [ + build_dataloader( + dataset, + cfg.data.imgs_per_gpu, + cfg.data.workers_per_gpu, + dist=True) + ] + runner.run(data_loaders, None, cfg.workflow, cfg.total_epochs) + + +def _non_dist_train(model, dataset, cfg, validate=False): + if validate: + raise NotImplementedError('Built-in validation is not implemented ' + 'yet in not-distributed training. Use ' + 'distributed training or test.py and ' + '*eval.py scripts instead.') + # put model on gpus + model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda() + + # build runner + optimizer = build_optimizer(model, cfg.optimizer, cfg.get('optimizer_exclude_arch')) + + arch_name = None + optimizer_arch = None + if 'optimizer_arch' in cfg: + raise NotImplementedError + + runner = Runner(model, batch_processor, optimizer, optimizer_arch, cfg.work_dir, cfg.log_level, arch_name=arch_name) + + # fp16 setting + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + optimizer_config = Fp16OptimizerHook( + **cfg.optimizer_config, **fp16_cfg, distributed=False) + else: + optimizer_config = cfg.optimizer_config + optimizer_arch_config = cfg.optimizer_config + runner.register_training_hooks(cfg.lr_config, optimizer_config, optimizer_arch_config, + cfg.checkpoint_config, cfg.log_config) + + if cfg.resume_from: + runner.resume(cfg.resume_from) + elif cfg.load_from: + runner.load_checkpoint(cfg.load_from) + + if 'optimizer_arch' in cfg: + raise NotImplementedError + else: + data_loaders = [ + build_dataloader( + dataset, + cfg.data.imgs_per_gpu, + cfg.data.workers_per_gpu, + cfg.gpus, + dist=False) + ] + runner.run(data_loaders, None, cfg.workflow, cfg.total_epochs) \ No newline at end of file diff --git a/CDARTS_detection/mmdet/core/__init__.py b/CDARTS_detection/mmdet/core/__init__.py new file mode 100644 index 0000000..f8eb6cb --- /dev/null +++ b/CDARTS_detection/mmdet/core/__init__.py @@ -0,0 +1,7 @@ +from .anchor import * # noqa: F401, F403 +from .bbox import * # noqa: F401, F403 +from .evaluation import * # noqa: F401, F403 +from .fp16 import * # noqa: F401, F403 +from .mask import * # noqa: F401, F403 +from .post_processing import * # noqa: F401, F403 +from .utils import * # noqa: F401, F403 diff --git a/CDARTS_detection/mmdet/core/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/core/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef720b461af20f4d4a363ce506112008bbe02819 GIT binary patch literal 281 zcmYk0O>V+45Jv3~euO|hN4mf&NUVVN1Z>(>HkPqnViDU>#;KYkaV2lN>J?aZD3)MG z^WJRc)2!z6>FcEuD@w=@d8iEcHyqd?6j8J$A2-wFEP0TeCl8ZH=%yE6Z5Ue+EcZ|a>m>SNc9b?!( zE?ZW4tCvW3>^tF@?3Dlpc*?9*I%s*#z3oi;Rx{3xGlBC()5poidezl>cYP0LhzY&- EA8+4D(EtDd literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/core/anchor/__init__.py b/CDARTS_detection/mmdet/core/anchor/__init__.py new file mode 100644 index 0000000..304d493 --- /dev/null +++ b/CDARTS_detection/mmdet/core/anchor/__init__.py @@ -0,0 +1,8 @@ +from .anchor_generator import AnchorGenerator +from .anchor_target import anchor_target, anchor_inside_flags +from .guided_anchor_target import ga_loc_target, ga_shape_target + 
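+# Public names re-exported via `from mmdet.core.anchor import *`.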
+__all__ = [ + 'AnchorGenerator', 'anchor_target', 'anchor_inside_flags', 'ga_loc_target', + 'ga_shape_target' +] diff --git a/CDARTS_detection/mmdet/core/anchor/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/core/anchor/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fff7b1149e1742d2e35deabfa0292104e42df255 GIT binary patch literal 409 zcmZ9I%SyyB6o!-D9V}DOm#7N^x|LBuaOY-r*@V!Z;IA}L8PTnOI zE_Mk@r0LFG+Ynt(N4^$p68JWal0}Kj+akzYTj6F!iu2Te-S}EeVHP8QHu+&JB_UKw zLOy`4A~?$71@%VpyP#Uk6V<4j8Fk2w#v!E`_q_9i(I<~OV>vqU5g~&fJR!H^yi^a$ Suu1Y=JdlgvOW43Bt>8BV#%>b; literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/core/anchor/__pycache__/anchor_generator.cpython-36.pyc b/CDARTS_detection/mmdet/core/anchor/__pycache__/anchor_generator.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d01cefbc123b6ebbfb5c3a837d70f2d8c413e390 GIT binary patch literal 2768 zcma)8OK%)S5bo}I&#oOOF|k9U2thb3GBE-na)?MGK;eM)f)8kfG@5LW_u<`Hr+YT` zTC)dO=a|2N;KC2;6A~x?g`H&^-8IwXf=S(^|pF4?={72SuFW|x29g@q-f${{8{vIM{36~7s*QF!f zbDlW4D?RC-vpG*Z8OZRQCBD2NBN?MLkT>Oq+=Ld&ZFx;@L5rloOz)X%Pm4}(te)jX zuCj8h4%!_bGtfnEf#(}M`XNNcE0(g74@4#Aq&rr3tnOAWEMDb7`V|`la|SJx4Aby- zD>BjZOy{ZYpXJ8GM4g`T5As5fmGN~a8|K>hSV4cRO~Zbtqildir&B7k)y<34_Rs8c zwP~(?wS431e0ZoRN{ZI40umvK2;vW>Ht>4$!+vi(%0GuJy3HGC@;T8!v=uqDglgc09U^osuE@{srWTvjwpC0ofO+zzr_QCo( z)}1fc6T_ckW)VNYvz`>V0 z=QB@=%Dcm6#0pRS2^RUFKYJx4I~2Du3hdTGw%RfXB=rm`^!y*b9(=>%C*o&4(#qKH zj_fZ+e%IH|b!8O+jf}m7740;rce=|e7hmD!*Gk*Ym9`z$b!8}>*J2RO*(|Ce;2O2N z1Po(;J-)C2bK}VV=;9w-Y8&=7(TZzc6Ok2F5m4cUDpx2@{4?&$etu#+U1psl_1>vT)PlZ9r)iB3dMTfVJPOc6J3;L-OXi03|!!SqcL zizW~8wZ|WbJ-#PgahKl{&Hv!tdJ=bNGj!4VNw>WF7D~kjs54Y$5P!E418@vkAqFU$ zu)_)+g%hn_<*0|KJ>DS-8mf;=Z(DWTwxR{Ujb_V1iOG-itTfIrFVroxw){niP7R+X zks6;Y>W(-)HDWql3xkQ$QLcMk)t8^5x2C8tzRB-#mxto)_Ij#adS7h8(YT$7%r3W0 zC5g6fxBWX5&=(0*(YD-*63LtSlt>_(4+4tUP9>0Z!NP{EU)nQbj__{>QbM-NGvh~oX_7taxeV-(O?)4AL^mM6AlW<`UgK0MI+)`hLlN1RPtcgeFS8*a zpcAj2c#rx~#w9`B)LSY5zP14PS^@A23xHpkrpB|{I@FE2{cxCd?Vpw2#T*RvjF7&8 zn@;mBd~Naf`PrRyv{!n!#6i2o4{%z4L~Fedfqby9nEn)sY?P;|iPLm6mXjgr%`|;6 z$%b`{x`rMmJXT`_yPVn}*`izL$vzGG6$z>uD}qjXMJGMwmi-9{MI1KW@J<}YH*HJH gStQ>0`2asS*0wT&9nk(pgpcaAeN7PC--poo7v_$D)Bpeg literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/core/anchor/__pycache__/anchor_target.cpython-36.pyc b/CDARTS_detection/mmdet/core/anchor/__pycache__/anchor_target.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..955f08b5d9768e09c79bbdaccd702f109fbf16ce GIT binary patch literal 4875 zcmb_gOOqo<5$@`jG_A*IW;9P5XpAAO2zz#IF2;=4HeMSX0fS?A9T06VO^>RjX-%tp zwz}lq+2}rC*+)mfU+@i11iyhF;pi);y>Z?XUsks?nqfB}2&AY|R#sNlBeTBD(vLeG z=bfjg!M87J+CQ|Fj|KW&l7@+4(pNXFR>DdPV7+oTQK5;9)F-0V`Y6Su7(zI6sds zo-pXVH*C`c_`HC!j`BWAHbS4&a%0YlHh#4^L(S&Q)C}!RPhZO0XL@4l8q=~@CHsPL z#`P~D$BljMHl&NrqmQMYx99A(cK>$KB^`Npzt&SxXL@!@W2G@M7rpd*q$v)oq;mWo zX%Bw!8!c~e^Qp&fHI@}}zfJGZa(G^HhN=w!K`Ln=3NGq-X(U(21m#T#FD zKC_GUqE~Dbo5fbqm#~l8$2#p{&i=@R!_B*zB#Y7hwkm$H4UVHY{hG5Q*YfqeGuMk< zP(4NMf!a{iKB!Ga9ptyL{-g19M_IKm( z^Dtg$aMO_+2E$V~ng-+WaH-_;Ada{{ii7cgQlunZlFXi`Y%G0p#4uacbuUb^RM4`1 zF47scE5%Hnx|QJbG)2>~KMiG&)jQ+Su-X}TZj_AD=MAh54`YJ_V?UCi2u|XV&IBA6 zdzghZbj1SoabF%uts;~Bs)oCg|91+8qp|x6tr*kZ`S=(c4x+>zrpYK8FHCtSlrxd6 zSX0h$&y`t*bY6I zUmeTg`NdP`h#YMQXTJ4AS{kj>)*gSqcF0=rC{Bk#oZULSlve9JzOp|9W7Yn8Ak_Yv z1>@RcXUA&s16?@iE}5^IRn-AUbWt6m)NA;kRSN>eXDPpVseE9U?b{-iw?{c|U=Ad|{j`?6Q$7Z3>e!}MDO8R0Z(S!h8$e0F?N-LR7!+3G}W|Z(}PI~+Q zG^yByBC)3ypbRedi9zmr&Ey||RhxX^ltvsTW%s=-3kA9G2STKxv_z1M!_tze7@op9 
zffNmlD*IQw)X!iX#8)FoE=nVtP0OAF$jq0iuK+X~yx_I!G355%`ZdV(x>sS(TU&yc zAg2}^RBfX2wl5_S-*LqhPAUbJNmW~H(ExG=u}!0zu!0YrS-C-!S`~5w6&Okuc}@_= zel_0PSknH(FdCoA%Q2Poi^d8-9=l5l*biD_jjgV=|h*P&HZO~`qz zm8E-|s)sW+KaFAEMF@n(Sp`Q}41D*(TdDny689ojvc{ z*Za)WHzDJ&>T5GWur(yuqK{n{l@r}TLuwO-EQcY^<31KTXNB?T&788Fn}tdF#jaLZ zxjxY$rE~o&9X#tReWdSe69eP$Wm;|(#)AW7G&&&Kc>IU=)8unF=rX*YTvV~xmFX&P z6b6V4VHLgWxc_MlG;o5Ouhzok5wG8OU%TZldX-~b_N(UWueyhahxb)nsnB;|itk{L zrIpEGcvjlT62pX-jqJfJ3=uu-(ynF`KY*Cmsobq%Wond=B}+;Jqpn}gmoVbjC>i;^ zX0@4#uLA?QD1v&gB%*Zi>d^mIC8CtVS)A%}hWUa==P^4&Q@ zvT7o={H5FkcMGMDl(d1__mLQG<-3bL9BJIn`>XBTBUJO2#2>MPmhf%N3j`iM=B&Z$bW>T$}826T3T*d(d`(^xZ(JyN8h# zy#soyrnf-%Yq}5V9rR$W`JPH@o#RqrU3MCrhk~xt6Ef){8?m^&I1k6*;PcOSs>s5Q z2llc04ED>d`jSUU7V%JJp;_4lUZoF1?w_QBheEg*EhtS86ezvs;tk~46(uOPm3IA5 zvZ}=>7k3_q0=O1OXJNUPgb#hyF3lJzP-%_gG>~O;fqZ3i0eXUBq@bO64)s=oCa2duupiC9Gp)#Qb?@jQ;PpG4}IMQV}QtGRCQmem?E8oMw-=So`MWwZE)&-t40qZ^8 z1n~C(?bNoZ)G*&;J>3SUi(-P0Ho-pvM;o<68VnnpcJ&2E&@4mpG5S!%AzI@0x+T>b7W__co{VE)e*x#2{;-6w3+Jw ztv3`;Bj~1o4(RAq+$3pi{8F}xp(BjR={aLO{>1%+vSl}Lv)M@&N_w$HGMwU772qEZ z_XLqkh0Rku=0r4(k|1_h=4b8ErV0qwm0}dQ4CsjCQOhzISh6 zEQrN}twPF96_8E3bC*D;U`?SAFhoXirqWlHxu{60+*KJ#&9kyDWnp?9mCd7R%4czS ci{6JB!CF(_V{Hfj=%}yrc5{y?=Pjr6F9!e10ssI2 literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/core/anchor/__pycache__/guided_anchor_target.cpython-36.pyc b/CDARTS_detection/mmdet/core/anchor/__pycache__/guided_anchor_target.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b25d3aed18d280cb07fc54de60fa8268cd0e3ec6 GIT binary patch literal 8196 zcmcIpOONB$mF6X>_oGxQ)vLQ5(~sCKr&HZs_QGiNbldSGNCqhwc4Hc#T~JserIJb_ zt1qdt${>M^%U&AOvk5S>oz-NOO%~Y%NLC5*4~Q%bWMR~9R)c{T$$aNtlA@}s?O-sW zKIrbV+jEtw_M+jhY&e#XqQy=GVJDoPc*W7q8lN>#gQuY9JMHGAD|*{h(` zZT5lE-8hF9W$#?Yv#Z6;gV|_e_blHZ4PA%F4O#cC>G6cB@A=Nm_CAo+t{N9+?$kPt zYtz{z81<~<2CIzU$mcLDcXxP92G1j?A%EM!|s2;;YvjuJAIG%9T>XKbvVDUr6s z#WF5^iAyIg@wSkYNnMsz$hd@@jB8m|F;wlsQHB3csEsvSMGf3Gth9??sF4=x2^E?$ zsvZ_D;Vp#4OL&VRWQ1rN=os*lHN#N6Wlj3Ci4{18WgK(w*yF*-bFGPSaNx}i?hH}e zsa6eK4-Oqk?K^JZaKqw`F&NDq+pt{Q2+-;}9~*}+i|WTO8zbL1G+r^d1&NZD))Ikl z@}Yl6h$itu;q?cO>wCO=$M^yCJH8QkMn5xAn&aOv{6p*5F|NBejMuuE0IBHuVB%Q; z1iUZwdIMpZHw%tuLDnDAN7uly0xX-JKl%jXf*IBp-h3C-89Ij+L$u6%baLQvqdyrP zACKIjka^(ET-)NONpIeF@RFPBkPvsrnBTr(oZ_DE;(u3IdyauuxOmWVKc|JdhefPS zM=l=LysO8Bz~lYHxWGwEtWP|57#I2zEck9I)@iWNMqP~S`Ci28{C2FK;-2r~fA>EX zz6t4HzjyP{n>xFAy2I0%<=*U{+LnLQKlc2a)2ZzQPM;=TmT&ew?%cH8{-MWj4re3V z!A=ylUSRQ|6YLzH#xrSTGzzcSlMetaABum1ht==#I`uZT-gzP#Tr z%98RD{;Rmf_T(6g0Ne76MMeBG{Fks@bZi3ksuGn^E+pm3P>HIzYq;xkH(+7ujJ>Vw zA4Cmo$i}wvks39ll|UV9VG-{&!xg#7$2w>QTmPaMwNNUCExUQ9Myn{7!WJ~P!|J$9 zC8mTmlvYKlj#8USkM2EG9;@>MXe^S($LjurXf0I2wQVKpKuRU-zylUOS8a8G&6?FT zs=<1;xu6x7IM{-tLu0TWu8*tsN>B^cP?HYojKS6oXsP3FgzINa{BN9*qbQO4iZ;W| zansg9j2vj?FVyi09NOk%Hva`|?u=Wu1}S7Y+y8mA6>f%G+sb$qJlkO>+=A_u&!y#B zxV^1l+^+=f@Jd+0xT1cheWXWMqifMK(RT#BoTEP*8PRjm^BC_c@rG-#!w6B|R6bY3 zXT#@YX`tVQ!wqX0&$aM73D4E=nH&$x@jOLNxNU1k`eT*<93}KPdf|y)yl|lxsONhD zYwICs!rG@;5^l@!Ji@5)F=^UVq8Fdg^x_3gs6U|zv~SY%m^~UlR382Ep)!9rTh(`> zh8QobYm5k<=(7T=Wl4WS!FX0>%cA|R@>qM+d8h30$n3^>q_ozd_xFm5NT z%K|dSTN&f6gfUZ_gpbQucW%oVw;Ym&L601*bKQOM-^FtY-_7Nw37>J;SD1|8F z&^U~y*3glCxfovju)S0sC;fh*cO`$aqss zh9PJRwJB%7F$>W9p|Bfo>|fL?ISpTOE+ohZ-+Nj_O#ncO4#+yX<1!DEO(v;TYq{5A%_J(K@%jf4k5(W&&W>LWr+1sSe0o6i4=>-;ZxbXlwu zN`Qgt&aIdQBH+r$7LIo-W_Hm4?}#j zo~}Y#uVyo?$(3f-^DE4(=U0wd8%>A3V~6*K$1}6+&ZfP|$;3BRejE3$xnfb*vVrp# z1nD4rC<8|#5*W@b;{D#pj@e<%J~nIlcSR+Auex|AE|L*=r;_zDE{JVrt^lN^Xo%)L zu1PN`tt@~auW3*jYbcwQMCkl>Ry48!6qeUVJ7~X 
z)Di1s*c(`VfW-^TE^aKXF9Vb4b3Bx-GApw>e!BQIQ7ST3{K~9~T2=g-tPQ@G*mZDq zRGnP`n%iP^bxT#*hPYo;=~rcCbqjBApxn&LJFKZ0>J{*H82<-oebA@fN!JzJ4cr3m zO|Uls^RUyRs)T(K^y!MG9I4(d)YLPU*23zM>guY(6#q`ZKE)Z%RuR;2B;8ghut_Kg zIM~45oF4{iSQU`)BfvoLg)0cgj5Ns7k?Ma+a>Dvp3-oazr3<*%NN9@W6|`nTOC$*} zc<@jGFwl?6{7(Q*uqV|DdrCnuD1{A#=_bxzC3*S^%V7m4y4pvDs1>b7?Pv|4v22$_ zUs^Ii17cjW;dg?HT^95;Nsrb+sR&9Zp=<=zuocz-3O7+&6{Rhd+M={g;11AbJ=_=+ z;K!=guX2I@VoXm1{-p;rI1ld4^P!OZ+7lr~a4mw_g^)OMeFUZ6U}6owO_F3ai1n%B z{6{k!L%&UuVgRJ`fd!D1gn0PB101%2FcKaEuF;S@B2H4|?GJ{=by5aQ@cw&yu+qhSt0 zEIOJvC7Qq|V2x}uL>tr&Qjeu=k(KGm(>DesVHZ>* zrKazQ-dui)7EMcWoFcr4;~OZ5INn^u@g!z52^^S+V892@gEcNCi{XO1XS%D6+kU6V0k-}bUgHtYhSALxBs6?`8$x7sO0aWd|5Ytx6s(FaY|9d%{P7D z5$UmaI7hsFnO?m?4*|q9oS8T!LgI}JO9$yCcQ|ok&2mrqtHjX~UeWIbUQZkV{O%RA zmh-P>>!P1DYdKfPUn3!v%-ircQ0H$FasARg&VN7*6mfz20I2Y$Z>|bxMdy!RvM0>- zyjnQkKzKy6m6(>JB-Uh&+?8KH0D&oG82Is`JwaS?1Bb&NO}fw%H*i#2s>0O3 z3#>^Gd}z(8;4@eAnxvf#L4qnNPI4r=1nI&|$14|R>NAXx|27`VRmxvoQ`WP9sWywW z`RgIFGW)k8ol>>PFBC#`tRicLOcJsuT8NwyWr&d6#VKAul0u|Cx&jEIBDiax{PSC$ zdxAxp26E%zR31txk(!?7K}(OAV)Z4_{=)>vVsYRQy{O#YH@<(*NL$4li*`}P;e2Oj zXI~!Mfas9S)RWVRA6We(u}g&KbsQX&ig8g&k8?{<9uP3gLSo`{eSmJT1~Efk6epJp z^T+=Yy8ejUCvU41B&G!-Wd?51n!!%Wf@b6YI*gqPzy9|y|F z>|x~X3IS7O_Nua-#y+B4T*5Ev^#IUrOc1saHUJ`PfO{gb&Pk=zM7TVS!pe7jq6bd&j!F>GwdLHqa)j;@c#yF1%R)U z+ICj~r3K)Zqu7OTEng2SfR?XdRjtzT7J0-rBy1qhszG}j@5*u;<+Y@|1`euq!u13~ zZXwF0xuS1@kPi-#4;su|kzGbsN94#QUJxG+D4FDogiwYvkUrwbMo&)Vdb03kfg|B= z0nnsh0({C3aR(oV7D=j0F{|)TDR5eRTp)6WrwCdh14Axa?v)!tG2tq@F8$V!^OJxz z*++|uko9kq+g;!#VQ=D%b8>~fDfw@+_et1*uP@6VOtMgs)AYm3!R~TL7WSqgC_|q= z$s^sTy>*Fge+RUi;-d_9W`q=R$_X}%CH$1J|KCvR-Z^<0nYwcH*b&GrA z5;+~2*jeL022)&g=EwMo7MHN7>Dv~42EjK{gs1EaXi=6pK{~TaflGe;L(v~bp{yM2^Wm1yV zOI%JD#{!-#aEQ4kdyH_O9SHrnl&lPXk3`CFo((`lmhj2RN9r_|NHytpypm3C(#$D} zo8=S`B-{xW;5Hh$LpnqRk{Bb2Ko?`okBAMoPu*0EGNOD7c%a2vBGRLdtVoeHW!y&z zW#mrUtfdu2J_WfJm6H@&0V*ea%&$^a+k5b!tKx2mV96 [target_level0, target_level1, ...] 
+ """ + target = torch.stack(target, 0) + level_targets = [] + start = 0 + for n in num_level_anchors: + end = start + n + level_targets.append(target[:, start:end].squeeze(0)) + start = end + return level_targets + + +def anchor_target_single(flat_anchors, + valid_flags, + gt_bboxes, + gt_bboxes_ignore, + gt_labels, + img_meta, + target_means, + target_stds, + cfg, + label_channels=1, + sampling=True, + unmap_outputs=True): + inside_flags = anchor_inside_flags(flat_anchors, valid_flags, + img_meta['img_shape'][:2], + cfg.allowed_border) + if not inside_flags.any(): + return (None, ) * 6 + # assign gt and sample anchors + anchors = flat_anchors[inside_flags, :] + + if sampling: + assign_result, sampling_result = assign_and_sample( + anchors, gt_bboxes, gt_bboxes_ignore, None, cfg) + else: + bbox_assigner = build_assigner(cfg.assigner) + assign_result = bbox_assigner.assign(anchors, gt_bboxes, + gt_bboxes_ignore, gt_labels) + bbox_sampler = PseudoSampler() + sampling_result = bbox_sampler.sample(assign_result, anchors, + gt_bboxes) + + num_valid_anchors = anchors.shape[0] + bbox_targets = torch.zeros_like(anchors) + bbox_weights = torch.zeros_like(anchors) + labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long) + label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes, + sampling_result.pos_gt_bboxes, + target_means, target_stds) + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + if gt_labels is None: + labels[pos_inds] = 1 + else: + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + if cfg.pos_weight <= 0: + label_weights[pos_inds] = 1.0 + else: + label_weights[pos_inds] = cfg.pos_weight + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_anchors.size(0) + labels = unmap(labels, num_total_anchors, inside_flags) + label_weights = unmap(label_weights, num_total_anchors, inside_flags) + bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) + bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) + + return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, + neg_inds) + + +def anchor_inside_flags(flat_anchors, valid_flags, img_shape, + allowed_border=0): + img_h, img_w = img_shape[:2] + if allowed_border >= 0: + inside_flags = valid_flags & \ + (flat_anchors[:, 0] >= -allowed_border) & \ + (flat_anchors[:, 1] >= -allowed_border) & \ + (flat_anchors[:, 2] < img_w + allowed_border) & \ + (flat_anchors[:, 3] < img_h + allowed_border) + else: + inside_flags = valid_flags + return inside_flags + + +def unmap(data, count, inds, fill=0): + """ Unmap a subset of item (data) back to the original set of items (of + size count) """ + if data.dim() == 1: + ret = data.new_full((count, ), fill) + ret[inds] = data + else: + new_size = (count, ) + data.size()[1:] + ret = data.new_full(new_size, fill) + ret[inds, :] = data + return ret diff --git a/CDARTS_detection/mmdet/core/anchor/guided_anchor_target.py b/CDARTS_detection/mmdet/core/anchor/guided_anchor_target.py new file mode 100644 index 0000000..2e95406 --- /dev/null +++ b/CDARTS_detection/mmdet/core/anchor/guided_anchor_target.py @@ -0,0 +1,285 @@ +import torch + +from ..bbox import build_assigner, build_sampler, PseudoSampler +from ..utils import unmap, multi_apply + + +def calc_region(bbox, 
diff --git a/CDARTS_detection/mmdet/core/anchor/guided_anchor_target.py b/CDARTS_detection/mmdet/core/anchor/guided_anchor_target.py
new file mode 100644
index 0000000..2e95406
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/anchor/guided_anchor_target.py
@@ -0,0 +1,285 @@
+import torch
+
+from ..bbox import build_assigner, build_sampler, PseudoSampler
+from ..utils import unmap, multi_apply
+
+
+def calc_region(bbox, ratio, featmap_size=None):
+    """Calculate a proportional bbox region.
+
+    The bbox center is fixed; the new height and width are h * ratio and
+    w * ratio.
+
+    Args:
+        bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
+        ratio (float): Ratio of the output region.
+        featmap_size (tuple): Feature map size used for clipping the boundary.
+
+    Returns:
+        tuple: x1, y1, x2, y2
+    """
+    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
+    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
+    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
+    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
+    if featmap_size is not None:
+        x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
+        y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
+        x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
+        y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
+    return (x1, y1, x2, y2)
+
+
+def ga_loc_target(gt_bboxes_list,
+                  featmap_sizes,
+                  anchor_scale,
+                  anchor_strides,
+                  center_ratio=0.2,
+                  ignore_ratio=0.5):
+    """Compute location targets for guided anchoring.
+
+    Each feature map is divided into positive, negative and ignore regions.
+    - positive regions: target 1, weight 1
+    - ignore regions: target 0, weight 0
+    - negative regions: target 0, weight 0.1
+
+    Args:
+        gt_bboxes_list (list[Tensor]): Gt bboxes of each image.
+        featmap_sizes (list[tuple]): Multi level sizes of each feature map.
+        anchor_scale (int): Anchor scale.
+        anchor_strides (list[int]): Multi level anchor strides.
+        center_ratio (float): Ratio of the center region.
+        ignore_ratio (float): Ratio of the ignore region.
+
+    Returns:
+        tuple
+    """
+    img_per_gpu = len(gt_bboxes_list)
+    num_lvls = len(featmap_sizes)
+    r1 = (1 - center_ratio) / 2
+    r2 = (1 - ignore_ratio) / 2
+    all_loc_targets = []
+    all_loc_weights = []
+    all_ignore_map = []
+    for lvl_id in range(num_lvls):
+        h, w = featmap_sizes[lvl_id]
+        loc_targets = torch.zeros(img_per_gpu,
+                                  1,
+                                  h,
+                                  w,
+                                  device=gt_bboxes_list[0].device,
+                                  dtype=torch.float32)
+        loc_weights = torch.full_like(loc_targets, -1)
+        ignore_map = torch.zeros_like(loc_targets)
+        all_loc_targets.append(loc_targets)
+        all_loc_weights.append(loc_weights)
+        all_ignore_map.append(ignore_map)
+    for img_id in range(img_per_gpu):
+        gt_bboxes = gt_bboxes_list[img_id]
+        scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) *
+                           (gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1))
+        min_anchor_size = scale.new_full(
+            (1, ), float(anchor_scale * anchor_strides[0]))
+        # assign gt bboxes to different feature levels w.r.t.
their scales + target_lvls = torch.floor( + torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) + target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() + for gt_id in range(gt_bboxes.size(0)): + lvl = target_lvls[gt_id].item() + # rescaled to corresponding feature map + gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl] + # calculate ignore regions + ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( + gt_, r2, featmap_sizes[lvl]) + # calculate positive (center) regions + ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region( + gt_, r1, featmap_sizes[lvl]) + all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + + 1] = 1 + all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + + 1, ignore_x1:ignore_x2 + 1] = 0 + all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + + 1] = 1 + # calculate ignore map on nearby low level feature + if lvl > 0: + d_lvl = lvl - 1 + # rescaled to corresponding feature map + gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl] + ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( + gt_, r2, featmap_sizes[d_lvl]) + all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + + 1, ignore_x1:ignore_x2 + 1] = 1 + # calculate ignore map on nearby high level feature + if lvl < num_lvls - 1: + u_lvl = lvl + 1 + # rescaled to corresponding feature map + gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl] + ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( + gt_, r2, featmap_sizes[u_lvl]) + all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + + 1, ignore_x1:ignore_x2 + 1] = 1 + for lvl_id in range(num_lvls): + # ignore negative regions w.r.t. ignore map + all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) + & (all_ignore_map[lvl_id] > 0)] = 0 + # set negative regions with weight 0.1 + all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1 + # loc average factor to balance loss + loc_avg_factor = sum( + [t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200 + return all_loc_targets, all_loc_weights, loc_avg_factor + + +def ga_shape_target(approx_list, + inside_flag_list, + square_list, + gt_bboxes_list, + img_metas, + approxs_per_octave, + cfg, + gt_bboxes_ignore_list=None, + sampling=True, + unmap_outputs=True): + """Compute guided anchoring targets. + + Args: + approx_list (list[list]): Multi level approxs of each image. + inside_flag_list (list[list]): Multi level inside flags of each image. + square_list (list[list]): Multi level squares of each image. + gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. + img_metas (list[dict]): Meta info of each image. + approxs_per_octave (int): number of approxs per octave + cfg (dict): RPN train configs. + gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. + sampling (bool): sampling or not. + unmap_outputs (bool): unmap outputs or not. 
+ + Returns: + tuple + """ + num_imgs = len(img_metas) + assert len(approx_list) == len(inside_flag_list) == len( + square_list) == num_imgs + # anchor number of multi levels + num_level_squares = [squares.size(0) for squares in square_list[0]] + # concat all level anchors and flags to a single tensor + inside_flag_flat_list = [] + approx_flat_list = [] + square_flat_list = [] + for i in range(num_imgs): + assert len(square_list[i]) == len(inside_flag_list[i]) + inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) + approx_flat_list.append(torch.cat(approx_list[i])) + square_flat_list.append(torch.cat(square_list[i])) + + # compute targets for each image + if gt_bboxes_ignore_list is None: + gt_bboxes_ignore_list = [None for _ in range(num_imgs)] + (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list, + neg_inds_list) = multi_apply(ga_shape_target_single, + approx_flat_list, + inside_flag_flat_list, + square_flat_list, + gt_bboxes_list, + gt_bboxes_ignore_list, + img_metas, + approxs_per_octave=approxs_per_octave, + cfg=cfg, + sampling=sampling, + unmap_outputs=unmap_outputs) + # no valid anchors + if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]): + return None + # sampled anchors of all images + num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) + num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) + # split targets to a list w.r.t. multiple levels + bbox_anchors_list = images_to_levels(all_bbox_anchors, num_level_squares) + bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares) + bbox_weights_list = images_to_levels(all_bbox_weights, num_level_squares) + return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, num_total_pos, + num_total_neg) + + +def images_to_levels(target, num_level_anchors): + """Convert targets by image to targets by feature level. + + [target_img0, target_img1] -> [target_level0, target_level1, ...] + """ + target = torch.stack(target, 0) + level_targets = [] + start = 0 + for n in num_level_anchors: + end = start + n + level_targets.append(target[:, start:end].squeeze(0)) + start = end + return level_targets + + +def ga_shape_target_single(flat_approxs, + inside_flags, + flat_squares, + gt_bboxes, + gt_bboxes_ignore, + img_meta, + approxs_per_octave, + cfg, + sampling=True, + unmap_outputs=True): + """Compute guided anchoring targets. + + This function returns sampled anchors and gt bboxes directly + rather than calculates regression targets. + + Args: + flat_approxs (Tensor): flat approxs of a single image, + shape (n, 4) + inside_flags (Tensor): inside flags of a single image, + shape (n, ). + flat_squares (Tensor): flat squares of a single image, + shape (approxs_per_octave * n, 4) + gt_bboxes (Tensor): Ground truth bboxes of a single image. + img_meta (dict): Meta info of a single image. + approxs_per_octave (int): number of approxs per octave + cfg (dict): RPN train configs. + sampling (bool): sampling or not. + unmap_outputs (bool): unmap outputs or not. 
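+        gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored,
+            shape (k, 4).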
+ + Returns: + tuple + """ + if not inside_flags.any(): + return (None, ) * 6 + # assign gt and sample anchors + expand_inside_flags = inside_flags[:, None].expand( + -1, approxs_per_octave).reshape(-1) + approxs = flat_approxs[expand_inside_flags, :] + squares = flat_squares[inside_flags, :] + + bbox_assigner = build_assigner(cfg.ga_assigner) + assign_result = bbox_assigner.assign(approxs, squares, approxs_per_octave, + gt_bboxes, gt_bboxes_ignore) + if sampling: + bbox_sampler = build_sampler(cfg.ga_sampler) + else: + bbox_sampler = PseudoSampler() + sampling_result = bbox_sampler.sample(assign_result, squares, gt_bboxes) + + bbox_anchors = torch.zeros_like(squares) + bbox_gts = torch.zeros_like(squares) + bbox_weights = torch.zeros_like(squares) + + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + if len(pos_inds) > 0: + bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes + bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes + bbox_weights[pos_inds, :] = 1.0 + + # map up to original set of anchors + if unmap_outputs: + num_total_anchors = flat_squares.size(0) + bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags) + bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags) + bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) + + return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds) diff --git a/CDARTS_detection/mmdet/core/bbox/__init__.py b/CDARTS_detection/mmdet/core/bbox/__init__.py new file mode 100644 index 0000000..bcf6efd --- /dev/null +++ b/CDARTS_detection/mmdet/core/bbox/__init__.py @@ -0,0 +1,20 @@ +from .geometry import bbox_overlaps +from .assigners import BaseAssigner, MaxIoUAssigner, AssignResult +from .samplers import (BaseSampler, PseudoSampler, RandomSampler, + InstanceBalancedPosSampler, IoUBalancedNegSampler, + CombinedSampler, SamplingResult) +from .assign_sampling import build_assigner, build_sampler, assign_and_sample +from .transforms import (bbox2delta, delta2bbox, bbox_flip, bbox_mapping, + bbox_mapping_back, bbox2roi, roi2bbox, bbox2result, + distance2bbox) +from .bbox_target import bbox_target + +__all__ = [ + 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult', + 'BaseSampler', 'PseudoSampler', 'RandomSampler', + 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', + 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample', + 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping', + 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result', + 'distance2bbox', 'bbox_target' +] diff --git a/CDARTS_detection/mmdet/core/bbox/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/core/bbox/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d0eac25d6283a56b5931b88dc96500d72c2343a GIT binary patch literal 1057 zcmb7@%Wj)M6ow)Af-(3K-yJ9Iq8m$9ExC(ss$SAn7LB4r-OL7!eI^8T4pSod6LwPUt;reA*v!;*aG?PnG{J`eTF{0Lbm16! 
[GIT binary patch payloads omitted — compiled build artifacts (*.cpython-36.pyc) under CDARTS_detection/mmdet/core/bbox/__pycache__/ and CDARTS_detection/mmdet/core/bbox/assigners/__pycache__/; the corresponding Python sources follow below]
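Reviewer note (not part of the patch): the assigners added below all emit the same per-proposal encoding (-1 / 0 / 1-based gt index). A small hedged sketch of decoding such a vector, with invented values:

    import torch

    # hypothetical assigner output for 5 proposals and 2 gt boxes
    assigned_gt_inds = torch.tensor([-1, 0, 2, 1, 0])

    ignored = assigned_gt_inds == -1           # don't care
    negative = assigned_gt_inds == 0           # background
    positive = assigned_gt_inds > 0            # matched to a gt
    gt_index = assigned_gt_inds[positive] - 1  # back to 0-based gt indices
    assert gt_index.tolist() == [1, 0]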
diff --git a/CDARTS_detection/mmdet/core/bbox/assigners/approx_max_iou_assigner.py b/CDARTS_detection/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
new file mode 100644
index 0000000..1283f7f
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
@@ -0,0 +1,116 @@
+import torch
+
+from .max_iou_assigner import MaxIoUAssigner
+from ..geometry import bbox_overlaps
+
+
+class ApproxMaxIoUAssigner(MaxIoUAssigner):
+    """Assign a corresponding gt bbox or background to each bbox.
+
+    Each proposal is assigned `-1`, `0`, or a positive integer
+    indicating the (1-based) ground truth index.
+
+    - -1: don't care
+    - 0: negative sample, no assigned gt
+    - positive integer: positive sample, index (1-based) of assigned gt
+
+    Args:
+        pos_iou_thr (float): IoU threshold for positive bboxes.
+        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
+        min_pos_iou (float): Minimum IoU for a bbox to be considered a
+            positive bbox. Positive samples can have a smaller IoU than
+            pos_iou_thr due to the 5th step (assign max IoU sample to each gt).
+        gt_max_assign_all (bool): Whether to assign all bboxes with the same
+            highest overlap with some gt to that gt.
+        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
+            `gt_bboxes_ignore` is specified). Negative values mean not
+            ignoring any bboxes.
+        ignore_wrt_candidates (bool): Whether to compute the iof between
+            `bboxes` and `gt_bboxes_ignore`, or the contrary.
+    """
+
+    def __init__(self,
+                 pos_iou_thr,
+                 neg_iou_thr,
+                 min_pos_iou=.0,
+                 gt_max_assign_all=True,
+                 ignore_iof_thr=-1,
+                 ignore_wrt_candidates=True):
+        self.pos_iou_thr = pos_iou_thr
+        self.neg_iou_thr = neg_iou_thr
+        self.min_pos_iou = min_pos_iou
+        self.gt_max_assign_all = gt_max_assign_all
+        self.ignore_iof_thr = ignore_iof_thr
+        self.ignore_wrt_candidates = ignore_wrt_candidates
+
+    def assign(self,
+               approxs,
+               squares,
+               approxs_per_octave,
+               gt_bboxes,
+               gt_bboxes_ignore=None,
+               gt_labels=None):
+        """Assign gt to approxs.
+
+        This method assigns a gt bbox to each group of approxs (bboxes);
+        each group of approxs is represented by a base approx (bbox) and
+        is assigned -1, 0, or a positive number.
+        -1 means don't care, 0 means negative sample,
+        a positive number is the index (1-based) of the assigned gt.
+        The assignment is done in the following steps; the order matters.
+
+        1. assign every bbox to -1
+        2. use the max IoU of each group of approxs as the IoU of its
+           base square
+        3. assign proposals whose IoU with all gts < neg_iou_thr to 0
+        4. for each bbox, if the IoU with its nearest gt >= pos_iou_thr,
+           assign it to that gt
+        5. for each gt bbox, assign its nearest proposals (may be more than
+           one) to itself
+
+        Args:
+            approxs (Tensor): Bounding boxes to be assigned,
+                shape (approxs_per_octave * n, 4).
+            squares (Tensor): Base bounding boxes to be assigned,
+                shape (n, 4).
+            approxs_per_octave (int): number of approxs per octave.
+            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+                labelled as `ignored`, e.g., crowd boxes in COCO.
+            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+        Returns:
+            :obj:`AssignResult`: The assign result.
+ """ + + if squares.shape[0] == 0 or gt_bboxes.shape[0] == 0: + raise ValueError('No gt or approxs') + num_squares = squares.size(0) + num_gts = gt_bboxes.size(0) + # re-organize anchors by approxs_per_octave x num_squares + approxs = torch.transpose( + approxs.view(num_squares, approxs_per_octave, 4), 0, + 1).contiguous().view(-1, 4) + all_overlaps = bbox_overlaps(approxs, gt_bboxes) + + overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares, + num_gts).max(dim=0) + overlaps = torch.transpose(overlaps, 0, 1) + + bboxes = squares[:, :4] + + if (self.ignore_iof_thr > 0) and (gt_bboxes_ignore is not None) and ( + gt_bboxes_ignore.numel() > 0): + if self.ignore_wrt_candidates: + ignore_overlaps = bbox_overlaps(bboxes, + gt_bboxes_ignore, + mode='iof') + ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) + else: + ignore_overlaps = bbox_overlaps(gt_bboxes_ignore, + bboxes, + mode='iof') + ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) + overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 + + assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) + return assign_result diff --git a/CDARTS_detection/mmdet/core/bbox/assigners/assign_result.py b/CDARTS_detection/mmdet/core/bbox/assigners/assign_result.py new file mode 100644 index 0000000..33c761d --- /dev/null +++ b/CDARTS_detection/mmdet/core/bbox/assigners/assign_result.py @@ -0,0 +1,19 @@ +import torch + + +class AssignResult(object): + + def __init__(self, num_gts, gt_inds, max_overlaps, labels=None): + self.num_gts = num_gts + self.gt_inds = gt_inds + self.max_overlaps = max_overlaps + self.labels = labels + + def add_gt_(self, gt_labels): + self_inds = torch.arange( + 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) + self.gt_inds = torch.cat([self_inds, self.gt_inds]) + self.max_overlaps = torch.cat( + [self.max_overlaps.new_ones(self.num_gts), self.max_overlaps]) + if self.labels is not None: + self.labels = torch.cat([gt_labels, self.labels]) diff --git a/CDARTS_detection/mmdet/core/bbox/assigners/base_assigner.py b/CDARTS_detection/mmdet/core/bbox/assigners/base_assigner.py new file mode 100644 index 0000000..7bd02dc --- /dev/null +++ b/CDARTS_detection/mmdet/core/bbox/assigners/base_assigner.py @@ -0,0 +1,8 @@ +from abc import ABCMeta, abstractmethod + + +class BaseAssigner(metaclass=ABCMeta): + + @abstractmethod + def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): + pass diff --git a/CDARTS_detection/mmdet/core/bbox/assigners/max_iou_assigner.py b/CDARTS_detection/mmdet/core/bbox/assigners/max_iou_assigner.py new file mode 100644 index 0000000..57a1e75 --- /dev/null +++ b/CDARTS_detection/mmdet/core/bbox/assigners/max_iou_assigner.py @@ -0,0 +1,152 @@ +import torch + +from .base_assigner import BaseAssigner +from .assign_result import AssignResult +from ..geometry import bbox_overlaps + + +class MaxIoUAssigner(BaseAssigner): + """Assign a corresponding gt bbox or background to each bbox. + + Each proposals will be assigned with `-1`, `0`, or a positive integer + indicating the ground truth index. + + - -1: don't care + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + Args: + pos_iou_thr (float): IoU threshold for positive bboxes. + neg_iou_thr (float or tuple): IoU threshold for negative bboxes. + min_pos_iou (float): Minimum iou for a bbox to be considered as a + positive bbox. Positive samples can have smaller IoU than + pos_iou_thr due to the 4th step (assign max IoU sample to each gt). 
+        gt_max_assign_all (bool): Whether to assign all bboxes with the same
+            highest overlap with some gt to that gt.
+        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
+            `gt_bboxes_ignore` is specified). Negative values mean not
+            ignoring any bboxes.
+        ignore_wrt_candidates (bool): Whether to compute the iof between
+            `bboxes` and `gt_bboxes_ignore`, or the contrary.
+    """
+
+    def __init__(self,
+                 pos_iou_thr,
+                 neg_iou_thr,
+                 min_pos_iou=.0,
+                 gt_max_assign_all=True,
+                 ignore_iof_thr=-1,
+                 ignore_wrt_candidates=True):
+        self.pos_iou_thr = pos_iou_thr
+        self.neg_iou_thr = neg_iou_thr
+        self.min_pos_iou = min_pos_iou
+        self.gt_max_assign_all = gt_max_assign_all
+        self.ignore_iof_thr = ignore_iof_thr
+        self.ignore_wrt_candidates = ignore_wrt_candidates
+
+    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
+        """Assign gt to bboxes.
+
+        This method assigns a gt bbox to every bbox (proposal/anchor); each
+        bbox is assigned -1, 0, or a positive number. -1 means don't care,
+        0 means negative sample, a positive number is the index (1-based) of
+        the assigned gt.
+        The assignment is done in the following steps; the order matters.
+
+        1. assign every bbox to -1
+        2. assign proposals whose IoU with all gts < neg_iou_thr to 0
+        3. for each bbox, if the IoU with its nearest gt >= pos_iou_thr,
+           assign it to that gt
+        4. for each gt bbox, assign its nearest proposals (may be more than
+           one) to itself
+
+        Args:
+            bboxes (Tensor): Bounding boxes to be assigned, shape (n, 4).
+            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+                labelled as `ignored`, e.g., crowd boxes in COCO.
+            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+        Returns:
+            :obj:`AssignResult`: The assign result.
+        """
+        if bboxes.shape[0] == 0 or gt_bboxes.shape[0] == 0:
+            raise ValueError('No gt or bboxes')
+        bboxes = bboxes[:, :4]
+        overlaps = bbox_overlaps(gt_bboxes, bboxes)
+
+        if (self.ignore_iof_thr > 0) and (gt_bboxes_ignore is not None) and (
+                gt_bboxes_ignore.numel() > 0):
+            if self.ignore_wrt_candidates:
+                ignore_overlaps = bbox_overlaps(
+                    bboxes, gt_bboxes_ignore, mode='iof')
+                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
+            else:
+                ignore_overlaps = bbox_overlaps(
+                    gt_bboxes_ignore, bboxes, mode='iof')
+                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
+            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
+
+        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
+        return assign_result
+
+    def assign_wrt_overlaps(self, overlaps, gt_labels=None):
+        """Assign w.r.t. the overlaps of bboxes with gts.
+
+        Args:
+            overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
+                shape (k, n).
+            gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).
+
+        Returns:
+            :obj:`AssignResult`: The assign result.
+        """
+        if overlaps.numel() == 0:
+            raise ValueError('No gt or proposals')
+
+        num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
+
+        # 1. assign -1 by default
+        assigned_gt_inds = overlaps.new_full(
+            (num_bboxes, ), -1, dtype=torch.long)
+
+        # for each anchor, which gt best overlaps with it
+        # for each anchor, the max iou of all gts
+        max_overlaps, argmax_overlaps = overlaps.max(dim=0)
+        # for each gt, which anchor best overlaps with it
+        # for each gt, the max iou of all proposals
+        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
+
+        # 2. assign negative: proposals whose max IoU falls below
+        #    neg_iou_thr (or inside the given interval) get label 0
+        if isinstance(self.neg_iou_thr, float):
+            assigned_gt_inds[(max_overlaps >= 0)
+                             & (max_overlaps < self.neg_iou_thr)] = 0
+        elif isinstance(self.neg_iou_thr, tuple):
+            assert len(self.neg_iou_thr) == 2
+            assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
+                             & (max_overlaps < self.neg_iou_thr[1])] = 0
+
+        # 3. assign positive: above positive IoU threshold
+        pos_inds = max_overlaps >= self.pos_iou_thr
+        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
+
+        # 4. assign fg: for each gt, proposals with highest IoU
+        for i in range(num_gts):
+            if gt_max_overlaps[i] >= self.min_pos_iou:
+                if self.gt_max_assign_all:
+                    max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
+                    assigned_gt_inds[max_iou_inds] = i + 1
+                else:
+                    assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
+
+        if gt_labels is not None:
+            assigned_labels = assigned_gt_inds.new_zeros((num_bboxes, ))
+            pos_inds = torch.nonzero(assigned_gt_inds > 0).squeeze()
+            if pos_inds.numel() > 0:
+                assigned_labels[pos_inds] = gt_labels[
+                    assigned_gt_inds[pos_inds] - 1]
+        else:
+            assigned_labels = None
+
+        return AssignResult(
+            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
diff --git a/CDARTS_detection/mmdet/core/bbox/bbox_target.py b/CDARTS_detection/mmdet/core/bbox/bbox_target.py
new file mode 100644
index 0000000..aa1fbc6
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/bbox/bbox_target.py
@@ -0,0 +1,73 @@
+import torch
+
+from .transforms import bbox2delta
+from ..utils import multi_apply
+
+
+def bbox_target(pos_bboxes_list,
+                neg_bboxes_list,
+                pos_gt_bboxes_list,
+                pos_gt_labels_list,
+                cfg,
+                reg_classes=1,
+                target_means=[.0, .0, .0, .0],
+                target_stds=[1.0, 1.0, 1.0, 1.0],
+                concat=True):
+    labels, label_weights, bbox_targets, bbox_weights = multi_apply(
+        bbox_target_single,
+        pos_bboxes_list,
+        neg_bboxes_list,
+        pos_gt_bboxes_list,
+        pos_gt_labels_list,
+        cfg=cfg,
+        reg_classes=reg_classes,
+        target_means=target_means,
+        target_stds=target_stds)
+
+    if concat:
+        labels = torch.cat(labels, 0)
+        label_weights = torch.cat(label_weights, 0)
+        bbox_targets = torch.cat(bbox_targets, 0)
+        bbox_weights = torch.cat(bbox_weights, 0)
+    return labels, label_weights, bbox_targets, bbox_weights
+
+
+def bbox_target_single(pos_bboxes,
+                       neg_bboxes,
+                       pos_gt_bboxes,
+                       pos_gt_labels,
+                       cfg,
+                       reg_classes=1,
+                       target_means=[.0, .0, .0, .0],
+                       target_stds=[1.0, 1.0, 1.0, 1.0]):
+    num_pos = pos_bboxes.size(0)
+    num_neg = neg_bboxes.size(0)
+    num_samples = num_pos + num_neg
+    labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
+    label_weights = pos_bboxes.new_zeros(num_samples)
+    bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
+    bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
+    if num_pos > 0:
+        labels[:num_pos] = pos_gt_labels
+        pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
+        label_weights[:num_pos] = pos_weight
+        pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means,
+                                      target_stds)
+        bbox_targets[:num_pos, :] = pos_bbox_targets
+        bbox_weights[:num_pos, :] = 1
+    if num_neg > 0:
+        label_weights[-num_neg:] = 1.0
+
+    return labels, label_weights, bbox_targets, bbox_weights
+
+
+def expand_target(bbox_targets, bbox_weights, labels, num_classes):
+    bbox_targets_expand = bbox_targets.new_zeros((bbox_targets.size(0),
+                                                  4 * num_classes))
+    bbox_weights_expand = bbox_weights.new_zeros((bbox_weights.size(0),
+                                                  4 * num_classes))
+    for i in torch.nonzero(labels > 0).squeeze(-1):
+        start, end = labels[i] * 4, (labels[i] + 1) * 4
+        bbox_targets_expand[i, start:end] = bbox_targets[i, :]
+        bbox_weights_expand[i, start:end] = bbox_weights[i, :]
+    return bbox_targets_expand, bbox_weights_expand
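Reviewer note (not part of the patch): the target/weight layout produced by `bbox_target_single` above is easiest to see on toy inputs. A hedged sketch with invented counts and labels (positives come first, then negatives; regression weights cover positives only):

    import torch

    num_pos, num_neg = 2, 3
    labels = torch.zeros(num_pos + num_neg, dtype=torch.long)
    labels[:num_pos] = torch.tensor([3, 1])      # gt class ids for positives
    label_weights = torch.zeros(num_pos + num_neg)
    label_weights[:num_pos] = 1.0                # cfg.pos_weight <= 0 -> 1.0
    label_weights[-num_neg:] = 1.0               # negatives count in the cls loss
    bbox_weights = torch.zeros(num_pos + num_neg, 4)
    bbox_weights[:num_pos, :] = 1                # only positives are regressed
    assert bbox_weights.sum().item() == 8.0      # 2 positives x 4 coordinates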
diff --git a/CDARTS_detection/mmdet/core/bbox/geometry.py b/CDARTS_detection/mmdet/core/bbox/geometry.py
new file mode 100644
index 0000000..3bc8dae
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/bbox/geometry.py
@@ -0,0 +1,63 @@
+import torch
+
+
+def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):
+    """Calculate overlap between two sets of bboxes.
+
+    If ``is_aligned`` is ``False``, calculate the ious between each bbox
+    of bboxes1 and bboxes2; otherwise, calculate the ious between each
+    aligned pair of bboxes1 and bboxes2.
+
+    Args:
+        bboxes1 (Tensor): shape (m, 4).
+        bboxes2 (Tensor): shape (n, 4); if is_aligned is ``True``, then m
+            and n must be equal.
+        mode (str): 'iou' (intersection over union) or 'iof' (intersection
+            over foreground).
+
+    Returns:
+        ious (Tensor): shape (m, n) if ``is_aligned`` is False, else
+            shape (m, 1).
+    """
+
+    assert mode in ['iou', 'iof']
+
+    rows = bboxes1.size(0)
+    cols = bboxes2.size(0)
+    if is_aligned:
+        assert rows == cols
+
+    if rows * cols == 0:
+        return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)
+
+    if is_aligned:
+        lt = torch.max(bboxes1[:, :2], bboxes2[:, :2])  # [rows, 2]
+        rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:])  # [rows, 2]
+
+        wh = (rb - lt + 1).clamp(min=0)  # [rows, 2]
+        overlap = wh[:, 0] * wh[:, 1]
+        area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
+            bboxes1[:, 3] - bboxes1[:, 1] + 1)
+
+        if mode == 'iou':
+            area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
+                bboxes2[:, 3] - bboxes2[:, 1] + 1)
+            ious = overlap / (area1 + area2 - overlap)
+        else:
+            ious = overlap / area1
+    else:
+        lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2])  # [rows, cols, 2]
+        rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:])  # [rows, cols, 2]
+
+        wh = (rb - lt + 1).clamp(min=0)  # [rows, cols, 2]
+        overlap = wh[:, :, 0] * wh[:, :, 1]
+        area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
+            bboxes1[:, 3] - bboxes1[:, 1] + 1)
+
+        if mode == 'iou':
+            area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
+                bboxes2[:, 3] - bboxes2[:, 1] + 1)
+            ious = overlap / (area1[:, None] + area2 - overlap)
+        else:
+            ious = overlap / (area1[:, None])
+
+    return ious
diff --git a/CDARTS_detection/mmdet/core/bbox/samplers/__init__.py b/CDARTS_detection/mmdet/core/bbox/samplers/__init__.py
new file mode 100644
index 0000000..167044f
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/bbox/samplers/__init__.py
@@ -0,0 +1,14 @@
+from .base_sampler import BaseSampler
+from .pseudo_sampler import PseudoSampler
+from .random_sampler import RandomSampler
+from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
+from .iou_balanced_neg_sampler import IoUBalancedNegSampler
+from .combined_sampler import CombinedSampler
+from .ohem_sampler import OHEMSampler
+from .sampling_result import SamplingResult
+
+__all__ = [
+    'BaseSampler', 'PseudoSampler', 'RandomSampler',
+    'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
+    'OHEMSampler', 'SamplingResult'
+]
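Reviewer note (not part of the patch): `bbox_overlaps` above uses the inclusive "+1 pixel" convention, so a box [0, 0, 9, 9] is 10x10. A small hand-checked sketch of the unaligned branch (values invented):

    import torch

    b1 = torch.tensor([[0., 0., 9., 9.]])    # 10 x 10 = 100 px
    b2 = torch.tensor([[5., 5., 14., 14.]])  # 10 x 10, overlapping 5 x 5 = 25 px
    lt = torch.max(b1[:, None, :2], b2[:, :2])
    rb = torch.min(b1[:, None, 2:], b2[:, 2:])
    wh = (rb - lt + 1).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    assert inter.item() == 25.0

    def area(b):
        return (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)

    iou = inter / (area(b1)[:, None] + area(b2) - inter)
    assert abs(iou.item() - 25.0 / 175.0) < 1e-6  # IoU = 25 / (100 + 100 - 25)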
[GIT binary patch payloads omitted — compiled build artifacts (*.cpython-36.pyc) under CDARTS_detection/mmdet/core/bbox/samplers/__pycache__/; the corresponding Python sources follow below]
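Reviewer note (not part of the patch): before the sampler sources below, a hedged sketch of the positive/negative budget arithmetic that `BaseSampler.sample` implements (all numbers invented):

    num, pos_fraction, neg_pos_ub = 256, 0.5, 3
    num_expected_pos = int(num * pos_fraction)   # 128 positives requested
    num_sampled_pos = 10                         # suppose only 10 positives exist
    num_expected_neg = num - num_sampled_pos     # 246 negatives to fill the batch
    if neg_pos_ub >= 0:
        # cap negatives at neg_pos_ub * max(1, positives), as in the code below
        num_expected_neg = min(num_expected_neg,
                               int(neg_pos_ub * max(1, num_sampled_pos)))
    assert num_expected_neg == 30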
diff --git a/CDARTS_detection/mmdet/core/bbox/samplers/base_sampler.py b/CDARTS_detection/mmdet/core/bbox/samplers/base_sampler.py
new file mode 100644
index 0000000..12df013
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/bbox/samplers/base_sampler.py
@@ -0,0 +1,78 @@
+from abc import ABCMeta, abstractmethod
+
+import torch
+
+from .sampling_result import SamplingResult
+
+
+class BaseSampler(metaclass=ABCMeta):
+
+    def __init__(self,
+                 num,
+                 pos_fraction,
+                 neg_pos_ub=-1,
+                 add_gt_as_proposals=True,
+                 **kwargs):
+        self.num = num
+        self.pos_fraction = pos_fraction
+        self.neg_pos_ub = neg_pos_ub
+        self.add_gt_as_proposals = add_gt_as_proposals
+        self.pos_sampler = self
+        self.neg_sampler = self
+
+    @abstractmethod
+    def _sample_pos(self, assign_result, num_expected, **kwargs):
+        pass
+
+    @abstractmethod
+    def _sample_neg(self, assign_result, num_expected, **kwargs):
+        pass
+
+    def sample(self,
+               assign_result,
+               bboxes,
+               gt_bboxes,
+               gt_labels=None,
+               **kwargs):
+        """Sample positive and negative bboxes.
+
+        This is a simple implementation of bbox sampling given candidates,
+        assigning results and ground truth bboxes.
+
+        Args:
+            assign_result (:obj:`AssignResult`): Bbox assigning results.
+ bboxes (Tensor): Boxes to be sampled from. + gt_bboxes (Tensor): Ground truth bboxes. + gt_labels (Tensor, optional): Class labels of ground truth bboxes. + + Returns: + :obj:`SamplingResult`: Sampling result. + """ + bboxes = bboxes[:, :4] + + gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) + if self.add_gt_as_proposals: + bboxes = torch.cat([gt_bboxes, bboxes], dim=0) + assign_result.add_gt_(gt_labels) + gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) + gt_flags = torch.cat([gt_ones, gt_flags]) + + num_expected_pos = int(self.num * self.pos_fraction) + pos_inds = self.pos_sampler._sample_pos( + assign_result, num_expected_pos, bboxes=bboxes, **kwargs) + # We found that sampled indices have duplicated items occasionally. + # (may be a bug of PyTorch) + pos_inds = pos_inds.unique() + num_sampled_pos = pos_inds.numel() + num_expected_neg = self.num - num_sampled_pos + if self.neg_pos_ub >= 0: + _pos = max(1, num_sampled_pos) + neg_upper_bound = int(self.neg_pos_ub * _pos) + if num_expected_neg > neg_upper_bound: + num_expected_neg = neg_upper_bound + neg_inds = self.neg_sampler._sample_neg( + assign_result, num_expected_neg, bboxes=bboxes, **kwargs) + neg_inds = neg_inds.unique() + + return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, + assign_result, gt_flags) diff --git a/CDARTS_detection/mmdet/core/bbox/samplers/combined_sampler.py b/CDARTS_detection/mmdet/core/bbox/samplers/combined_sampler.py new file mode 100644 index 0000000..25e820b --- /dev/null +++ b/CDARTS_detection/mmdet/core/bbox/samplers/combined_sampler.py @@ -0,0 +1,16 @@ +from .base_sampler import BaseSampler +from ..assign_sampling import build_sampler + + +class CombinedSampler(BaseSampler): + + def __init__(self, pos_sampler, neg_sampler, **kwargs): + super(CombinedSampler, self).__init__(**kwargs) + self.pos_sampler = build_sampler(pos_sampler, **kwargs) + self.neg_sampler = build_sampler(neg_sampler, **kwargs) + + def _sample_pos(self, **kwargs): + raise NotImplementedError + + def _sample_neg(self, **kwargs): + raise NotImplementedError diff --git a/CDARTS_detection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py b/CDARTS_detection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py new file mode 100644 index 0000000..bc829a2 --- /dev/null +++ b/CDARTS_detection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py @@ -0,0 +1,41 @@ +import numpy as np +import torch + +from .random_sampler import RandomSampler + + +class InstanceBalancedPosSampler(RandomSampler): + + def _sample_pos(self, assign_result, num_expected, **kwargs): + pos_inds = torch.nonzero(assign_result.gt_inds > 0) + if pos_inds.numel() != 0: + pos_inds = pos_inds.squeeze(1) + if pos_inds.numel() <= num_expected: + return pos_inds + else: + unique_gt_inds = assign_result.gt_inds[pos_inds].unique() + num_gts = len(unique_gt_inds) + num_per_gt = int(round(num_expected / float(num_gts)) + 1) + sampled_inds = [] + for i in unique_gt_inds: + inds = torch.nonzero(assign_result.gt_inds == i.item()) + if inds.numel() != 0: + inds = inds.squeeze(1) + else: + continue + if len(inds) > num_per_gt: + inds = self.random_choice(inds, num_per_gt) + sampled_inds.append(inds) + sampled_inds = torch.cat(sampled_inds) + if len(sampled_inds) < num_expected: + num_extra = num_expected - len(sampled_inds) + extra_inds = np.array( + list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) + if len(extra_inds) > num_extra: + extra_inds = self.random_choice(extra_inds, num_extra) + extra_inds = 
torch.from_numpy(extra_inds).to(
+                    assign_result.gt_inds.device).long()
+                sampled_inds = torch.cat([sampled_inds, extra_inds])
+            elif len(sampled_inds) > num_expected:
+                sampled_inds = self.random_choice(sampled_inds, num_expected)
+            return sampled_inds
diff --git a/CDARTS_detection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py b/CDARTS_detection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
new file mode 100644
index 0000000..62431d6
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
@@ -0,0 +1,133 @@
+import numpy as np
+import torch
+
+from .random_sampler import RandomSampler
+
+
+class IoUBalancedNegSampler(RandomSampler):
+    """IoU-balanced sampling.
+
+    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
+
+    Negative proposals are sampled according to their IoU with the gt:
+    a `floor_fraction` of the required RoIs is drawn at random from
+    proposals whose IoU is below `floor_thr`; the rest are drawn evenly
+    from `num_bins` equal-width IoU bins above `floor_thr`.
+
+    Args:
+        num (int): number of proposals.
+        pos_fraction (float): fraction of positive proposals.
+        floor_thr (float): minimum IoU threshold for IoU-balanced sampling;
+            set to -1 to apply IoU-balanced sampling to all proposals.
+        floor_fraction (float): sampling fraction of proposals under
+            floor_thr.
+        num_bins (int): number of bins in IoU-balanced sampling.
+    """
+
+    def __init__(self,
+                 num,
+                 pos_fraction,
+                 floor_thr=-1,
+                 floor_fraction=0,
+                 num_bins=3,
+                 **kwargs):
+        super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
+                                                    **kwargs)
+        assert floor_thr >= 0 or floor_thr == -1
+        assert 0 <= floor_fraction <= 1
+        assert num_bins >= 1
+
+        self.floor_thr = floor_thr
+        self.floor_fraction = floor_fraction
+        self.num_bins = num_bins
+
+    def sample_via_interval(self, max_overlaps, full_set, num_expected):
+        max_iou = max_overlaps.max()
+        iou_interval = (max_iou - self.floor_thr) / self.num_bins
+        per_num_expected = int(num_expected / self.num_bins)
+
+        sampled_inds = []
+        for i in range(self.num_bins):
+            start_iou = self.floor_thr + i * iou_interval
+            end_iou = self.floor_thr + (i + 1) * iou_interval
+            tmp_set = set(
+                np.where(
+                    np.logical_and(max_overlaps >= start_iou,
+                                   max_overlaps < end_iou))[0])
+            tmp_inds = list(tmp_set & full_set)
+            if len(tmp_inds) > per_num_expected:
+                tmp_sampled_set = self.random_choice(tmp_inds,
+                                                     per_num_expected)
+            else:
+                tmp_sampled_set = np.array(tmp_inds, dtype=np.int64)
+            sampled_inds.append(tmp_sampled_set)
+
+        sampled_inds = np.concatenate(sampled_inds)
+        if len(sampled_inds) < num_expected:
+            num_extra = num_expected - len(sampled_inds)
+            extra_inds = np.array(list(full_set - set(sampled_inds)))
+            if len(extra_inds) > num_extra:
+                extra_inds = self.random_choice(extra_inds, num_extra)
+            sampled_inds = np.concatenate([sampled_inds, extra_inds])
+
+        return sampled_inds
+
+    def _sample_neg(self, assign_result, num_expected, **kwargs):
+        neg_inds = torch.nonzero(assign_result.gt_inds == 0)
+        if neg_inds.numel() != 0:
+            neg_inds = neg_inds.squeeze(1)
+        if len(neg_inds) <= num_expected:
+            return neg_inds
+        else:
+            max_overlaps = assign_result.max_overlaps.cpu().numpy()
+            # balance sampling for negative samples
+            neg_set = set(neg_inds.cpu().numpy())
+
+            if self.floor_thr > 0:
+                floor_set = set(
+                    np.where(
+                        np.logical_and(max_overlaps >= 0,
+                                       max_overlaps < self.floor_thr))[0])
+                iou_sampling_set = set(
+                    np.where(max_overlaps >= self.floor_thr)[0])
+            elif self.floor_thr == 0:
+                floor_set = set(np.where(max_overlaps == 0)[0])
+                iou_sampling_set = set(
+                    np.where(max_overlaps > self.floor_thr)[0])
+            else:
+                floor_set = set()
+                iou_sampling_set = set(
+                    np.where(max_overlaps > self.floor_thr)[0])
+
+            floor_neg_inds = list(floor_set & neg_set)
+            iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
+            num_expected_iou_sampling = int(num_expected *
+                                            (1 - self.floor_fraction))
+            if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
+                if self.num_bins >= 2:
+                    iou_sampled_inds = self.sample_via_interval(
+                        max_overlaps, set(iou_sampling_neg_inds),
+                        num_expected_iou_sampling)
+                else:
+                    iou_sampled_inds = self.random_choice(
+                        iou_sampling_neg_inds, num_expected_iou_sampling)
+            else:
+                iou_sampled_inds = np.array(
+                    iou_sampling_neg_inds, dtype=np.int64)
+            num_expected_floor = num_expected - len(iou_sampled_inds)
+            if len(floor_neg_inds) > num_expected_floor:
+                sampled_floor_inds = self.random_choice(
+                    floor_neg_inds, num_expected_floor)
+            else:
+                sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int64)
+            sampled_inds = np.concatenate(
+                (sampled_floor_inds, iou_sampled_inds))
+            if len(sampled_inds) < num_expected:
+                num_extra = num_expected - len(sampled_inds)
+                extra_inds = np.array(list(neg_set - set(sampled_inds)))
+                if len(extra_inds) > num_extra:
+                    extra_inds = self.random_choice(extra_inds, num_extra)
+                sampled_inds = np.concatenate((sampled_inds, extra_inds))
+            sampled_inds = torch.from_numpy(sampled_inds).long().to(
+                assign_result.gt_inds.device)
+            return sampled_inds
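Reviewer note (not part of the patch): `sample_via_interval` above partitions [floor_thr, max_iou) into num_bins equal slices and draws evenly from each. A hedged numpy sketch of the bin edges (values invented):

    import numpy as np

    floor_thr, num_bins = 0.0, 3
    max_overlaps = np.array([0.05, 0.12, 0.31, 0.44, 0.58])
    iou_interval = (max_overlaps.max() - floor_thr) / num_bins
    edges = [floor_thr + i * iou_interval for i in range(num_bins + 1)]
    counts = [np.sum((max_overlaps >= lo) & (max_overlaps < hi))
              for lo, hi in zip(edges[:-1], edges[1:])]
    # the max-IoU proposal itself escapes the half-open last bin,
    # mirroring the `max_overlaps < end_iou` test in the code above
    assert counts == [2, 1, 1]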
diff --git a/CDARTS_detection/mmdet/core/bbox/samplers/ohem_sampler.py b/CDARTS_detection/mmdet/core/bbox/samplers/ohem_sampler.py
new file mode 100644
index 0000000..0711d97
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/bbox/samplers/ohem_sampler.py
@@ -0,0 +1,73 @@
+import torch
+
+from .base_sampler import BaseSampler
+from ..transforms import bbox2roi
+
+
+class OHEMSampler(BaseSampler):
+
+    def __init__(self,
+                 num,
+                 pos_fraction,
+                 context,
+                 neg_pos_ub=-1,
+                 add_gt_as_proposals=True,
+                 **kwargs):
+        super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
+                                          add_gt_as_proposals)
+        if not hasattr(context, 'num_stages'):
+            self.bbox_roi_extractor = context.bbox_roi_extractor
+            self.bbox_head = context.bbox_head
+        else:
+            self.bbox_roi_extractor = context.bbox_roi_extractor[
+                context.current_stage]
+            self.bbox_head = context.bbox_head[context.current_stage]
+
+    def hard_mining(self, inds, num_expected, bboxes, labels, feats):
+        with torch.no_grad():
+            rois = bbox2roi([bboxes])
+            bbox_feats = self.bbox_roi_extractor(
+                feats[:self.bbox_roi_extractor.num_inputs], rois)
+            cls_score, _ = self.bbox_head(bbox_feats)
+            loss = self.bbox_head.loss(
+                cls_score=cls_score,
+                bbox_pred=None,
+                labels=labels,
+                label_weights=cls_score.new_ones(cls_score.size(0)),
+                bbox_targets=None,
+                bbox_weights=None,
+                reduction_override='none')['loss_cls']
+            _, topk_loss_inds = loss.topk(num_expected)
+        return inds[topk_loss_inds]
+
+    def _sample_pos(self,
+                    assign_result,
+                    num_expected,
+                    bboxes=None,
+                    feats=None,
+                    **kwargs):
+        # Sample some hard positive samples
+        pos_inds = torch.nonzero(assign_result.gt_inds > 0)
+        if pos_inds.numel() != 0:
+            pos_inds = pos_inds.squeeze(1)
+        if pos_inds.numel() <= num_expected:
+            return pos_inds
+        else:
+            return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
+                                    assign_result.labels[pos_inds], feats)
+
+    def _sample_neg(self,
+                    assign_result,
+                    num_expected,
+                    bboxes=None,
+                    feats=None,
+                    **kwargs):
+        # Sample some hard negative samples
+        neg_inds = torch.nonzero(assign_result.gt_inds == 0)
+        if neg_inds.numel() != 0:
+            neg_inds = neg_inds.squeeze(1)
+        if len(neg_inds) <= num_expected:
+            return neg_inds
+        else:
+            return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
+                                    assign_result.labels[neg_inds], feats)
diff --git a/CDARTS_detection/mmdet/core/bbox/samplers/pseudo_sampler.py b/CDARTS_detection/mmdet/core/bbox/samplers/pseudo_sampler.py
new file mode 100644
index 0000000..b4c2ea0
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/bbox/samplers/pseudo_sampler.py
@@ -0,0 +1,26 @@
+import torch
+
+from .base_sampler import BaseSampler
+from .sampling_result import SamplingResult
+
+
+class PseudoSampler(BaseSampler):
+
+    def __init__(self, **kwargs):
+        pass
+
+    def _sample_pos(self, **kwargs):
+        raise NotImplementedError
+
+    def _sample_neg(self, **kwargs):
+        raise NotImplementedError
+
+    def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
+        pos_inds = torch.nonzero(
+            assign_result.gt_inds > 0).squeeze(-1).unique()
+        neg_inds = torch.nonzero(
+            assign_result.gt_inds == 0).squeeze(-1).unique()
+        gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
+        sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
+                                         assign_result, gt_flags)
+        return sampling_result
diff --git a/CDARTS_detection/mmdet/core/bbox/samplers/random_sampler.py b/CDARTS_detection/mmdet/core/bbox/samplers/random_sampler.py
new file mode 100644
index 0000000..0d02b27
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/bbox/samplers/random_sampler.py
@@ -0,0 +1,53 @@
+import numpy as np
+import torch
+
+from .base_sampler import BaseSampler
+
+
+class RandomSampler(BaseSampler):
+
+    def __init__(self,
+                 num,
+                 pos_fraction,
+                 neg_pos_ub=-1,
+                 add_gt_as_proposals=True,
+                 **kwargs):
+        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
+                                            add_gt_as_proposals)
+
+    @staticmethod
+    def random_choice(gallery, num):
+        """Randomly select some elements from the gallery.
+
+        PyTorch's randperm appears to be slower than numpy's shuffle, so
+        numpy is used to permute the indices.
+ """ + assert len(gallery) >= num + if isinstance(gallery, list): + gallery = np.array(gallery) + cands = np.arange(len(gallery)) + np.random.shuffle(cands) + rand_inds = cands[:num] + if not isinstance(gallery, np.ndarray): + rand_inds = torch.from_numpy(rand_inds).long().to(gallery.device) + return gallery[rand_inds] + + def _sample_pos(self, assign_result, num_expected, **kwargs): + """Randomly sample some positive samples.""" + pos_inds = torch.nonzero(assign_result.gt_inds > 0) + if pos_inds.numel() != 0: + pos_inds = pos_inds.squeeze(1) + if pos_inds.numel() <= num_expected: + return pos_inds + else: + return self.random_choice(pos_inds, num_expected) + + def _sample_neg(self, assign_result, num_expected, **kwargs): + """Randomly sample some negative samples.""" + neg_inds = torch.nonzero(assign_result.gt_inds == 0) + if neg_inds.numel() != 0: + neg_inds = neg_inds.squeeze(1) + if len(neg_inds) <= num_expected: + return neg_inds + else: + return self.random_choice(neg_inds, num_expected) diff --git a/CDARTS_detection/mmdet/core/bbox/samplers/sampling_result.py b/CDARTS_detection/mmdet/core/bbox/samplers/sampling_result.py new file mode 100644 index 0000000..696e650 --- /dev/null +++ b/CDARTS_detection/mmdet/core/bbox/samplers/sampling_result.py @@ -0,0 +1,24 @@ +import torch + + +class SamplingResult(object): + + def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, + gt_flags): + self.pos_inds = pos_inds + self.neg_inds = neg_inds + self.pos_bboxes = bboxes[pos_inds] + self.neg_bboxes = bboxes[neg_inds] + self.pos_is_gt = gt_flags[pos_inds] + + self.num_gts = gt_bboxes.shape[0] + self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 + self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :] + if assign_result.labels is not None: + self.pos_gt_labels = assign_result.labels[pos_inds] + else: + self.pos_gt_labels = None + + @property + def bboxes(self): + return torch.cat([self.pos_bboxes, self.neg_bboxes]) diff --git a/CDARTS_detection/mmdet/core/bbox/transforms.py b/CDARTS_detection/mmdet/core/bbox/transforms.py new file mode 100644 index 0000000..580b9bd --- /dev/null +++ b/CDARTS_detection/mmdet/core/bbox/transforms.py @@ -0,0 +1,180 @@ +import mmcv +import numpy as np +import torch + + +def bbox2delta(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]): + assert proposals.size() == gt.size() + + proposals = proposals.float() + gt = gt.float() + px = (proposals[..., 0] + proposals[..., 2]) * 0.5 + py = (proposals[..., 1] + proposals[..., 3]) * 0.5 + pw = proposals[..., 2] - proposals[..., 0] + 1.0 + ph = proposals[..., 3] - proposals[..., 1] + 1.0 + + gx = (gt[..., 0] + gt[..., 2]) * 0.5 + gy = (gt[..., 1] + gt[..., 3]) * 0.5 + gw = gt[..., 2] - gt[..., 0] + 1.0 + gh = gt[..., 3] - gt[..., 1] + 1.0 + + dx = (gx - px) / pw + dy = (gy - py) / ph + dw = torch.log(gw / pw) + dh = torch.log(gh / ph) + deltas = torch.stack([dx, dy, dw, dh], dim=-1) + + means = deltas.new_tensor(means).unsqueeze(0) + stds = deltas.new_tensor(stds).unsqueeze(0) + deltas = deltas.sub_(means).div_(stds) + + return deltas + + +def delta2bbox(rois, + deltas, + means=[0, 0, 0, 0], + stds=[1, 1, 1, 1], + max_shape=None, + wh_ratio_clip=16 / 1000): + means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) + stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) + denorm_deltas = deltas * stds + means + dx = denorm_deltas[:, 0::4] + dy = denorm_deltas[:, 1::4] + dw = denorm_deltas[:, 2::4] + dh = denorm_deltas[:, 3::4] + max_ratio = np.abs(np.log(wh_ratio_clip)) + 
+    dw = dw.clamp(min=-max_ratio, max=max_ratio)
+    dh = dh.clamp(min=-max_ratio, max=max_ratio)
+    px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
+    py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
+    pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)
+    ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)
+    gw = pw * dw.exp()
+    gh = ph * dh.exp()
+    gx = px + pw * dx  # equivalent to torch.addcmul(px, 1, pw, dx)
+    gy = py + ph * dy  # equivalent to torch.addcmul(py, 1, ph, dy)
+    x1 = gx - gw * 0.5 + 0.5
+    y1 = gy - gh * 0.5 + 0.5
+    x2 = gx + gw * 0.5 - 0.5
+    y2 = gy + gh * 0.5 - 0.5
+    if max_shape is not None:
+        x1 = x1.clamp(min=0, max=max_shape[1] - 1)
+        y1 = y1.clamp(min=0, max=max_shape[0] - 1)
+        x2 = x2.clamp(min=0, max=max_shape[1] - 1)
+        y2 = y2.clamp(min=0, max=max_shape[0] - 1)
+    bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)
+    return bboxes
+
+
+def bbox_flip(bboxes, img_shape):
+    """Flip bboxes horizontally.
+
+    Args:
+        bboxes (Tensor or ndarray): Shape (..., 4*k).
+        img_shape (tuple): Image shape.
+
+    Returns:
+        Same type as `bboxes`: Flipped bboxes.
+    """
+    if isinstance(bboxes, torch.Tensor):
+        assert bboxes.shape[-1] % 4 == 0
+        flipped = bboxes.clone()
+        flipped[:, 0::4] = img_shape[1] - bboxes[:, 2::4] - 1
+        flipped[:, 2::4] = img_shape[1] - bboxes[:, 0::4] - 1
+        return flipped
+    elif isinstance(bboxes, np.ndarray):
+        return mmcv.bbox_flip(bboxes, img_shape)
+
+
+def bbox_mapping(bboxes, img_shape, scale_factor, flip):
+    """Map bboxes from the original image scale to the testing scale."""
+    new_bboxes = bboxes * scale_factor
+    if flip:
+        new_bboxes = bbox_flip(new_bboxes, img_shape)
+    return new_bboxes
+
+
+def bbox_mapping_back(bboxes, img_shape, scale_factor, flip):
+    """Map bboxes from the testing scale back to the original image scale."""
+    new_bboxes = bbox_flip(bboxes, img_shape) if flip else bboxes
+    new_bboxes = new_bboxes / scale_factor
+    return new_bboxes
+
+
+def bbox2roi(bbox_list):
+    """Convert a list of bboxes to roi format.
+
+    Args:
+        bbox_list (list[Tensor]): a list of bboxes corresponding to a batch
+            of images.
+
+    Returns:
+        Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]
+    """
+    rois_list = []
+    for img_id, bboxes in enumerate(bbox_list):
+        if bboxes.size(0) > 0:
+            img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
+            rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)
+        else:
+            rois = bboxes.new_zeros((0, 5))
+        rois_list.append(rois)
+    rois = torch.cat(rois_list, 0)
+    return rois
+
+
+def roi2bbox(rois):
+    bbox_list = []
+    img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
+    for img_id in img_ids:
+        inds = (rois[:, 0] == img_id.item())
+        bbox = rois[inds, 1:]
+        bbox_list.append(bbox)
+    return bbox_list
+
+
+def bbox2result(bboxes, labels, num_classes):
+    """Convert detection results to a list of numpy arrays.
+
+    Args:
+        bboxes (Tensor): shape (n, 5)
+        labels (Tensor): shape (n, )
+        num_classes (int): class number, including the background class
+
+    Returns:
+        list(ndarray): bbox results of each class
+    """
+    if bboxes.shape[0] == 0:
+        return [
+            np.zeros((0, 5), dtype=np.float32) for i in range(num_classes - 1)
+        ]
+    else:
+        bboxes = bboxes.cpu().numpy()
+        labels = labels.cpu().numpy()
+        return [bboxes[labels == i, :] for i in range(num_classes - 1)]
+
+
+def distance2bbox(points, distance, max_shape=None):
+    """Decode distance prediction to bounding box.
+
+    Args:
+        points (Tensor): Shape (n, 2), [x, y].
+        distance (Tensor): Distance from the given point to 4
+            boundaries (left, top, right, bottom).
+        max_shape (tuple): Shape of the image.
+
+    Returns:
+        Tensor: Decoded bboxes.
+    """
+    x1 = points[:, 0] - distance[:, 0]
+    y1 = points[:, 1] - distance[:, 1]
+    x2 = points[:, 0] + distance[:, 2]
+    y2 = points[:, 1] + distance[:, 3]
+    if max_shape is not None:
+        x1 = x1.clamp(min=0, max=max_shape[1] - 1)
+        y1 = y1.clamp(min=0, max=max_shape[0] - 1)
+        x2 = x2.clamp(min=0, max=max_shape[1] - 1)
+        y2 = y2.clamp(min=0, max=max_shape[0] - 1)
+    return torch.stack([x1, y1, x2, y2], -1)
diff --git a/CDARTS_detection/mmdet/core/evaluation/__init__.py b/CDARTS_detection/mmdet/core/evaluation/__init__.py
new file mode 100644
index 0000000..b8177aa
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/evaluation/__init__.py
@@ -0,0 +1,14 @@
+from .class_names import (coco_classes, dataset_aliases, get_classes,
+                          imagenet_det_classes, imagenet_vid_classes,
+                          voc_classes)
+from .eval_hooks import DistEvalHook
+from .mean_ap import average_precision, eval_map, print_map_summary
+from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
+                     print_recall_summary)
+
+__all__ = [
+    'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
+    'coco_classes', 'dataset_aliases', 'get_classes', 'DistEvalHook',
+    'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls',
+    'print_recall_summary', 'plot_num_recall', 'plot_iou_recall'
+]
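Reviewer note (not part of the patch): `bbox2delta` and `delta2bbox` in transforms.py above are inverses of each other (up to clamping). A hedged round-trip check that re-implements the same center/size parameterisation standalone (boxes invented):

    import torch

    proposals = torch.tensor([[10., 10., 29., 29.]])  # 20 x 20, center (19.5, 19.5)
    gt = torch.tensor([[12., 14., 31., 33.]])
    # encode, as bbox2delta does with zero means / unit stds
    px, py = (proposals[:, 0] + proposals[:, 2]) / 2, (proposals[:, 1] + proposals[:, 3]) / 2
    pw, ph = proposals[:, 2] - proposals[:, 0] + 1, proposals[:, 3] - proposals[:, 1] + 1
    gx, gy = (gt[:, 0] + gt[:, 2]) / 2, (gt[:, 1] + gt[:, 3]) / 2
    gw, gh = gt[:, 2] - gt[:, 0] + 1, gt[:, 3] - gt[:, 1] + 1
    deltas = torch.stack([(gx - px) / pw, (gy - py) / ph,
                          torch.log(gw / pw), torch.log(gh / ph)], dim=-1)
    # decode, as delta2bbox does without clamping
    nx, ny = px + pw * deltas[:, 0], py + ph * deltas[:, 1]
    nw, nh = pw * deltas[:, 2].exp(), ph * deltas[:, 3].exp()
    decoded = torch.stack([nx - (nw - 1) / 2, ny - (nh - 1) / 2,
                           nx + (nw - 1) / 2, ny + (nh - 1) / 2], dim=-1)
    assert torch.allclose(decoded, gt)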
zh<;2zp`X&v=;!nc`X&8}eoeoj-_q~s_w)z)BmIf~On;%j(%r9nxnPoAYN}5Dc6vvv`JPIaysxbD55x(!euQbwV>For{*U%gi#WXw38i*^8vz}^CxdQeG1&YEbmoZ`*8xjMG>|`dPDq|M0-Eu?#R|`|a0J-Rsk)cjmA1yLPB6@_W*#u&w zVgRkdz_ElU;VGNIf5gyXX{Ki|hG>qXP%Mt4xUwirq;fGB$9jg70-i*B9OKkwbF7cx zO~dB&0B9+|-{=rS&75D&u*5KoFh8>dZwhrxW9(h@!Hp{ouyD-gfv}LUpN54xVS)2P z84RKxnK>)Q@isGZF8ULhzz0HzJ75#IRTr&=Ex6AxLIzT_z-D6mMCRTs$|s&gLvV2u z!44B83HtKc|1pb-6C86iiLy-NkTNN(Sj4HKHJ!5ReQC@V_KnU&=fH5$iVkQJoycN1 ziT)wzckT{1m} z*qOSq_T7=X34Aby<~xZr(2*D<5!@(4HdSTc@iV!K_)emDiZdZgBuF`S8nM@73HWC@ zaLL9cFbpj0#Auy}R*I>`AXUK>dO=1NE$wUMBW+{~#k4scLHP)#0CC{MVhYO%@JN19LGV@+9KHS>I}KRvUxX);Pi|IS+uq$k@S$@*?nOPEEUUK zdDzR8vtgn_>;w|ggJEoh{EMlzL6~5E9>H!&1`5JHjLO$kT!M)Ji$=7v0@IAL0zS$< z$qTfz0?D`R7}!37PI)Dmz6r-F`$kdat>wt5v5DYf5Y-bjaBDLkAH!#47LEDjA#8Sd~qf zP_*r|NCDKAO@*2u1^9@8P4z6%egt*&n9mTOV$o`ud4LDlg|{`dItZ{!-j~_NGj?qz z^CFgI&mJx!V}O=Di%@f(JA}986OCKc2cMC-84hg_fZ&z0A;d)*a)G9- z*kWf<0@n+@++fcZlg!vkT|0+CLgXk{1c_k|T=QUP4(wsBQaEd+yq1fACa@o4S^H%- z*KiIn3s`PeURk0*`1t5W|H_=&@ zK>=9otVS9bCkCrJjsf6oyWtE=#L9~dtkS74R*YtFKPa&(^ak!Jz+OjH%`lCYLg~oXHhTxL7w=GvQD$D@?9oavc-4zPXXfO-ycP zato79ZEte_rT5hcz+ZVRzhb#Ws5MxttaWo6o=(x+0SKJtUIO!4H243gki!G8&cpj1 z6jwIk<5enk7UP=9sgG|IKu5#(N^=8<7>y&NyvZB|1d0O0xXwHqg{fG`q?vM9BS#|s z&*e9`W-~)3&cM!&&Qc33bEbBF!73KrFyi1<<}7~dp&kM9#8Mz@rvy+oyPe!3?FRr< z(FOEcE*)DyK#Q#a;e1zN4RL2Q`371{BP0gYu2oSA2+76biKj4T02|mT!nb<4ZwoW7 z*oEV$Q=z}%YmykaICfx$n_GdK*BIrSXiw8<%z&6_otpD7(wxoY%xcd#!37ThK#lu6L}B(y7_ySgyCrc2!fh zR$1-HNRhR;n~DK0p~QEEld3S(-jHjZPkh7m!)AbOaxTY` z#lcoVow*ymWfQknWPfP`lv~WVL9V&UHP`OO#TkbmoeZ905XE-Y1Pr92iKsrQ8&4;? zxu?ZjLj8o?hPInnWb#0lTMzErv-gpEQTObTIW&-$@PUnw3!;B;&%Ua6xo!9yW-pU_ zncT-@A4tGoLimpZfZLK%f z>+7xCYlqJ}SUbFUu$DItHP>s$>W0>9>-A~JJeIdGqINbw=S;|twhyo7ZE76q@Tg<; z=j!RUyi3hPoxE2`Shn5<0JPT5zfa{$4&|2Ihm$2eUXn{xopRUOV8c9DEU%sQcxqIj z!!%k`xWZRMZe4oz=-STX4@;;QbG(kMf7Oy-(ZAd;w;Uhg$vA@kY}rOcCbn!P zNw8X?I`;5R+2W5m<=_c?(NTL56{hSUvJ!kJ=|48lj*D73*yT>yJwBxvxH)(8_Dj^M zpxqOzIED*p+-vMfp4f!maqHO3tj|DT`J+dz(W-BQ18!|_#@yD}+Spb%PeQ)jGY_G0 z-;Lbo5tln%8gPzj9^hUcdD1%0n-;Nz0<4gjQrgC E4`L`GU;qFB literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/core/evaluation/__pycache__/eval_hooks.cpython-36.pyc b/CDARTS_detection/mmdet/core/evaluation/__pycache__/eval_hooks.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea4c15733e3a9cad2e9ba044e7d9dd7be315ef28 GIT binary patch literal 2534 zcmaJ?O>Z2x87AjzN3*-KtjJEBxb7Z;v>UWi;Gk#^f@36hQlx;;HU=CFnhr)o?rKK! 
zu_8H&q|9ErzP3qDEsFku9CFK#=&3(|*PeXq*{445tmHZ_kXi60m*o5Xz7O9WkE8dV z9i`t!jQyLv@OfC@$IwS0f(c%-cs=vNiQRt3i@kpKW51t+Vi1Q|Ipszb#Zfhkhn#ra za#W4ualiJ;&FWfwjkAxK@I~;1i9n85ZoDPexZnlx1q99q24IMye^d z-mjbH81vvkYEmstMwhQX7mNEC`YjO2VlG(hhz;Qg_X&r=p75VA7#xTJW?zI*7unH+ zT$_idX-T4=a34kW>~~%f zbX-7X&R0%RiyFU@#^)P^2IJg<|cWwi`xjlT?k6k1p zGc|%}wN2y|BBc5orfy(*_VM0PQ^~ua_58d|>%Ht;q5KehkN)J@QWKSW_b6CCv)gNzVYSw z0JHuV640@NQBDiwPH$6YI#hh&t{CbU74U=^&-geGD3cG^iZ4+M#Xz{njyf=5vGKpt zrF-_B@#yqw59?^m&b!5yz8RWP=g&D6>7N}lE;fs6D>jD(!o_)jovju7 z@;2{nnW1OIK!pEry5O(QU-)wH7+v-|_Ivgz>jtPRZ=dc(%dqpr#!d7eESo*Iadvjw zc*5;Mv9@Hi-00lmI{A03b>g9trd4%PHd@C6 zB>{t_eELstJOx%W%cF~-k<}vkV!69GE~j_wraYDEJh{}W`xZq113XIWQSVq^rS)7| zx0H3?rPjm7vGuAdJGH~#s%8#7`u$Yd=<`Ma3Uq!U6~#=EtJXJ-%8sm;wIa2fS*w(+ zO(NuJo&l)qCYh^L*q~hiLZtN<3P3^FqKzFKrb^{fS+~&(>lIC2+Ym~Q6Ok)L*|1*O zfZJ)b4V0{!Qvl6n^GgV6tHpGzsLgTQ)-^;?RNB&L8!lja0u9#9#ThOGKk*WQQaDK* z-LuhQYOOK*|a_i?PIc+1{N)FrE3`q!@)+}0s+LEa_p9X!3)GvwUS0HhpLyD{Q z>a;@fp%7mRuOclEF*H>+L*Q;AOj|Ew_f;_*O!>NV0(t{TXp-0{p=Yu!X+27klQu2a zC+by*RPPb#ouPN=W&IRXdbKnyLMIxBZ!20(ud8=4*+GLBCpD;iqG}I&>P;d)0g12o zU9n4LuJXgyNMVOB{82xrOJnLW2vKQS%Cc{#zOn4EKUi0^qRiT@wY=8krQW3??jP&k hpcf49{UB7b;r`n5djxbnu;B?rWzj>ph3Xo){{>x_kTC!N literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/core/evaluation/__pycache__/mean_ap.cpython-36.pyc b/CDARTS_detection/mmdet/core/evaluation/__pycache__/mean_ap.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f70d1eea9e53d10ba371ee090bc9e6867e530d22 GIT binary patch literal 12906 zcmeHNNsL_Ab^SxlQ&)EvdtyuaZc$u}iOuHFMbM%9cG7kj|X15NM-x7?T7VFuXWGV6^qxd^$zO28=maA{Qy<+EibG$I&KhnL6mT%=QeC2 ziq8yw8T?+sFF0Xgs%sIXq7;lq@pk-(s!&uQduIiXWcdBw{(dBb+k=$X$pB6m6*y)BQL8` z%24U1eP|9caoSWVPG=9^3UeyEYYT8GtR+LFMCs7JU|iiAjhT~}c^n3sPqu;%D@x|76zeAtDd+}Z#6>fk2D?UcvYx(Wl z=!GVE!|C~*Th1EI*qY<|!dV*bl&?zJlTq@vCwoDZ=7FC*BXh)YqUYm+Ozi}r z+t`be()C+jlF;}nJdVmA zy?%11*Y(aI^jo*~UH@d`mT-fULBAKA>~?W3yao+i$4@qT(mUzh0JE;n#h$IZ{muTZ zXf{5<^%2hn&<|cfU=$N(&P#bkf&=8(hwr{UPfLMxt0kbgfxo|>LmlgL9{c}#~_Q?{du_|9bvMl zItOa`7UJ@UB7y5Rxn=lD0T2lL8@Nt4_f+c#K3H{DJ!&UvVO2Aw*Q zm=w?#_>LLVh71aK+JV#V0uSI)Zw^Ixcfa1 zk*T*^KCZV(YG33Wfvz4-b;JwtGiG#;dvGpzO&6f$oU;wlFsdqaZoS>xuj5h%%pvg! 
zaPY<6S1@qt1v|ZtaKfH*&2t((KWIbVlOPS8t{XOX5Ct$Ak)xiwo@noPS>$zG2@n;p z(t6MZ8iH8F9PQwGc7i^@mD_RTzD7{gc=#D{dusEu<4GoAT4!;EY3{Ux@JhH35O&p3 zJSwpLCJ8zkJg+O&4`SwHIpK+oDQ??70~-ti<8iL6ciTQ72BhHW>V`w9Gn_hu6mZAY z&FOiJx77n6gxR{{_cvn@a`l|E9`-k!X20s(-05LsdL6Ip0deu>J;k$idegZKjHMxB zuQ|mg^qd`31&DO88K>}LH{L!maAEns&4^}GW+9U9;piAG{O~#U(!B)!s@C>4{TOwO zgX)YVB`V5ep&Nj~K7Lw~&r){c8niYR>QU;31{QY5^JRq!vmOutKr>pTD4oXCG1f?9 z4+H=@QI2s+z0=po<{N z3?VhJ6To#V4CET|WQG06YWAZnb<2B09;5sWc;z%=?tzXaa3X?Q0+erZxKqo-X9TR$ zcyw3hUX(+f2C1l?U{07b%}$g-8F$3ki%waTi_Z<%ip*V1DlQ;KnT&vRuufbR!+x`0 z=X=NZ!p|cs*g;?{CICxTtdf-jM9B~+u{C@_+FCQ823&a@`6crhzAI+RT0%&*%T@*u zrvz}5uu66gE!WM0nM)BBqE(`wRYq>=xoUVU^+tixfv#WDbZSoWLSY;E?%;V3A5JvrYL!P=azi z#3=qdBc!D9-4H2Fps7)fM`9^R@dcm-!ZHgnmN|0p?`jSvg@1Wyzxk*XV{N+gsPEkMzL7t|7uz@6H znd}ZNNLzW9a-Ky{U6G%q_~$5i4ndTTcZGbOvVNO_a}-)T)s%L zmnb-ofW>xDM^Cjp$ORC`-0jO$fwXmbfr3{lc#VQDPyma%@lc^$V4)nNk?){Z@Gb&l z;h{1)L8gTXnQY~(0={E;OwyX_lwt`?lDNc@eL@cZ9_WvZ+yv-o4hho*-$(FGqiG6L zSnp?)ae*{ey9EO^e#wIm6`6Uz1`9$;*vrO;td;FqWJlOFke9?gordu>rSHx|(?ssB zDUf#v3&37hehamr!;M;IRFYd}&@waK^14H1Az3yG2w_{e13QLkTz7DP+A5)vO&f;R zqB{@!+Us6ON*t_v0aOxTV3^0C)wF#L!^z$X2=6)#*tuLFbT;@uQiU5*(Ha4%1y`X7 zTyndxR|d{|1pKl7{q^d5yv3W$m~cT2pEGfI$6*HIyf!ZT&@;i5i&o7iTjVO>xoO+TE3K>-Jyxi0D!$DdYB&`ZXn>mb4I?Tb&+EzQGHK z4C_a!j(g4P1gud+DXq+H*NQrK2+;YV#tY&kt0MZwFj*g|S`%4Goc(^^^F?H%f30#P zS-0VfqMD8J1khui0QJXfC4V1`(+C4LnMY}g0pCZZI1)1u*=Q7{NiQIUAU~lU=vgzF zl4PJgbKfldgl1>~fsvadu#5v*2HHm*CuNlY-b6vw3m{97A6N}KM|&YGIP6W zRSq+%s7h+)nxXO8rRVf1nm{bD6L<=Gme(Oxcuz z4G9lTqZ(|H8>RU1PS*e$T|C$diywk7+;*qX_Y!`~u$@)Z67>o`XJB<(hRtzNt&q*} z&>TGz9RTAQ*MIsp)ha7I4-M#%iMbpcMcyp1wB~Vngu_>nKd0vKG{z-rl}cbiq}=)Y za%q<4)tsf=GABNWhbhvmY33-S#*`%v0?%%@H|Ehj#@SN`Xi&{bnH*^Cp{DiXU$`xE!r|AmY zfNTrS=os0TVk^^>?T3s)y?q!~9J-VtB_XO{XNhf+cqt_DNiS$^-+!Re6eYZ8J;T!B zegXAy1mo+nhu1y_hSm_KsPUZ)gBENb4qK?&q1VO4Om#|}-N(4=YkTUZ^JVW=z%w~P z4+fL*8>$n3o^jT+=vg~QFQ)xlu{CUL=VVLjRL|5D?X;x&S?$J8tq*h;3zRiyz1OEl zCbt7@@o)y%(E4ZwJ{hGkOZ-m!F|D5ZP5D!{c%FCIP0z!-`PnlkPH;v1O&GdqqDRUanWV>C@K>t~#wRwmALAvQtjG8p+mJ8lGwt|< zAn}kqY!SPiOxuq!Y;1lE+<_61Dz}?WNnre>HCIt84Gk1_G8%c z+0TZr3|wSWI~Fg(U02@PbS`j~OagUsKoc%z*8Q?Xc+XrcQ7F@o_SJ-P8r{`j{J)ZrUW_Fm?F^Uc63L*Fn%rb>^ zm+B*=P2mhbr!Y3X++M;jAQLcS>{Y_p0*FDF^Yj7<$wRTGr7?Ha))!A1cx~JPDv%2Tzdgh@1o;Lsq9pHatyU9St|ql6 zPda^+(q0DS0~Wj7$S09M#Vx5tykygA5iMD~L?$8S)whgEasWR0n1 z*+|pe_u+R!(FrpVFWDEVCYyNJ;`0h6$5tcSj`C#+dK5fCfkVM(D4+$arSOmmN0VBf z{^KVmyzprE6X>3pxY1U?n(}$!Q_1>fW_S^ZE~ih+Ned&k#NCd?-g>VOhHQ`=g9l> z$U$ zd7NxPlc90IqQx~vc{$`|C-bg5jO58KfHvFE`3q`>_{Q7G4C{|3rNtb6^PrF( z=0i`VG&yl{y+afImsJTmY?-ws^ZH+5v`C8zoOtO=_2HOF9JE+74ChJXAIypcHCwNM z$3-=}dt{Mz3iAgq4(N{T`n&9^_p{=KX`k;)PuVS@jxJn%6eYEn)}PQlL2He3ea zdcc=FTLR(DJv1`c$Kp>!~-pJ59kV1tg#uIY3}{8#kgY9^(L^O_%0ure*K=1)A z&KD3d<{?8N>mc9AkKctNx)O`f-qo*-T_jlxFwPs2eH-pujD-0&>X1Vx!GIPAjD>IF=a5zt#+ f0dtc9;wWGy3U=FN2{4{vO5FiwL6>;z}lvKRO6t zhW5D9t)OjjJ9NfQ*Fn3&o%ghEl~=iox69Xfoj2ZN-5P(6H~A7;b-v73FxKF!{1o0z zuHV*LrypTet;M8sJJ_EFQ7`DA<6cjC!{Hr&Ck)W5PQ)-ux5Iewh~>hKot^k#JH8)? 
z(4Qnd`ULu&!sFn14NvkL$Y$CIAGEH`jFfSm8}EU0Lz}ZXo0+LTGBP$YGqbOwZE^FV zKC?bx2g|9QLE?t?0lTZeqtEPFg=9I+at{8KRx*P|@BU#{&9tn#PL9&v*SVHe-q-v3 zx;Ap=+RRn@-F$t_tYz-V92`LBLK2$$vpV`NIrt%)H8Rpw z<7^?LXAN3s*36nC7i%qL&1`91%W7Hus+KjVJzM6`nJwkE_4TV7cQQBcWy{px|Lt4a z)0VuY!7eMapUm37rljhSY-E!ghI6=XnT7^=%?c`@!ZCfoCX`{C4AToLsAFlVUqUZ@#I>AtRfR_?daTgE|Rpb zhB0K5YiN{$WEf8mCxNgajh^Wr43kHH#ZN2|XGv7>l14HeOFf!M>nIR$BF$uY6v#?H zjQ#ZTC0Pxk={OL68c0j{(IAj!FP(XLHm{GeN*bBu`5f121?BI=$zu3;I?z_fKWh>%uJi<8N+#Y#5ZY#>uSV^E@1{!dVZLn5y%@# z4@)iRxu0_jX*0uUre{WsZ0Qb@VT9NKqzd|Iu>vA3UkLwD8UR%BSuk3rc!Bs%xqxhLpIBX1 zX~)#sPjAaI>?n3-dxEeVe;JyR=Rq{X#TH;|bW?Y*K~H+3jnR)ugv{*Gk)U4_51j(F z8?OQZ$nWr+m09G^IdC&Mo3eLVs*|_ogvZ6^R+<9m+2Gdv2tAuk}cJu$Qh)Gw?UBgcEnYL+-+@nzDFD{?t6-rP)k9qw-H_NuCx}j%Yga! zbnKB1FBaYgS}%WEo5Ce;1Ci=wURfQyjV{@CC!Tnn#W_j5H1>7^uK?|U=jVaGtPEan zLqXtiRf<8%z7@ae;cx}XZXEKY?cE6a{xnRtyo>FZpJ+n;F$iESy@%1DeKYT8UXlu^ z?Zi<)hV2I41OJc=Qe_Jim#$yDxzasS#01|x!<&FeAw>?B7i61jvPbEJ@W|tm1U0)JR;|b zd>%yF2_^NU>(I(7^VBKRmD9TpwJ6|J7875fiIi-PC80V6SDA>eV=Vb1h;|BzMhDIT zQL(-VzF`CyNvV)8;`Jo%6qp1+$zcZv2=!AASlsK2cK1gD76+R ze=)rmqvU$#bc`e$Cl`Ij{Qo=PA^bwwO_}_K&vOm-{mUl)Qdv z>4Gk9(!d#e>fi}FE+577HL7n$ng+G$LIBc4+oR8BxM@l_f&h!7S^O9e9YD14y0Ng+#w1K7>pvARa*0$D8WtzxXPA%|;}w+RNKlI6-sG^e2qqVI zC-DQw{4W9rRWcG3(uKiIdM^S8b$bywsA*NpEy4!D#w*VV9OvjKwv;DW@rOXWf zQK$^yO8oxvO3}bw*no}V+hBewV8nNbOTi*;68SEX4w3H>xkcprL?|M}SBTsODFZ^4 zIWG~H0z%v&@+Og!7*d1;eO4aZzlO{|3yHTN(=s|@6YmZhola{>E& bboxes2.shape[0]: + bboxes1, bboxes2 = bboxes2, bboxes1 + ious = np.zeros((cols, rows), dtype=np.float32) + exchange = True + area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( + bboxes1[:, 3] - bboxes1[:, 1] + 1) + area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( + bboxes2[:, 3] - bboxes2[:, 1] + 1) + for i in range(bboxes1.shape[0]): + x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) + y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) + x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) + y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) + overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum( + y_end - y_start + 1, 0) + if mode == 'iou': + union = area1[i] + area2 - overlap + else: + union = area1[i] if not exchange else area2 + ious[i, :] = overlap / union + if exchange: + ious = ious.T + return ious diff --git a/CDARTS_detection/mmdet/core/evaluation/class_names.py b/CDARTS_detection/mmdet/core/evaluation/class_names.py new file mode 100644 index 0000000..87fb239 --- /dev/null +++ b/CDARTS_detection/mmdet/core/evaluation/class_names.py @@ -0,0 +1,108 @@ +import mmcv + + +def wider_face_classes(): + return ['face'] + + +def voc_classes(): + return [ + 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', + 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', + 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' + ] + + +def imagenet_det_classes(): + return [ + 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', + 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', + 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', + 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', + 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', + 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', + 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', + 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', + 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', + 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', + 
'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', + 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', + 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', + 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', + 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', + 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', + 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', + 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', + 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', + 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', + 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', + 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', + 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', + 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', + 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', + 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', + 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', + 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', + 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', + 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer', + 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', + 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', + 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', + 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', + 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', + 'whale', 'wine_bottle', 'zebra' + ] + + +def imagenet_vid_classes(): + return [ + 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car', + 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda', + 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit', + 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle', + 'watercraft', 'whale', 'zebra' + ] + + +def coco_classes(): + return [ + 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', + 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', + 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', + 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', + 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', + 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', + 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', + 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', + 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', + 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', + 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', + 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', + 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush' + ] + + +dataset_aliases = { + 'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'], + 'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'], + 'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'], + 'coco': ['coco', 'mscoco', 'ms_coco'], + 'wider_face': ['WIDERFaceDataset', 'wider_face', 'WDIERFace'] +} + + +def get_classes(dataset): + """Get class names of a dataset.""" + alias2name = {} + for name, aliases in dataset_aliases.items(): + for alias in aliases: + alias2name[alias] = name + + if mmcv.is_str(dataset): + if 
dataset in alias2name:
+            labels = eval(alias2name[dataset] + '_classes()')
+        else:
+            raise ValueError('Unrecognized dataset: {}'.format(dataset))
+    else:
+        raise TypeError('dataset must be a str, but got {}'.format(
+            type(dataset)))
+    return labels
diff --git a/CDARTS_detection/mmdet/core/evaluation/coco_utils.py b/CDARTS_detection/mmdet/core/evaluation/coco_utils.py
new file mode 100644
index 0000000..3022ad0
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/evaluation/coco_utils.py
@@ -0,0 +1,177 @@
+import mmcv
+import numpy as np
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+
+from .recall import eval_recalls
+
+
+def coco_eval(result_files, result_types, coco, max_dets=(100, 300, 1000)):
+    for res_type in result_types:
+        assert res_type in [
+            'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
+        ]
+
+    if mmcv.is_str(coco):
+        coco = COCO(coco)
+    assert isinstance(coco, COCO)
+
+    if result_types == ['proposal_fast']:
+        ar = fast_eval_recall(result_files, coco, np.array(max_dets))
+        for i, num in enumerate(max_dets):
+            print('AR@{}\t= {:.4f}'.format(num, ar[i]))
+        return
+
+    for res_type in result_types:
+        result_file = result_files[res_type]
+        assert result_file.endswith('.json')
+
+        coco_dets = coco.loadRes(result_file)
+        img_ids = coco.getImgIds()
+        iou_type = 'bbox' if res_type == 'proposal' else res_type
+        cocoEval = COCOeval(coco, coco_dets, iou_type)
+        cocoEval.params.imgIds = img_ids
+        if res_type == 'proposal':
+            cocoEval.params.useCats = 0
+            cocoEval.params.maxDets = list(max_dets)
+        cocoEval.evaluate()
+        cocoEval.accumulate()
+        cocoEval.summarize()
+
+
+def fast_eval_recall(results,
+                     coco,
+                     max_dets,
+                     iou_thrs=np.arange(0.5, 0.96, 0.05)):
+    if mmcv.is_str(results):
+        assert results.endswith('.pkl')
+        results = mmcv.load(results)
+    elif not isinstance(results, list):
+        raise TypeError(
+            'results must be a list of numpy arrays or a filename, not {}'.
+ format(type(results))) + + gt_bboxes = [] + img_ids = coco.getImgIds() + for i in range(len(img_ids)): + ann_ids = coco.getAnnIds(imgIds=img_ids[i]) + ann_info = coco.loadAnns(ann_ids) + if len(ann_info) == 0: + gt_bboxes.append(np.zeros((0, 4))) + continue + bboxes = [] + for ann in ann_info: + if ann.get('ignore', False) or ann['iscrowd']: + continue + x1, y1, w, h = ann['bbox'] + bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1]) + bboxes = np.array(bboxes, dtype=np.float32) + if bboxes.shape[0] == 0: + bboxes = np.zeros((0, 4)) + gt_bboxes.append(bboxes) + + recalls = eval_recalls( + gt_bboxes, results, max_dets, iou_thrs, print_summary=False) + ar = recalls.mean(axis=1) + return ar + + +def xyxy2xywh(bbox): + _bbox = bbox.tolist() + return [ + _bbox[0], + _bbox[1], + _bbox[2] - _bbox[0] + 1, + _bbox[3] - _bbox[1] + 1, + ] + + +def proposal2json(dataset, results): + json_results = [] + for idx in range(len(dataset)): + img_id = dataset.img_ids[idx] + bboxes = results[idx] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = 1 + json_results.append(data) + return json_results + + +def det2json(dataset, results): + json_results = [] + for idx in range(len(dataset)): + img_id = dataset.img_ids[idx] + result = results[idx] + for label in range(len(result)): + bboxes = result[label] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = dataset.cat_ids[label] + json_results.append(data) + return json_results + + +def segm2json(dataset, results): + bbox_json_results = [] + segm_json_results = [] + for idx in range(len(dataset)): + img_id = dataset.img_ids[idx] + det, seg = results[idx] + for label in range(len(det)): + # bbox results + bboxes = det[label] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = dataset.cat_ids[label] + bbox_json_results.append(data) + + # segm results + # some detectors use different score for det and segm + if len(seg) == 2: + segms = seg[0][label] + mask_score = seg[1][label] + else: + segms = seg[label] + mask_score = [bbox[4] for bbox in bboxes] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['score'] = float(mask_score[i]) + data['category_id'] = dataset.cat_ids[label] + segms[i]['counts'] = segms[i]['counts'].decode() + data['segmentation'] = segms[i] + segm_json_results.append(data) + return bbox_json_results, segm_json_results + + +def results2json(dataset, results, out_file): + result_files = dict() + if isinstance(results[0], list): + json_results = det2json(dataset, results) + result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox') + result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox') + mmcv.dump(json_results, result_files['bbox']) + elif isinstance(results[0], tuple): + json_results = segm2json(dataset, results) + result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox') + result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox') + result_files['segm'] = '{}.{}.json'.format(out_file, 'segm') + mmcv.dump(json_results[0], result_files['bbox']) + mmcv.dump(json_results[1], result_files['segm']) + elif isinstance(results[0], np.ndarray): + json_results = proposal2json(dataset, results) + result_files['proposal'] = 
'{}.{}.json'.format(out_file, 'proposal')
+        mmcv.dump(json_results, result_files['proposal'])
+    else:
+        raise TypeError('invalid type of results')
+    return result_files
diff --git a/CDARTS_detection/mmdet/core/evaluation/eval_hooks.py b/CDARTS_detection/mmdet/core/evaluation/eval_hooks.py
new file mode 100644
index 0000000..e75bffb
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/evaluation/eval_hooks.py
@@ -0,0 +1,74 @@
+import os
+import os.path as osp
+
+import mmcv
+import torch
+import torch.distributed as dist
+from mmcv.parallel import collate, scatter
+from mmcv.runner import Hook
+from torch.utils.data import Dataset
+
+
+class DistEvalHook(Hook):
+
+    def __init__(self, dataset, interval=1, **eval_kwargs):
+        from mmdet import datasets
+        if isinstance(dataset, Dataset):
+            self.dataset = dataset
+        elif isinstance(dataset, dict):
+            self.dataset = datasets.build_dataset(dataset, {'test_mode': True})
+        else:
+            raise TypeError(
+                'dataset must be a Dataset object or a dict, not {}'.format(
+                    type(dataset)))
+        self.interval = interval
+        self.eval_kwargs = eval_kwargs
+
+    def after_train_epoch(self, runner):
+        if not self.every_n_epochs(runner, self.interval):
+            return
+        runner.model.eval()
+        results = [None for _ in range(len(self.dataset))]
+        if runner.rank == 0:
+            prog_bar = mmcv.ProgressBar(len(self.dataset))
+        for idx in range(runner.rank, len(self.dataset), runner.world_size):
+            data = self.dataset[idx]
+            data_gpu = scatter(
+                collate([data], samples_per_gpu=1),
+                [torch.cuda.current_device()])[0]
+
+            # compute output
+            with torch.no_grad():
+                result = runner.model(
+                    return_loss=False, rescale=True, **data_gpu)
+            results[idx] = result
+
+            batch_size = runner.world_size
+            if idx % 200 == 0:
+                if runner.rank == 0:
+                    for _ in range(0, batch_size, 10):
+                        prog_bar.update()
+
+        if runner.rank == 0:
+            print('\n')
+            dist.barrier()
+            for i in range(1, runner.world_size):
+                tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i))
+                tmp_results = mmcv.load(tmp_file)
+                for idx in range(i, len(results), runner.world_size):
+                    results[idx] = tmp_results[idx]
+                os.remove(tmp_file)
+            self.evaluate(runner, results)
+        else:
+            tmp_file = osp.join(runner.work_dir,
+                                'temp_{}.pkl'.format(runner.rank))
+            mmcv.dump(results, tmp_file)
+            dist.barrier()
+        dist.barrier()
+
+    def evaluate(self, runner, results):
+        eval_res = self.dataset.evaluate(
+            results, logger=runner.logger, **self.eval_kwargs)
+        for name, val in eval_res.items():
+            runner.log_buffer.output[name] = val
+        runner.log_buffer.ready = True
\ No newline at end of file
diff --git a/CDARTS_detection/mmdet/core/evaluation/mean_ap.py b/CDARTS_detection/mmdet/core/evaluation/mean_ap.py
new file mode 100644
index 0000000..b60d1cb
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/evaluation/mean_ap.py
@@ -0,0 +1,455 @@
+from multiprocessing import Pool
+
+import mmcv
+import numpy as np
+from terminaltables import AsciiTable
+
+from mmdet.utils import print_log
+from .bbox_overlaps import bbox_overlaps
+from .class_names import get_classes
+
+
+def average_precision(recalls, precisions, mode='area'):
+    """Calculate average precision (for single or multiple scales).
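+
+    For example, in 'area' mode (worked by hand): with
+    recalls = np.array([0.2, 0.5, 1.0]) and
+    precisions = np.array([1.0, 0.6, 0.4]), the interpolated precision
+    envelope at the recall steps is [1.0, 0.6, 0.4], so
+    ap = 0.2 * 1.0 + 0.3 * 0.6 + 0.5 * 0.4 = 0.58.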
+
+    Args:
+        recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
+        precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
+        mode (str): 'area' or '11points', 'area' means calculating the area
+            under precision-recall curve, '11points' means calculating
+            the average precision of recalls at [0, 0.1, ..., 1]
+
+    Returns:
+        float or ndarray: calculated average precision
+    """
+    no_scale = False
+    if recalls.ndim == 1:
+        no_scale = True
+        recalls = recalls[np.newaxis, :]
+        precisions = precisions[np.newaxis, :]
+    assert recalls.shape == precisions.shape and recalls.ndim == 2
+    num_scales = recalls.shape[0]
+    ap = np.zeros(num_scales, dtype=np.float32)
+    if mode == 'area':
+        zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
+        ones = np.ones((num_scales, 1), dtype=recalls.dtype)
+        mrec = np.hstack((zeros, recalls, ones))
+        mpre = np.hstack((zeros, precisions, zeros))
+        for i in range(mpre.shape[1] - 1, 0, -1):
+            mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
+        for i in range(num_scales):
+            ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
+            ap[i] = np.sum(
+                (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
+    elif mode == '11points':
+        for i in range(num_scales):
+            for thr in np.arange(0, 1 + 1e-3, 0.1):
+                precs = precisions[i, recalls[i, :] >= thr]
+                prec = precs.max() if precs.size > 0 else 0
+                ap[i] += prec
+        ap /= 11
+    else:
+        raise ValueError(
+            'Unrecognized mode, only "area" and "11points" are supported')
+    if no_scale:
+        ap = ap[0]
+    return ap
+
+
+def tpfp_imagenet(det_bboxes,
+                  gt_bboxes,
+                  gt_bboxes_ignore=None,
+                  default_iou_thr=0.5,
+                  area_ranges=None):
+    """Check if detected bboxes are true positive or false positive.
+
+    Args:
+        det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
+        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
+        gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
+            of shape (k, 4). Default: None
+        default_iou_thr (float): IoU threshold to be considered as matched for
+            medium and large bboxes (small ones have special rules).
+            Default: 0.5.
+        area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
+            in the format [(min1, max1), (min2, max2), ...]. Default: None.
+
+    Returns:
+        tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
+        each array is (num_scales, m).
+    """
+    # an indicator of ignored gts
+    gt_ignore_inds = np.concatenate(
+        (np.zeros(gt_bboxes.shape[0], dtype=np.bool),
+         np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))
+    # stack gt_bboxes and gt_bboxes_ignore for convenience
+    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
+
+    num_dets = det_bboxes.shape[0]
+    num_gts = gt_bboxes.shape[0]
+    if area_ranges is None:
+        area_ranges = [(None, None)]
+    num_scales = len(area_ranges)
+    # tp and fp are of shape (num_scales, num_dets), each row is tp or fp
+    # of a certain scale.
+    tp = np.zeros((num_scales, num_dets), dtype=np.float32)
+    fp = np.zeros((num_scales, num_dets), dtype=np.float32)
+    if gt_bboxes.shape[0] == 0:
+        if area_ranges == [(None, None)]:
+            fp[...] = 1
+        else:
+            det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0] + 1) * (
+                det_bboxes[:, 3] - det_bboxes[:, 1] + 1)
+            for i, (min_area, max_area) in enumerate(area_ranges):
+                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
+        return tp, fp
+    ious = bbox_overlaps(det_bboxes, gt_bboxes - 1)
+    gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1
+    gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1
+    iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),
+                          default_iou_thr)
+    # sort all detections by scores in descending order
+    sort_inds = np.argsort(-det_bboxes[:, -1])
+    for k, (min_area, max_area) in enumerate(area_ranges):
+        gt_covered = np.zeros(num_gts, dtype=bool)
+        # if no area range is specified, gt_area_ignore is all False
+        if min_area is None:
+            gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
+        else:
+            gt_areas = gt_w * gt_h
+            gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
+        for i in sort_inds:
+            max_iou = -1
+            matched_gt = -1
+            # find best overlapped available gt
+            for j in range(num_gts):
+                # different from PASCAL VOC: allow finding other gts if the
+                # best overlapped ones are already matched by other det bboxes
+                if gt_covered[j]:
+                    continue
+                elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
+                    max_iou = ious[i, j]
+                    matched_gt = j
+            # there are 4 cases for a det bbox:
+            # 1. it matches a gt, tp = 1, fp = 0
+            # 2. it matches an ignored gt, tp = 0, fp = 0
+            # 3. it matches no gt and within area range, tp = 0, fp = 1
+            # 4. it matches no gt but is beyond area range, tp = 0, fp = 0
+            if matched_gt >= 0:
+                gt_covered[matched_gt] = 1
+                if not (gt_ignore_inds[matched_gt]
+                        or gt_area_ignore[matched_gt]):
+                    tp[k, i] = 1
+            elif min_area is None:
+                fp[k, i] = 1
+            else:
+                bbox = det_bboxes[i, :4]
+                area = (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1)
+                if area >= min_area and area < max_area:
+                    fp[k, i] = 1
+    return tp, fp
+
+
+def tpfp_default(det_bboxes,
+                 gt_bboxes,
+                 gt_bboxes_ignore=None,
+                 iou_thr=0.5,
+                 area_ranges=None):
+    """Check if detected bboxes are true positive or false positive.
+
+    Args:
+        det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
+        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
+        gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
+            of shape (k, 4). Default: None
+        iou_thr (float): IoU threshold to be considered as matched.
+            Default: 0.5.
+        area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
+            in the format [(min1, max1), (min2, max2), ...]. Default: None.
+
+    Returns:
+        tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
+        each array is (num_scales, m).
+    """
+    # an indicator of ignored gts
+    gt_ignore_inds = np.concatenate(
+        (np.zeros(gt_bboxes.shape[0], dtype=np.bool),
+         np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))
+    # stack gt_bboxes and gt_bboxes_ignore for convenience
+    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
+
+    num_dets = det_bboxes.shape[0]
+    num_gts = gt_bboxes.shape[0]
+    if area_ranges is None:
+        area_ranges = [(None, None)]
+    num_scales = len(area_ranges)
+    # tp and fp are of shape (num_scales, num_dets), each row is tp or fp of
+    # a certain scale
+    tp = np.zeros((num_scales, num_dets), dtype=np.float32)
+    fp = np.zeros((num_scales, num_dets), dtype=np.float32)
+
+    # if there is no gt bboxes in this image, then all det bboxes
+    # within area range are false positives
+    if gt_bboxes.shape[0] == 0:
+        if area_ranges == [(None, None)]:
+            fp[...] = 1
+        else:
+            det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0] + 1) * (
+                det_bboxes[:, 3] - det_bboxes[:, 1] + 1)
+            for i, (min_area, max_area) in enumerate(area_ranges):
+                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
+        return tp, fp
+
+    ious = bbox_overlaps(det_bboxes, gt_bboxes)
+    # for each det, the max iou with all gts
+    ious_max = ious.max(axis=1)
+    # for each det, which gt overlaps most with it
+    ious_argmax = ious.argmax(axis=1)
+    # sort all dets in descending order by scores
+    sort_inds = np.argsort(-det_bboxes[:, -1])
+    for k, (min_area, max_area) in enumerate(area_ranges):
+        gt_covered = np.zeros(num_gts, dtype=bool)
+        # if no area range is specified, gt_area_ignore is all False
+        if min_area is None:
+            gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
+        else:
+            gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
+                gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
+            gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
+        for i in sort_inds:
+            if ious_max[i] >= iou_thr:
+                matched_gt = ious_argmax[i]
+                if not (gt_ignore_inds[matched_gt]
+                        or gt_area_ignore[matched_gt]):
+                    if not gt_covered[matched_gt]:
+                        gt_covered[matched_gt] = True
+                        tp[k, i] = 1
+                    else:
+                        fp[k, i] = 1
+                # otherwise ignore this detected bbox, tp = 0, fp = 0
+            elif min_area is None:
+                fp[k, i] = 1
+            else:
+                bbox = det_bboxes[i, :4]
+                area = (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1)
+                if area >= min_area and area < max_area:
+                    fp[k, i] = 1
+    return tp, fp
+
+
+def get_cls_results(det_results, annotations, class_id):
+    """Get det results and gt information of a certain class.
+
+    Args:
+        det_results (list[list]): Same as `eval_map()`.
+        annotations (list[dict]): Same as `eval_map()`.
+        class_id (int): ID of the class to collect results for.
+
+    Returns:
+        tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
+    """
+    cls_dets = [img_res[class_id] for img_res in det_results]
+    cls_gts = []
+    cls_gts_ignore = []
+    for ann in annotations:
+        gt_inds = ann['labels'] == (class_id + 1)
+        cls_gts.append(ann['bboxes'][gt_inds, :])
+
+        if ann.get('labels_ignore', None) is not None:
+            ignore_inds = ann['labels_ignore'] == (class_id + 1)
+            cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])
+        else:
+            cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))
+
+    return cls_dets, cls_gts, cls_gts_ignore
+
+
+def eval_map(det_results,
+             annotations,
+             scale_ranges=None,
+             iou_thr=0.5,
+             dataset=None,
+             logger=None,
+             nproc=4):
+    """Evaluate mAP of a dataset.
+
+    Args:
+        det_results (list[list]): [[cls1_det, cls2_det, ...], ...].
+            The outer list indicates images, and the inner list indicates
+            per-class detected bboxes.
+        annotations (list[dict]): Ground truth annotations where each item of
+            the list indicates an image. Keys of annotations are:
+            - "bboxes": numpy array of shape (n, 4)
+            - "labels": numpy array of shape (n, )
+            - "bboxes_ignore" (optional): numpy array of shape (k, 4)
+            - "labels_ignore" (optional): numpy array of shape (k, )
+        scale_ranges (list[tuple] | None): Range of scales to be evaluated,
+            in the format [(min1, max1), (min2, max2), ...]. A range of
+            (32, 64) means the area range between (32**2, 64**2).
+            Default: None.
+        iou_thr (float): IoU threshold to be considered as matched.
+            Default: 0.5.
+        dataset (list[str] | str | None): Dataset name or dataset classes,
+            there are minor differences in metrics for different datasets,
+            e.g. "voc07", "imagenet_det", etc. Default: None.
+        logger (logging.Logger | str | None): The way to print the mAP
+            summary. See `mmdet.utils.print_log()` for details.
Default: None. + nproc (int): Processes used for computing TP and FP. + Default: 4. + + Returns: + tuple: (mAP, [dict, dict, ...]) + """ + assert len(det_results) == len(annotations) + + num_imgs = len(det_results) + num_scales = len(scale_ranges) if scale_ranges is not None else 1 + num_classes = len(det_results[0]) # positive class num + area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges] + if scale_ranges is not None else None) + + pool = Pool(nproc) + eval_results = [] + for i in range(num_classes): + # get gt and det bboxes of this class + cls_dets, cls_gts, cls_gts_ignore = get_cls_results( + det_results, annotations, i) + # choose proper function according to datasets to compute tp and fp + if dataset in ['det', 'vid']: + tpfp_func = tpfp_imagenet + else: + tpfp_func = tpfp_default + # compute tp and fp for each image with multiple processes + tpfp = pool.starmap( + tpfp_func, + zip(cls_dets, cls_gts, cls_gts_ignore, + [iou_thr for _ in range(num_imgs)], + [area_ranges for _ in range(num_imgs)])) + tp, fp = tuple(zip(*tpfp)) + # calculate gt number of each scale + # ignored gts or gts beyond the specific scale are not counted + num_gts = np.zeros(num_scales, dtype=int) + for j, bbox in enumerate(cls_gts): + if area_ranges is None: + num_gts[0] += bbox.shape[0] + else: + gt_areas = (bbox[:, 2] - bbox[:, 0] + 1) * ( + bbox[:, 3] - bbox[:, 1] + 1) + for k, (min_area, max_area) in enumerate(area_ranges): + num_gts[k] += np.sum((gt_areas >= min_area) + & (gt_areas < max_area)) + # sort all det bboxes by score, also sort tp and fp + cls_dets = np.vstack(cls_dets) + num_dets = cls_dets.shape[0] + sort_inds = np.argsort(-cls_dets[:, -1]) + tp = np.hstack(tp)[:, sort_inds] + fp = np.hstack(fp)[:, sort_inds] + # calculate recall and precision with tp and fp + tp = np.cumsum(tp, axis=1) + fp = np.cumsum(fp, axis=1) + eps = np.finfo(np.float32).eps + recalls = tp / np.maximum(num_gts[:, np.newaxis], eps) + precisions = tp / np.maximum((tp + fp), eps) + # calculate AP + if scale_ranges is None: + recalls = recalls[0, :] + precisions = precisions[0, :] + num_gts = num_gts.item() + mode = 'area' if dataset != 'voc07' else '11points' + ap = average_precision(recalls, precisions, mode) + eval_results.append({ + 'num_gts': num_gts, + 'num_dets': num_dets, + 'recall': recalls, + 'precision': precisions, + 'ap': ap + }) + if scale_ranges is not None: + # shape (num_classes, num_scales) + all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results]) + all_num_gts = np.vstack( + [cls_result['num_gts'] for cls_result in eval_results]) + mean_ap = [] + for i in range(num_scales): + if np.any(all_num_gts[:, i] > 0): + mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean()) + else: + mean_ap.append(0.0) + else: + aps = [] + for cls_result in eval_results: + if cls_result['num_gts'] > 0: + aps.append(cls_result['ap']) + mean_ap = np.array(aps).mean().item() if aps else 0.0 + + print_map_summary( + mean_ap, eval_results, dataset, area_ranges, logger=logger) + + return mean_ap, eval_results + + +def print_map_summary(mean_ap, + results, + dataset=None, + scale_ranges=None, + logger=None): + """Print mAP and results of each class. + + A table will be printed to show the gts/dets/recall/AP of each class and + the mAP. + + Args: + mean_ap (float): Calculated from `eval_map()`. + results (list[dict]): Calculated from `eval_map()`. + dataset (list[str] | str | None): Dataset name or dataset classes. + scale_ranges (list[tuple] | None): Range of scales to be evaluated. 
+ logger (logging.Logger | str | None): The way to print the mAP + summary. See `mmdet.utils.print_log()` for details. Default: None. + """ + + if logger == 'silent': + return + + if isinstance(results[0]['ap'], np.ndarray): + num_scales = len(results[0]['ap']) + else: + num_scales = 1 + + if scale_ranges is not None: + assert len(scale_ranges) == num_scales + + num_classes = len(results) + + recalls = np.zeros((num_scales, num_classes), dtype=np.float32) + aps = np.zeros((num_scales, num_classes), dtype=np.float32) + num_gts = np.zeros((num_scales, num_classes), dtype=int) + for i, cls_result in enumerate(results): + if cls_result['recall'].size > 0: + recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1] + aps[:, i] = cls_result['ap'] + num_gts[:, i] = cls_result['num_gts'] + + if dataset is None: + label_names = [str(i) for i in range(1, num_classes + 1)] + elif mmcv.is_str(dataset): + label_names = get_classes(dataset) + else: + label_names = dataset + + if not isinstance(mean_ap, list): + mean_ap = [mean_ap] + + header = ['class', 'gts', 'dets', 'recall', 'ap'] + for i in range(num_scales): + if scale_ranges is not None: + print_log('Scale range {}'.format(scale_ranges[i]), logger=logger) + table_data = [header] + for j in range(num_classes): + row_data = [ + label_names[j], num_gts[i, j], results[j]['num_dets'], + '{:.3f}'.format(recalls[i, j]), '{:.3f}'.format(aps[i, j]) + ] + table_data.append(row_data) + table_data.append(['mAP', '', '', '', '{:.3f}'.format(mean_ap[i])]) + table = AsciiTable(table_data) + table.inner_footing_row_border = True + print_log('\n' + table.table, logger=logger) \ No newline at end of file diff --git a/CDARTS_detection/mmdet/core/evaluation/recall.py b/CDARTS_detection/mmdet/core/evaluation/recall.py new file mode 100644 index 0000000..132c9ec --- /dev/null +++ b/CDARTS_detection/mmdet/core/evaluation/recall.py @@ -0,0 +1,193 @@ +from collections.abc import Sequence + +import numpy as np +from terminaltables import AsciiTable + +from mmdet.utils import print_log +from .bbox_overlaps import bbox_overlaps + + +def _recalls(all_ious, proposal_nums, thrs): + + img_num = all_ious.shape[0] + total_gt_num = sum([ious.shape[0] for ious in all_ious]) + + _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32) + for k, proposal_num in enumerate(proposal_nums): + tmp_ious = np.zeros(0) + for i in range(img_num): + ious = all_ious[i][:, :proposal_num].copy() + gt_ious = np.zeros((ious.shape[0])) + if ious.size == 0: + tmp_ious = np.hstack((tmp_ious, gt_ious)) + continue + for j in range(ious.shape[0]): + gt_max_overlaps = ious.argmax(axis=1) + max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps] + gt_idx = max_ious.argmax() + gt_ious[j] = max_ious[gt_idx] + box_idx = gt_max_overlaps[gt_idx] + ious[gt_idx, :] = -1 + ious[:, box_idx] = -1 + tmp_ious = np.hstack((tmp_ious, gt_ious)) + _ious[k, :] = tmp_ious + + _ious = np.fliplr(np.sort(_ious, axis=1)) + recalls = np.zeros((proposal_nums.size, thrs.size)) + for i, thr in enumerate(thrs): + recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num) + + return recalls + + +def set_recall_param(proposal_nums, iou_thrs): + """Check proposal_nums and iou_thrs and set correct format. 
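+
+    For example, set_recall_param(300, None) returns
+    (np.array([300]), np.array([0.5])).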
+ """ + if isinstance(proposal_nums, Sequence): + _proposal_nums = np.array(proposal_nums) + elif isinstance(proposal_nums, int): + _proposal_nums = np.array([proposal_nums]) + else: + _proposal_nums = proposal_nums + + if iou_thrs is None: + _iou_thrs = np.array([0.5]) + elif isinstance(iou_thrs, Sequence): + _iou_thrs = np.array(iou_thrs) + elif isinstance(iou_thrs, float): + _iou_thrs = np.array([iou_thrs]) + else: + _iou_thrs = iou_thrs + + return _proposal_nums, _iou_thrs + + +def eval_recalls(gts, + proposals, + proposal_nums=None, + iou_thrs=0.5, + logger=None): + """Calculate recalls. + + Args: + gts (list[ndarray]): a list of arrays of shape (n, 4) + proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5) + proposal_nums (int | Sequence[int]): Top N proposals to be evaluated. + iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5. + logger (logging.Logger | str | None): The way to print the recall + summary. See `mmdet.utils.print_log()` for details. Default: None. + + Returns: + ndarray: recalls of different ious and proposal nums + """ + + img_num = len(gts) + assert img_num == len(proposals) + + proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs) + + all_ious = [] + for i in range(img_num): + if proposals[i].ndim == 2 and proposals[i].shape[1] == 5: + scores = proposals[i][:, 4] + sort_idx = np.argsort(scores)[::-1] + img_proposal = proposals[i][sort_idx, :] + else: + img_proposal = proposals[i] + prop_num = min(img_proposal.shape[0], proposal_nums[-1]) + if gts[i] is None or gts[i].shape[0] == 0: + ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32) + else: + ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4]) + all_ious.append(ious) + all_ious = np.array(all_ious) + recalls = _recalls(all_ious, proposal_nums, iou_thrs) + + print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger) + return recalls + + +def print_recall_summary(recalls, + proposal_nums, + iou_thrs, + row_idxs=None, + col_idxs=None, + logger=None): + """Print recalls in a table. + + Args: + recalls (ndarray): calculated from `bbox_recalls` + proposal_nums (ndarray or list): top N proposals + iou_thrs (ndarray or list): iou thresholds + row_idxs (ndarray): which rows(proposal nums) to print + col_idxs (ndarray): which cols(iou thresholds) to print + logger (logging.Logger | str | None): The way to print the recall + summary. See `mmdet.utils.print_log()` for details. Default: None. + """ + proposal_nums = np.array(proposal_nums, dtype=np.int32) + iou_thrs = np.array(iou_thrs) + if row_idxs is None: + row_idxs = np.arange(proposal_nums.size) + if col_idxs is None: + col_idxs = np.arange(iou_thrs.size) + row_header = [''] + iou_thrs[col_idxs].tolist() + table_data = [row_header] + for i, num in enumerate(proposal_nums[row_idxs]): + row = [ + '{:.3f}'.format(val) + for val in recalls[row_idxs[i], col_idxs].tolist() + ] + row.insert(0, num) + table_data.append(row) + table = AsciiTable(table_data) + print_log('\n' + table.table, logger=logger) + + +def plot_num_recall(recalls, proposal_nums): + """Plot Proposal_num-Recalls curve. 
+
+    Args:
+        recalls(ndarray or list): shape (k,)
+        proposal_nums(ndarray or list): same shape as `recalls`
+    """
+    if isinstance(proposal_nums, np.ndarray):
+        _proposal_nums = proposal_nums.tolist()
+    else:
+        _proposal_nums = proposal_nums
+    if isinstance(recalls, np.ndarray):
+        _recalls = recalls.tolist()
+    else:
+        _recalls = recalls
+
+    import matplotlib.pyplot as plt
+    f = plt.figure()
+    plt.plot([0] + _proposal_nums, [0] + _recalls)
+    plt.xlabel('Proposal num')
+    plt.ylabel('Recall')
+    plt.axis([0, proposal_nums.max(), 0, 1])
+    f.show()
+
+
+def plot_iou_recall(recalls, iou_thrs):
+    """Plot IoU-Recalls curve.
+
+    Args:
+        recalls(ndarray or list): shape (k,)
+        iou_thrs(ndarray or list): same shape as `recalls`
+    """
+    if isinstance(iou_thrs, np.ndarray):
+        _iou_thrs = iou_thrs.tolist()
+    else:
+        _iou_thrs = iou_thrs
+    if isinstance(recalls, np.ndarray):
+        _recalls = recalls.tolist()
+    else:
+        _recalls = recalls
+
+    import matplotlib.pyplot as plt
+    f = plt.figure()
+    plt.plot(_iou_thrs + [1.0], _recalls + [0.])
+    plt.xlabel('IoU')
+    plt.ylabel('Recall')
+    plt.axis([iou_thrs.min(), 1, 0, 1])
+    f.show()
\ No newline at end of file
diff --git a/CDARTS_detection/mmdet/core/fp16/__init__.py b/CDARTS_detection/mmdet/core/fp16/__init__.py
new file mode 100644
index 0000000..cc655b7
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/fp16/__init__.py
@@ -0,0 +1,4 @@
+from .decorators import auto_fp16, force_fp32
+from .hooks import Fp16OptimizerHook, wrap_fp16_model
+
+__all__ = ['auto_fp16', 'force_fp32', 'Fp16OptimizerHook', 'wrap_fp16_model']
diff --git a/CDARTS_detection/mmdet/core/fp16/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/core/fp16/__pycache__/__init__.cpython-36.pyc
new file mode 100644
(compiled .pyc file; binary patch data omitted)
(binary patches for the remaining fp16 __pycache__ .pyc files omitted)
diff --git a/CDARTS_detection/mmdet/core/fp16/decorators.py b/CDARTS_detection/mmdet/core/fp16/decorators.py
new file mode 100644
index 0000000..10ffbf8
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/fp16/decorators.py
@@ -0,0 +1,160 @@
+import functools
+from inspect import getfullargspec
+
+import torch
+
+from .utils import cast_tensor_type
+
+
+def auto_fp16(apply_to=None, out_fp32=False):
+    """Decorator to enable fp16 training automatically.
+
+    This decorator is useful when you write custom modules and want to support
+    mixed precision training. If input arguments are fp32 tensors, they will
+    be converted to fp16 automatically. Arguments other than fp32 tensors are
+    ignored.
+
+    Args:
+        apply_to (Iterable, optional): The argument names to be converted.
+            `None` indicates all arguments.
+        out_fp32 (bool): Whether to convert the output back to fp32.
+
+    :Example:
+
+        class MyModule1(nn.Module):
+
+            # Convert x and y to fp16
+            @auto_fp16()
+            def forward(self, x, y):
+                pass
+
+        class MyModule2(nn.Module):
+
+            # convert pred to fp16
+            @auto_fp16(apply_to=('pred', ))
+            def do_something(self, pred, others):
+                pass
+    """
+
+    def auto_fp16_wrapper(old_func):
+
+        @functools.wraps(old_func)
+        def new_func(*args, **kwargs):
+            # check if the module has set the attribute `fp16_enabled`, if not,
+            # just fallback to the original method.
+ if not isinstance(args[0], torch.nn.Module): + raise TypeError('@auto_fp16 can only be used to decorate the ' + 'method of nn.Module') + if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): + return old_func(*args, **kwargs) + # get the arg spec of the decorated method + args_info = getfullargspec(old_func) + # get the argument names to be casted + args_to_cast = args_info.args if apply_to is None else apply_to + # convert the args that need to be processed + new_args = [] + # NOTE: default args are not taken into consideration + if args: + arg_names = args_info.args[:len(args)] + for i, arg_name in enumerate(arg_names): + if arg_name in args_to_cast: + new_args.append( + cast_tensor_type(args[i], torch.float, torch.half)) + else: + new_args.append(args[i]) + # convert the kwargs that need to be processed + new_kwargs = {} + if kwargs: + for arg_name, arg_value in kwargs.items(): + if arg_name in args_to_cast: + new_kwargs[arg_name] = cast_tensor_type( + arg_value, torch.float, torch.half) + else: + new_kwargs[arg_name] = arg_value + # apply converted arguments to the decorated method + output = old_func(*new_args, **new_kwargs) + # cast the results back to fp32 if necessary + if out_fp32: + output = cast_tensor_type(output, torch.half, torch.float) + return output + + return new_func + + return auto_fp16_wrapper + + +def force_fp32(apply_to=None, out_fp16=False): + """Decorator to convert input arguments to fp32 in force. + + This decorator is useful when you write custom modules and want to support + mixed precision training. If there are some inputs that must be processed + in fp32 mode, then this decorator can handle it. If inputs arguments are + fp16 tensors, they will be converted to fp32 automatically. Arguments other + than fp16 tensors are ignored. + + Args: + apply_to (Iterable, optional): The argument names to be converted. + `None` indicates all arguments. + out_fp16 (bool): Whether to convert the output back to fp16. + + :Example: + + class MyModule1(nn.Module) + + # Convert x and y to fp32 + @force_fp32() + def loss(self, x, y): + pass + + class MyModule2(nn.Module): + + # convert pred to fp32 + @force_fp32(apply_to=('pred', )) + def post_process(self, pred, others): + pass + """ + + def force_fp32_wrapper(old_func): + + @functools.wraps(old_func) + def new_func(*args, **kwargs): + # check if the module has set the attribute `fp16_enabled`, if not, + # just fallback to the original method. 
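+            # Same contract as auto_fp16 above: args[0] must be an nn.Module
+            # carrying `fp16_enabled`; only the cast direction is reversed
+            # (fp16 -> fp32 on the way in).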
+            if not isinstance(args[0], torch.nn.Module):
+                raise TypeError('@force_fp32 can only be used to decorate the '
+                                'method of nn.Module')
+            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
+                return old_func(*args, **kwargs)
+            # get the arg spec of the decorated method
+            args_info = getfullargspec(old_func)
+            # get the argument names to be casted
+            args_to_cast = args_info.args if apply_to is None else apply_to
+            # convert the args that need to be processed
+            new_args = []
+            if args:
+                arg_names = args_info.args[:len(args)]
+                for i, arg_name in enumerate(arg_names):
+                    if arg_name in args_to_cast:
+                        new_args.append(
+                            cast_tensor_type(args[i], torch.half, torch.float))
+                    else:
+                        new_args.append(args[i])
+            # convert the kwargs that need to be processed
+            new_kwargs = dict()
+            if kwargs:
+                for arg_name, arg_value in kwargs.items():
+                    if arg_name in args_to_cast:
+                        new_kwargs[arg_name] = cast_tensor_type(
+                            arg_value, torch.half, torch.float)
+                    else:
+                        new_kwargs[arg_name] = arg_value
+            # apply converted arguments to the decorated method
+            output = old_func(*new_args, **new_kwargs)
+            # cast the results back to fp16 if necessary
+            if out_fp16:
+                output = cast_tensor_type(output, torch.float, torch.half)
+            return output
+
+        return new_func
+
+    return force_fp32_wrapper
diff --git a/CDARTS_detection/mmdet/core/fp16/hooks.py b/CDARTS_detection/mmdet/core/fp16/hooks.py
new file mode 100644
index 0000000..b1ab45e
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/fp16/hooks.py
@@ -0,0 +1,126 @@
+import copy
+import torch
+import torch.nn as nn
+from mmcv.runner import OptimizerHook
+
+from .utils import cast_tensor_type
+from ..utils.dist_utils import allreduce_grads
+
+
+class Fp16OptimizerHook(OptimizerHook):
+    """FP16 optimizer hook.
+
+    The steps of the fp16 optimizer are as follows.
+    1. Scale the loss value.
+    2. BP in the fp16 model.
+    3. Copy gradients from the fp16 model to the fp32 weight copy.
+    4. Update the fp32 weights.
+    5. Copy updated parameters from the fp32 weight copy back to the fp16
+       model.
+
+    Refer to https://arxiv.org/abs/1710.03740 for more details.
+
+    Args:
+        loss_scale (float): Scale factor multiplied with loss.
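+
+    Example (a minimal sketch; the argument values shown are illustrative
+    assumptions, not required settings):
+
+        optimizer_config = Fp16OptimizerHook(
+            grad_clip=dict(max_norm=35, norm_type=2), loss_scale=512.)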
+ """ + + def __init__(self, + grad_clip=None, + coalesce=True, + bucket_size_mb=-1, + loss_scale=512., + distributed=True): + self.grad_clip = grad_clip + self.coalesce = coalesce + self.bucket_size_mb = bucket_size_mb + self.loss_scale = loss_scale + self.distributed = distributed + + def before_run(self, runner): + # keep a copy of fp32 weights + runner.optimizer.param_groups = copy.deepcopy( + runner.optimizer.param_groups) + # convert model to fp16 + wrap_fp16_model(runner.model) + + def copy_grads_to_fp32(self, fp16_net, fp32_weights): + """Copy gradients from fp16 model to fp32 weight copy.""" + for fp32_param, fp16_param in zip(fp32_weights, fp16_net.parameters()): + if fp16_param.grad is not None: + if fp32_param.grad is None: + fp32_param.grad = fp32_param.data.new(fp32_param.size()) + fp32_param.grad.copy_(fp16_param.grad) + + def copy_params_to_fp16(self, fp16_net, fp32_weights): + """Copy updated params from fp32 weight copy to fp16 model.""" + for fp16_param, fp32_param in zip(fp16_net.parameters(), fp32_weights): + fp16_param.data.copy_(fp32_param.data) + + def after_train_iter(self, runner): + # clear grads of last iteration + runner.model.zero_grad() + runner.optimizer.zero_grad() + # scale the loss value + scaled_loss = runner.outputs['loss'] * self.loss_scale + scaled_loss.backward() + # copy fp16 grads in the model to fp32 params in the optimizer + fp32_weights = [] + for param_group in runner.optimizer.param_groups: + fp32_weights += param_group['params'] + self.copy_grads_to_fp32(runner.model, fp32_weights) + # allreduce grads + if self.distributed: + allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb) + # scale the gradients back + for param in fp32_weights: + if param.grad is not None: + param.grad.div_(self.loss_scale) + if self.grad_clip is not None: + self.clip_grads(fp32_weights) + # update fp32 params + runner.optimizer.step() + # copy fp32 params to the fp16 model + self.copy_params_to_fp16(runner.model, fp32_weights) + + +def wrap_fp16_model(model): + # convert model to fp16 + model.half() + # patch the normalization layers to make it work in fp32 mode + patch_norm_fp32(model) + # set `fp16_enabled` flag + for m in model.modules(): + if hasattr(m, 'fp16_enabled'): + m.fp16_enabled = True + + +def patch_norm_fp32(module): + if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)): + module.float() + module.forward = patch_forward_method(module.forward, torch.half, + torch.float) + for child in module.children(): + patch_norm_fp32(child) + return module + + +def patch_forward_method(func, src_type, dst_type, convert_output=True): + """Patch the forward method of a module. + + Args: + func (callable): The original forward method. + src_type (torch.dtype): Type of input arguments to be converted from. + dst_type (torch.dtype): Type of input arguments to be converted to. + convert_output (bool): Whether to convert the output back to src_type. + + Returns: + callable: The patched forward method. 
+ """ + + def new_forward(*args, **kwargs): + output = func(*cast_tensor_type(args, src_type, dst_type), + **cast_tensor_type(kwargs, src_type, dst_type)) + if convert_output: + output = cast_tensor_type(output, dst_type, src_type) + return output + + return new_forward diff --git a/CDARTS_detection/mmdet/core/fp16/utils.py b/CDARTS_detection/mmdet/core/fp16/utils.py new file mode 100644 index 0000000..ce691c7 --- /dev/null +++ b/CDARTS_detection/mmdet/core/fp16/utils.py @@ -0,0 +1,23 @@ +from collections import abc + +import numpy as np +import torch + + +def cast_tensor_type(inputs, src_type, dst_type): + if isinstance(inputs, torch.Tensor): + return inputs.to(dst_type) + elif isinstance(inputs, str): + return inputs + elif isinstance(inputs, np.ndarray): + return inputs + elif isinstance(inputs, abc.Mapping): + return type(inputs)({ + k: cast_tensor_type(v, src_type, dst_type) + for k, v in inputs.items() + }) + elif isinstance(inputs, abc.Iterable): + return type(inputs)( + cast_tensor_type(item, src_type, dst_type) for item in inputs) + else: + return inputs diff --git a/CDARTS_detection/mmdet/core/mask/__init__.py b/CDARTS_detection/mmdet/core/mask/__init__.py new file mode 100644 index 0000000..b703b55 --- /dev/null +++ b/CDARTS_detection/mmdet/core/mask/__init__.py @@ -0,0 +1,4 @@ +from .utils import split_combined_polys +from .mask_target import mask_target + +__all__ = ['split_combined_polys', 'mask_target'] diff --git a/CDARTS_detection/mmdet/core/mask/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/core/mask/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..730e389d775d133fd4bae4417ab83e4cbf1c3e7e GIT binary patch literal 262 zcmYL@J!%6n5QQb}+A)dU`3PyYi6Cx6Vtjx!>9!e^l~{}DXVGYj?QL124lguikvrqciX2SF8zZ Gz83!o5kzkQ literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/core/mask/__pycache__/mask_target.cpython-36.pyc b/CDARTS_detection/mmdet/core/mask/__pycache__/mask_target.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2029c607588da70c2b6be4ea1c56c15ad8c427c1 GIT binary patch literal 1305 zcmZuw&5qkP5GE-~mgS#yoGmt+LwhS=pjfw?>ta)&={1)e0tg8NMU=JjT0bG_#9mM* zCumOvdhHwZ3HmO)_LNH=U{9H$H;D_B#C#-&^C!-yN25{n`1UIO$0y`paxfU|e+E%M z00<)Jip2Ys#>}0)*b_{6??}uA7e1`MpwGxG_y8W{X~uDZ5!{IftGWRAjV!33g1rkh z(bSL~-O(q29oI~Ih8Sv?;XA+cHDCHVFg{vzzVRWt_cWsr0|%CfLGOd82|z&%c^P)} zck(BD&7YCKiA0+jv+V3gOOqM3UbZNl^7hyHRa52XfOWB5r}aGBid4;2+o*X}iCpKI zE}MG4pJ$EC=T)k%9V9xHMXt}seo>b&*?dmDkp*($-E7d=gdG#VWVHZe(@^WvAM5%rpz)DU_JBvVjGnCa8^1QI{%J z7IiL?LMLS{?yg2~!pQrhghrD%glBM$c6g9f?VpP}8;O0hgM({Ss0e_thcuuO9aD+V z{-x7XIP^e%0Z|OvWV|KmxFJjEH+HZf9duLBYc4;6mhH&W+mWtk=$Ad+{G~Z|{0Vva zM;8JUs0mNWDe3yAXF^~edCwN?lpr&fJ%}2a=mqZv-LM;VW6*_WWCjn2U?(6u)*U(B z*bEPJ6Ww1%rf-HYCnpfm3}7DN4r3c`_yT?JnP53E_jZ($*O$+U9xg`^ZEON$*5JM^-4ZT)&(wOfeW{l%&Q;xOIV zK;&=AEVunSf1BLovQe{XJo*ol?D7AR$cCT!v!?|%+4dk!0@m^fPVw+78KG!e#=Sci z%U``}HkNIEvTO^oIfwP!(zljg*s7*%}_Kpf9F4e1c(7~}!SF+FDA zI{qEgh)t;TnFa0{EA+ztCovgN A^8f$< literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/core/mask/__pycache__/utils.cpython-36.pyc b/CDARTS_detection/mmdet/core/mask/__pycache__/utils.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e4ac769404224a5b9e8f0200a7e700e7a8c18cd GIT binary patch literal 1218 zcmb7E&2H2%5Vn)-b~pV&t;&I$k6j5i0&Z2IASwhG4pagu5(;i?vMz}eY%j1<_OzUN z53W2;uAF#U%07xW?z?FJ);2`K0ZO#K#?0?otEuM9G{MGL&ZC+N-7dDNmdx`pPw z7Tf(iSfF{o4Oa4o3D2Xp*Y+2|eBk6ky8-gB4bOY+aDg%Uu=g6ZA!LB*S4i`;DTN_s 
diff --git a/CDARTS_detection/mmdet/core/mask/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/core/mask/__pycache__/__init__.cpython-36.pyc
new file mode 100644
Binary files /dev/null and b/CDARTS_detection/mmdet/core/mask/__pycache__/__init__.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/core/mask/__pycache__/mask_target.cpython-36.pyc b/CDARTS_detection/mmdet/core/mask/__pycache__/mask_target.cpython-36.pyc
new file mode 100644
Binary files /dev/null and b/CDARTS_detection/mmdet/core/mask/__pycache__/mask_target.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/core/mask/__pycache__/utils.cpython-36.pyc b/CDARTS_detection/mmdet/core/mask/__pycache__/utils.cpython-36.pyc
new file mode 100644
Binary files /dev/null and b/CDARTS_detection/mmdet/core/mask/__pycache__/utils.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/core/mask/mask_target.py b/CDARTS_detection/mmdet/core/mask/mask_target.py
new file mode 100644
index 0000000..be93dfc
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/mask/mask_target.py
@@ -0,0 +1,36 @@
+import torch
+import numpy as np
+import mmcv
+
+
+def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
+                cfg):
+    cfg_list = [cfg for _ in range(len(pos_proposals_list))]
+    mask_targets = map(mask_target_single, pos_proposals_list,
+                       pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
+    mask_targets = torch.cat(list(mask_targets))
+    return mask_targets
+
+
+def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
+    mask_size = cfg.mask_size
+    num_pos = pos_proposals.size(0)
+    mask_targets = []
+    if num_pos > 0:
+        proposals_np = pos_proposals.cpu().numpy()
+        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
+        for i in range(num_pos):
+            gt_mask = gt_masks[pos_assigned_gt_inds[i]]
+            bbox = proposals_np[i, :].astype(np.int32)
+            x1, y1, x2, y2 = bbox
+            w = np.maximum(x2 - x1 + 1, 1)
+            h = np.maximum(y2 - y1 + 1, 1)
+            # mask is uint8 both before and after resizing
+            target = mmcv.imresize(gt_mask[y1:y1 + h, x1:x1 + w],
+                                   (mask_size, mask_size))
+            mask_targets.append(target)
+        mask_targets = torch.from_numpy(np.stack(mask_targets)).float().to(
+            pos_proposals.device)
+    else:
+        mask_targets = pos_proposals.new_zeros((0, mask_size, mask_size))
+    return mask_targets
diff --git a/CDARTS_detection/mmdet/core/mask/utils.py b/CDARTS_detection/mmdet/core/mask/utils.py
new file mode 100644
index 0000000..a68312b
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/mask/utils.py
@@ -0,0 +1,30 @@
+import mmcv
+
+
+def split_combined_polys(polys, poly_lens, polys_per_mask):
+    """Split the combined 1-D polys into masks.
+
+    A mask is represented as a list of polys, and a poly is represented as
+    a 1-D array. In the dataset, all masks are concatenated into a single
+    1-D tensor. Here we need to split the tensor into the original
+    representations.
+
+    Args:
+        polys (list): a list (length = image num) of 1-D tensors
+        poly_lens (list): a list (length = image num) of poly length
+        polys_per_mask (list): a list (length = image num) of poly number
+            of each mask
+
+    Returns:
+        list: a list (length = image num) of list (length = mask num) of
+            list (length = poly num) of numpy array
+    """
+    mask_polys_list = []
+    for img_id in range(len(polys)):
+        polys_single = polys[img_id]
+        polys_lens_single = poly_lens[img_id].tolist()
+        polys_per_mask_single = polys_per_mask[img_id].tolist()
+
+        split_polys = mmcv.slice_list(polys_single, polys_lens_single)
+        mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
+        mask_polys_list.append(mask_polys)
+    return mask_polys_list
diff --git a/CDARTS_detection/mmdet/core/post_processing/__init__.py b/CDARTS_detection/mmdet/core/post_processing/__init__.py
new file mode 100644
index 0000000..1b24a3f
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/post_processing/__init__.py
@@ -0,0 +1,8 @@
+from .bbox_nms import multiclass_nms
+from .merge_augs import (merge_aug_proposals, merge_aug_bboxes,
+                         merge_aug_scores, merge_aug_masks)
+
+__all__ = [
+    'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
+    'merge_aug_scores', 'merge_aug_masks'
+]
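To make the double `slice_list` in `split_combined_polys` concrete, here is a small sketch with made-up numbers; `mmcv.slice_list` is the same vendored helper the function calls.

import mmcv

combined = [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5]  # flattened poly coordinates
poly_lens = [4, 4, 4]        # three polys, four values each
polys_per_mask = [2, 1]      # mask 0 owns two polys, mask 1 owns one

split_polys = mmcv.slice_list(combined, poly_lens)
# -> [[0, 0, 1, 1], [2, 2, 3, 3], [4, 4, 5, 5]]
mask_polys = mmcv.slice_list(split_polys, polys_per_mask)
# -> [[[0, 0, 1, 1], [2, 2, 3, 3]], [[4, 4, 5, 5]]]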
diff --git a/CDARTS_detection/mmdet/core/post_processing/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/core/post_processing/__pycache__/__init__.cpython-36.pyc
new file mode 100644
Binary files /dev/null and b/CDARTS_detection/mmdet/core/post_processing/__pycache__/__init__.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/core/post_processing/__pycache__/bbox_nms.cpython-36.pyc b/CDARTS_detection/mmdet/core/post_processing/__pycache__/bbox_nms.cpython-36.pyc
new file mode 100644
Binary files /dev/null and b/CDARTS_detection/mmdet/core/post_processing/__pycache__/bbox_nms.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/core/post_processing/__pycache__/merge_augs.cpython-36.pyc b/CDARTS_detection/mmdet/core/post_processing/__pycache__/merge_augs.cpython-36.pyc
new file mode 100644
Binary files /dev/null and b/CDARTS_detection/mmdet/core/post_processing/__pycache__/merge_augs.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/core/post_processing/bbox_nms.py b/CDARTS_detection/mmdet/core/post_processing/bbox_nms.py
new file mode 100644
index 0000000..cb3fe21
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/post_processing/bbox_nms.py
@@ -0,0 +1,64 @@
+import torch
+
+from mmdet.ops.nms import nms_wrapper
+
+
+def multiclass_nms(multi_bboxes,
+                   multi_scores,
+                   score_thr,
+                   nms_cfg,
+                   max_num=-1,
+                   score_factors=None):
+    """NMS for multi-class bboxes.
+
+    Args:
+        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
+        multi_scores (Tensor): shape (n, #class)
+        score_thr (float): bbox threshold, bboxes with scores lower than it
+            will not be considered.
+        nms_cfg (dict): NMS config. The 'type' key (default 'nms') selects
+            the NMS op from `nms_wrapper`; the remaining keys are passed to
+            that op.
+        max_num (int): if there are more than max_num bboxes after NMS,
+            only the top max_num will be kept; -1 keeps all of them.
+        score_factors (Tensor): The factors multiplied to scores before
+            applying NMS.
+
+    Returns:
+        tuple: (bboxes, labels), tensors of shape (k, 5) and (k, ). Labels
+            are 0-based.
+ """ + num_classes = multi_scores.shape[1] + bboxes, labels = [], [] + nms_cfg_ = nms_cfg.copy() + nms_type = nms_cfg_.pop('type', 'nms') + nms_op = getattr(nms_wrapper, nms_type) + for i in range(1, num_classes): + cls_inds = multi_scores[:, i] > score_thr + if not cls_inds.any(): + continue + # get bboxes and scores of this class + if multi_bboxes.shape[1] == 4: + _bboxes = multi_bboxes[cls_inds, :] + else: + _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4] + _scores = multi_scores[cls_inds, i] + if score_factors is not None: + _scores *= score_factors[cls_inds] + cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1) + cls_dets, _ = nms_op(cls_dets, **nms_cfg_) + cls_labels = multi_bboxes.new_full( + (cls_dets.shape[0], ), i - 1, dtype=torch.long) + bboxes.append(cls_dets) + labels.append(cls_labels) + if bboxes: + bboxes = torch.cat(bboxes) + labels = torch.cat(labels) + if bboxes.shape[0] > max_num: + _, inds = bboxes[:, -1].sort(descending=True) + inds = inds[:max_num] + bboxes = bboxes[inds] + labels = labels[inds] + else: + bboxes = multi_bboxes.new_zeros((0, 5)) + labels = multi_bboxes.new_zeros((0, ), dtype=torch.long) + + return bboxes, labels diff --git a/CDARTS_detection/mmdet/core/post_processing/merge_augs.py b/CDARTS_detection/mmdet/core/post_processing/merge_augs.py new file mode 100644 index 0000000..f97954b --- /dev/null +++ b/CDARTS_detection/mmdet/core/post_processing/merge_augs.py @@ -0,0 +1,96 @@ +import torch + +import numpy as np + +from mmdet.ops import nms +from ..bbox import bbox_mapping_back + + +def merge_aug_proposals(aug_proposals, img_metas, rpn_test_cfg): + """Merge augmented proposals (multiscale, flip, etc.) + + Args: + aug_proposals (list[Tensor]): proposals from different testing + schemes, shape (n, 5). Note that they are not rescaled to the + original image size. + img_metas (list[dict]): image info including "shape_scale" and "flip". + rpn_test_cfg (dict): rpn test config. + + Returns: + Tensor: shape (n, 4), proposals corresponding to original image scale. + """ + recovered_proposals = [] + for proposals, img_info in zip(aug_proposals, img_metas): + img_shape = img_info['img_shape'] + scale_factor = img_info['scale_factor'] + flip = img_info['flip'] + _proposals = proposals.clone() + _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape, + scale_factor, flip) + recovered_proposals.append(_proposals) + aug_proposals = torch.cat(recovered_proposals, dim=0) + merged_proposals, _ = nms(aug_proposals, rpn_test_cfg.nms_thr) + scores = merged_proposals[:, 4] + _, order = scores.sort(0, descending=True) + num = min(rpn_test_cfg.max_num, merged_proposals.shape[0]) + order = order[:num] + merged_proposals = merged_proposals[order, :] + return merged_proposals + + +def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): + """Merge augmented detection bboxes and scores. + + Args: + aug_bboxes (list[Tensor]): shape (n, 4*#class) + aug_scores (list[Tensor] or None): shape (n, #class) + img_shapes (list[Tensor]): shape (3, ). + rcnn_test_cfg (dict): rcnn test config. 
+
+    Returns:
+        tuple: (bboxes, scores)
+    """
+    recovered_bboxes = []
+    for bboxes, img_info in zip(aug_bboxes, img_metas):
+        img_shape = img_info[0]['img_shape']
+        scale_factor = img_info[0]['scale_factor']
+        flip = img_info[0]['flip']
+        bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
+        recovered_bboxes.append(bboxes)
+    bboxes = torch.stack(recovered_bboxes).mean(dim=0)
+    if aug_scores is None:
+        return bboxes
+    else:
+        scores = torch.stack(aug_scores).mean(dim=0)
+        return bboxes, scores
+
+
+def merge_aug_scores(aug_scores):
+    """Merge augmented bbox scores."""
+    if isinstance(aug_scores[0], torch.Tensor):
+        return torch.mean(torch.stack(aug_scores), dim=0)
+    else:
+        return np.mean(aug_scores, axis=0)
+
+
+def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
+    """Merge augmented mask prediction.
+
+    Args:
+        aug_masks (list[ndarray]): shape (n, #class, h, w)
+        img_metas (list[list[dict]]): image info, one list of dicts per
+            augmentation.
+        rcnn_test_cfg (dict): rcnn test config.
+
+    Returns:
+        ndarray: the merged masks.
+    """
+    recovered_masks = [
+        mask if not img_info[0]['flip'] else mask[..., ::-1]
+        for mask, img_info in zip(aug_masks, img_metas)
+    ]
+    if weights is None:
+        merged_masks = np.mean(recovered_masks, axis=0)
+    else:
+        merged_masks = np.average(
+            np.array(recovered_masks), axis=0, weights=np.array(weights))
+    return merged_masks
diff --git a/CDARTS_detection/mmdet/core/utils/__init__.py b/CDARTS_detection/mmdet/core/utils/__init__.py
new file mode 100644
index 0000000..c1a24a6
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/utils/__init__.py
@@ -0,0 +1,7 @@
+from .dist_utils import allreduce_grads, DistOptimizerHook, DistOptimizerArchHook
+from .misc import tensor2imgs, unmap, multi_apply
+
+__all__ = [
+    'allreduce_grads', 'DistOptimizerHook', 'tensor2imgs', 'unmap',
+    'multi_apply', 'DistOptimizerArchHook'
+]
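A quick sanity check of the merging semantics (a sketch, not part of the patch; importing `merge_augs` pulls in the compiled `mmdet.ops`, so this assumes the extensions are built): `merge_aug_scores` simply averages across augmentations and accepts either tensors or ndarrays.

import numpy as np
import torch

from mmdet.core.post_processing.merge_augs import merge_aug_scores

merged = merge_aug_scores([torch.tensor([0.2, 0.8]),
                           torch.tensor([0.4, 0.6])])
# -> tensor([0.3000, 0.7000]); the ndarray branch behaves identically:
merged_np = merge_aug_scores([np.array([0.2, 0.8]), np.array([0.4, 0.6])])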
diff --git a/CDARTS_detection/mmdet/core/utils/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/core/utils/__pycache__/__init__.cpython-36.pyc
new file mode 100644
Binary files /dev/null and b/CDARTS_detection/mmdet/core/utils/__pycache__/__init__.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/core/utils/__pycache__/dist_utils.cpython-36.pyc b/CDARTS_detection/mmdet/core/utils/__pycache__/dist_utils.cpython-36.pyc
new file mode 100644
Binary files /dev/null and b/CDARTS_detection/mmdet/core/utils/__pycache__/dist_utils.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/core/utils/__pycache__/misc.cpython-36.pyc b/CDARTS_detection/mmdet/core/utils/__pycache__/misc.cpython-36.pyc
new file mode 100644
Binary files /dev/null and b/CDARTS_detection/mmdet/core/utils/__pycache__/misc.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/core/utils/dist_utils.py b/CDARTS_detection/mmdet/core/utils/dist_utils.py
new file mode 100644
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/utils/dist_utils.py
+from collections import OrderedDict
+
+import torch.distributed as dist
+from torch._utils import (_flatten_dense_tensors, _unflatten_dense_tensors,
+                          _take_tensors)
+from mmcv.runner import OptimizerHook, OptimizerArchHook
+
+
+def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
+    if bucket_size_mb > 0:
+        bucket_size_bytes = bucket_size_mb * 1024 * 1024
+        buckets = _take_tensors(tensors, bucket_size_bytes)
+    else:
+        buckets = OrderedDict()
+        for tensor in tensors:
+            tp = tensor.type()
+            if tp not in buckets:
+                buckets[tp] = []
+            buckets[tp].append(tensor)
+        buckets = buckets.values()
+
+    for bucket in buckets:
+        flat_tensors = _flatten_dense_tensors(bucket)
+        dist.all_reduce(flat_tensors)
+        flat_tensors.div_(world_size)
+        for tensor, synced in zip(
+                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
+            tensor.copy_(synced)
+
+
+def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
+    grads = [
+        param.grad.data for param in params
+        if param.requires_grad and param.grad is not None
+    ]
+    world_size = dist.get_world_size()
+    if coalesce:
+        _allreduce_coalesced(grads, world_size, bucket_size_mb)
+    else:
+        for tensor in grads:
+            dist.all_reduce(tensor.div_(world_size))
+
+
+class DistOptimizerHook(OptimizerHook):
+    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
+        self.grad_clip = grad_clip
+        self.coalesce = coalesce
+        self.bucket_size_mb = bucket_size_mb
+
+    def after_train_iter(self, runner):
+        # if runner.rank == 0:
+        #     os.system('df -h /dev/shm/')
+        runner.optimizer.zero_grad()
+        runner.outputs['loss'].backward()
+        allreduce_grads(runner.model.parameters(), self.coalesce,
+                        self.bucket_size_mb)
+        if self.grad_clip is not None:
+            self.clip_grads(runner.model.parameters())
+        runner.optimizer.step()
+
+
+class DistOptimizerArchHook(OptimizerArchHook):
+    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
+        self.grad_clip = grad_clip
+        self.coalesce = coalesce
+        self.bucket_size_mb = bucket_size_mb
+
+    def arch_after_train_iter(self, runner):
+        if runner.optimizer_arch is not None:
+            runner.optimizer_arch.zero_grad()
+        # runner.optimizer.zero_grad()
+        runner.outputs_arch['loss'].backward()
+        # allreduce_grads(runner.model.parameters(), self.coalesce,
+        #                 self.bucket_size_mb)
+        params = []
+        if 'backbone' in runner.arch_name:
+            raise NotImplementedError
+        if 'neck' in runner.arch_name:
+            raise NotImplementedError
+        if 'head' in runner.arch_name:
+            raise NotImplementedError
+        allreduce_grads(params, self.coalesce, self.bucket_size_mb)
+        # if self.grad_clip is not None:
+        #     self.clip_grads(runner.model.parameters())
+        # runner.optimizer.step()
+        if runner.optimizer_arch is not None:
+            runner.optimizer_arch.step()
+
\ No newline at end of file
diff --git a/CDARTS_detection/mmdet/core/utils/misc.py b/CDARTS_detection/mmdet/core/utils/misc.py
new file mode 100644
index 0000000..262f168
--- /dev/null
+++ b/CDARTS_detection/mmdet/core/utils/misc.py
@@ -0,0 +1,37 @@
+from functools import partial
+
+import mmcv
+import numpy as np
+from six.moves import map, zip
+
+
+def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
+    num_imgs = tensor.size(0)
+    mean = np.array(mean, dtype=np.float32)
+    std = np.array(std, dtype=np.float32)
+    imgs = []
+    for img_id in range(num_imgs):
+        img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
+        img = mmcv.imdenormalize(
+            img, mean, std, to_bgr=to_rgb).astype(np.uint8)
+        imgs.append(np.ascontiguousarray(img))
+    return imgs
+
+
+def multi_apply(func, *args, **kwargs):
+    pfunc = partial(func, **kwargs) if kwargs else func
+    map_results = map(pfunc, *args)
+    return tuple(map(list, zip(*map_results)))
+
+
+def unmap(data, count, inds, fill=0):
+    """Unmap a subset of items (data) back to the original set of items
+    (of size count)."""
+    if data.dim() == 1:
+        ret = data.new_full((count, ), fill)
+        ret[inds] = data
+    else:
+        new_size = (count, ) + data.size()[1:]
+        ret = data.new_full(new_size, fill)
+        ret[inds, :] = data
+    return ret
diff --git a/CDARTS_detection/mmdet/datasets/__init__.py b/CDARTS_detection/mmdet/datasets/__init__.py
new file mode 100644
index 0000000..361090e
--- /dev/null
+++ b/CDARTS_detection/mmdet/datasets/__init__.py
@@ -0,0 +1,17 @@
+from .custom import CustomDataset
+from .cityscapes import CityscapesDataset
+from .xml_style import XMLDataset
+from .coco import CocoDataset
+from .voc import VOCDataset
+from .wider_face import WIDERFaceDataset
+from .loader import GroupSampler, DistributedGroupSampler, build_dataloader, build_dataloader_arch
+from .dataset_wrappers import ConcatDataset, RepeatDataset
+from .registry import DATASETS
+from .builder import build_dataset
+
+__all__ = [
+    'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset',
+    'CityscapesDataset', 'GroupSampler', 'DistributedGroupSampler',
+    'build_dataloader', 'ConcatDataset', 'RepeatDataset', 'WIDERFaceDataset',
+    'DATASETS', 'build_dataset', 'build_dataloader_arch'
+]
\ No newline at end of file
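`multi_apply` and `unmap` above are used throughout the anchor-target code, so their behaviour is worth pinning down with toy inputs (a sketch, not part of the patch):

import torch

from mmdet.core.utils.misc import multi_apply, unmap

def square_and_cube(x):
    # any function returning a tuple works; multi_apply transposes the
    # per-call tuples into one list per return value
    return x * x, x ** 3

squares, cubes = multi_apply(square_and_cube, [1, 2, 3])
assert squares == [1, 4, 9] and cubes == [1, 8, 27]

# unmap scatters a subset of values back into a full-sized tensor
subset = torch.tensor([7., 9.])
inds = torch.tensor([1, 3])
full = unmap(subset, count=5, inds=inds, fill=0)
# -> tensor([0., 7., 0., 9., 0.])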
zu~o>IFoedmZ-2}O8qx%K_|CY?;Li9$2`@b^zgZ3dE!h;cc13?0XBP&c+e`Ry=XmrSTsl(1i!Q0mnc8c+h z2SGE)B0kUURk4q~0ap1RVW@jB3#58JTvMc^cAG!hOaEe9&Xmf#h`{42zj#j$CzGyKj+|!!2J+)qV9jfU?;LTK5T>$6)WE zrG2(NDaF6cd78)}Ld=m<%*j$0rE*)D;~mr4Z_)~1ssk$%OR>P|9V&#HwrePba|f;mbC z+)oG}+g=khTV-{t8Lfmqw=%;@C@o9ls*DR|2&2llwQAVAA-hI{rdetHLe^5TmgT~X zc3CGjp{HO!H3Wg&w4k#PB!uEn(^4)-GFht>)5-SO*US~cGih@9c@du zM{13v*4%D+G};yg87CA0gGmAv0u_FWswDiTNL9!~fIva03Ppf7swt{M!9!eB!TUZ? zeBW)2Mw)e4qM1H@`}X}k_uO;7b8erVo6A4*X484%eVX>~+SJcP`dM7TFMtS5=xwc% z-1U}GNvWK4C9Upe#Z>o9C5yY!wmP{=PG{*X$FxxOn}! z8`tY>c=vY|iDz*IcR*aNq6@8Jh)qoWBjVZ}4O71^Zs2YTDP9pb@933`_?Y-Oa$daI8}FKobULdFOv+*@no;>4tF*eCkyLuhEU?dXC)ix{Yeb z4P6C3>4&7kSJJ>>gavE{biUX!h*Yc9rT4&9n`;%viX z$8K!Zoccx&S8Vk=HR-n7c!~0&<}~6|=%GZ=_rf~XD$cG;FQ~b0oDMd;0K@D0TbKog zPzZXy41;P7s~)E}ywHt{HB{jHk{B+D(8Z}Tu+j7q1)|xK~-Wv zZWw23ei(wY`7N*ORvT?62r#RDFHW!faw9f3x*n!rqI0YR7`oL)6{m~8*mQb5)U*Oj zWy=GvGCp|P;Dn`LulsE;&S1TyA7Z+GSXJ!ndBY*93cuS2zv|8g8m~98in#fGG*awg zK(45ARl)O`E8?Q?x?GshsiD72=zDF9+z4;SnYPoz0>);??}K|8=|VOzfDLy$nQguf zc2|2%9}<%5cy;M-Ip9qi41!pNz7ue*(}CCBh>Ow%_ZzNsAYPoSkNMZzJ|*ck7sm?f zUJzjY<2*(vwySEn^BjFuNDmF=L*EaZHQ5iEbpxArHRa`Bo znjH=4l+cB7M+?))gCe-BsS>6tkx?bG4Q*r$o<>ggvUcm_D5c(6QL2H~MCzzEN=Ipt zKB|G5=e5Xa<)ZYCu4|i@Z{i8ND|4kz1;Jkm7YB76SFjA?VGy?#Uo%Gf$cUf_4x`HF zjLkHv=`w=?U)5Gpv9Y=q>s|%R8dg0aE67*m&G(&Z`W^QSXt%N5ceL2QCk zosPhcs)wH6P12`CqL_kHb!_(4-u9dM^TMl#b-&ZQ@EAu#-erv8+LSU_ZTpU>I^C`x zI&3OfJ&&bv+QEg@y~>jFi*xG`kZRZIV9A(dY9%k(C`1ljR&otDyyS#0iXhG=bHQG7 zyZsIqACiK+Gi|5lwpGa&I}KIR>ohRyAhtLZWCn2tTfps#6%!)lw%20|Qj#?yf$Tqf`W`g9K?UUaB;thfFw`gc+1wZb8M29unL}Ay~ofmaqaFPUD6H15$&R)D|?k1#exz zen^kF2{ZBopxKDqE47i8AJ}|6v?7z+4ivdN*fkrbe0*x#JwD!c=J53MUK@JG_Bu|( z4Qwb0JLo`f+rG57ys+u_L%SjUeplGATwyazldr4Y%tKdh)_KHp(8-+VF{Kc2b5h;3`&pdY|pq zi_I-hgi!cRmo{}JAE$sb5{XD-Ik7PSQTlEm>u6kwOFTbr((UxZ?P?>GE=QN@cwN~* zY9*7%h=mN;;~^{(n(zb!NCcH4wq2E+qC6xU3e&}Ie88qEP7nUQXgVl^XhvFJ)(_&( z(3iNCX2fT@N}cTHZyr|`my(=UpjjX}DO_n>Ca%ojYf$Bel9bj3Z?^nSVtimd>>_yXNg?$w7Cp6RI#KrKw?+hujcdi_;*n@f^(B6 zX6`Bw`$XFd!dGcZZmpcNFHUOPloQ)uN6BOluFlj~f~7h<0i)kW)tgN>Y+}QPzO7W* z{^0~uch`EpUNUo!+W_TxA@$9qHQA*_vWnfV8l|!nd>~+`1274!&T9HLvT) z?WEm3#z0Z4fjkc+ghh$dXx~xxRl$Kc>jgwp0H!dOZo`+`)apeHtK(w2G|{jbrUI*ghTWTe04hKg)JMOEN)C3IxbatP z@GOW{OvB#e&!ELOVCjy59z(ilSlp*IxzF|q7((~} zLAa7iI7<%1ref(n9>lr9_F((WV0)`cfE?sOG~L1sF-LuPXnIbQl9SmG6-+GrFX92Z z*%;gjwGaTCRvnf+R4~Vg5}2&3yE%9b`rB*)dQ(H9)CWhIFbzJy&PUL;0MQzCT$JH^ z#v`Kf!}q zldsII$rm2DCO@sp0Dq|!>5azx9%TnLtX6hvwXTjVwOVjh-T#PlS~#%{sA9r zBn=RFAWf$KkmQd@CQC%em8%w;O3Cc4nEYdwn^hQbt_pujt<^8#ZEy@^Z><(*)~f`p zGrN5zbtg+zLiNc~eKCPK29_&zPak}Gdcp4L*hcgY48mZ*Vx4yYpUfzWvahJJnPlNm z_A<+b^Fj+bPAhmRb@zSa1@zND0ONSN-p2ka8P)k?4sfD>H z8|DGLVr)h#G4@>K#vG^Tx=WD~hEY)f6aoC_2>M5JgD*g^rbqYb(f?<3bCc21+on)W zMuxEz`BvZx3p)njQg<(>RfIo3evg_eg&7r)PPME^&FG%KsX-Lsxo;k!8i`Wt^u>yA zBa%v+%=iuyC;uRpxk2xtxa&Rq@~(SNb)@y@vYF6stqEnYE7yVCBK5@~t;o~#P zfYx70=9F_jsfK6}T%0hFKact=b3fANiSr&}G(qTeVXw`w&%l#e+YX5y5-_t;+S^{0 z1={WEv_7mXaBMSKQ~NK49Yr6eA*rkO?Z1l2szD`BBVSFl_AGPae04JO-z0NrQ@JP8 z*nf27myBsGTGC5t=9Z9KnrPRuN*_V~5q-GyV75+t6WTj2t+L>GY}%0aI|Yje{{uot z59F%y2k6zNg#owT5Gi=6DQ>Qr=QOOE*~&nR;vF`ip=;f5Oj^pKrN2T;7K2uA-_p2c zQ}u+6k+iHfEfdB=}^HO(RjgV^cV6hH9pu{0(4!R{`m>HgRh}pJ80qrs2}>Yko3=RVW!1jX^a2}k=cX?Z5NNk2EjfL} z@d2h8>)*yu>VBgO=U7dQBRr`fz8KH7!>LUsHtnRm)oI#gM&dJZliHE3HWuz(p^!7Z zs2gq&(pT9n4~}3qlL68*;cpz$+w&ioiMR$i425X?1$+ z_KOehXj}pOIvyb8*mJ_>sli}y{G{T^-sBY@?V4*d`X}Bp-kHj)R2QqPi)Loua9@w# zyiFUs;*pe|lByr-AifHIoU=i0owKj{LEs_0t$acyg2y>1H69Fs_uUSVhD^O>G)cro zRRyrba`pT!0_TkhuOSay6#f)oALic}t>T~aRw_TXcDrCWv(Zu3WO&%?sz{8I)u{~u 
z4X&WhH=u^%GzCb0oyiR1oDkVK-RabC!?%KLN?Hv0Et1c(NOmk=@@ss`-GCrJhjRis z4iqYgR4L6$55x1sNGiM}*fsz(+g_r``HUx6Ev39|^F6j>ipy@NntxxtOdf?M*^C?Dk!MnPPE#|U1J@&fJ2 z;U-f+HwZ8xxHOs@m4IS0Q2~ge5X~WYWFd@Hj7mwWk(>p*4y2TAAZa;R0L z!>2~_&uSD1FCm{sA%VsRQELuZO(zn;`9dDSoB3$IRRWSKU|!{@48>c*iKXmg8{d{5Hw&fH0__&Shf*8g!q0me+CIRGm7#{cqF=9tR;-U~Cd@KIGBB6PZC;J6a%WkwsK9AfFOho;C9pHTEasc%9=A!62sCBGDA zT3O(ld}!e?E8ohC9O8)SF5Y5GD15*}ic!HM)Yn?r1ow==Iktb#xV1JiL_TR3TO*}f z{Jtn~Lm(D87csAR7ib5_D6>J3F3?5^LQa2v4)4;Vd2GY-;6gYb!Lh+sJOg)UVYC>{ zM+-nNMNvZgi#s}?d%0B>^L$F)kr%oK7{7E=6J?CPj4f40ujSodH@}Vf7dGHJ$p{=+ z*yF(VVl>xUz$V6~M6CliweE?u7A|h&QT7`;g8n+TqgX@~7vB(w19wxpc2)bh)-^uH zxtMA$-w~-UI6OKS9*X9o1JS{q)aVfAw-g;}9j+c3mowz+;-Jbo3U!1zs`N7GLp$gN z_haMx!@P?_Vo4kpN5oOF+{lcU2Hyo|mK0|mSDZN{C>CkKNU=g6~fFeja0j+~px9xqB&TLE0WwGcQ81 zy~lB|C7z>&Re?>!O>KUVK}Z)K>SwMl+mz26iOK1f23Xa9m)HoL>=mNoMiCG~|isBQv zz^3@aw+rk;h0|%{0ex-JN9o4y0Fbk4Uv}3WSn6}C{n-H#YwU-FlYz|iSLde)TOQ5? z9uSltPnR77jM^8ks&Ttcy=kj@vz^YUc>Bb4ce9UgJKR?>OLFq+*aFzF05t^tCw$oJ zWoBdvCzO^)7#`m&*iJxb4v7Bj>9w_!Q|YzYfw1O;IVV)vv#fyB+RALvT{|>2TowA` zfM4`KifIF#H2t>tQHHxpP>qwi!~>!aL`Y z(SSaMZf{m=t4~dBhPQ#f$&4n<9WIR#m)*hlOB8EIiTv;xx(o=4Hp1o<0JY%*;Bdpn zayo5JiJ~y}Ls_C^nIWS^!RhwH~D8El|7?R13FwD?=lo0HyQMM{MyFFSt`SI z!}9Llhrj_budc>=9w>$wA8bRYsr1>Y^pjKRr;;>~pW*a|WwgrP>V9v#$8U00j!XI#@-H~L(#0TfCAE$6 z5|V)HxB15Ukh`t&Vk-obs9EC08%W_xB)NSVUwYN?EzfqG@&kO))bjCdSepKz{0EV ztt?L=)l4F#N(L(P`xI#XOPeyTEqzh&K(S5VV*rK!Yi|I*ahDWSK`SrhsR-Q zAY-0UEaHHS@iYT@C7|X-g`s&Qh8EO8T3)qPL_PeWR0g?6(5iaYGpRD3=?#|9hpoyE z4^Btmc2{uas?1}QvOrQKnImBcT@pS^erh1;A<9LP10;;R%0ndlenTE6;Q@<0N0F{lNU&yA$gYMBFXznE|FX&d5+|Hk{3um zK=LBV2T49e@)F61Nv@E*OmdavBP7>Iu9Ms#`6$UNBsWPuM)GkGXyGk&yMklJe_-xE zlKd0%7GkTaJ_D;(5uvSie9=cRsu~xoRhkSc0z73ey>#*V_2;h3HY?sH`4q_}$(KpK zM)Gx%Z;>e7p)|(l@rZB0)Tx~k8;Qnk1xzcL*!j_-xp)AfP(`>GgWb{L?9Y~WonY#MKSZ4WPNw>lH u&$gsL8P6w=_moM* literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/datasets/__pycache__/custom.cpython-36.pyc b/CDARTS_detection/mmdet/datasets/__pycache__/custom.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98c8259a58cb3a3ab29a77caea5325b65bcabd2f GIT binary patch literal 6737 zcmb7I&2JmW72jDdmrIJIB*%6v$4Rz{6N{-z%Sn^EhG0vH?bP`Yg2;gby6$GpS&>U| zxzx-umgFT+LkzDEb%dsXz~{ue~*={@(18B55IQN$l+G z%$u3_{ocGcDwWc;uiNgQU(mF_YZJdb`s;XP8x7Yu>uMd=&{Z$j$f>u{$m6Yd&7jaI zFoMc;t)SQ_2Bk&`-v-Zj=Yn#htomlR5>y*irg_!Q{JR=2aO;uAEw6BxJ7kRo&bG8# z@d-}THft;?Tz4Z^dQnYJEbnu->jZ8;DXX6Fnr^o%pD=(D}$0NT$<7o}XH4wr;gx)attK8&;M+~G`y!c3Kn7qX2@GkH&ui$O*Dxb%@ z$QSq`-X(s4Kf_OA#T-AyPh+IapXF!puJ9#(7Vj#5j-SJOo5g==H+Lr2(1_+~|rLzN=aS zcsTCS*M)7r>-F5gyKu$6;MP0+)`gX6fCoO0+L*oc`sLM`skZ00+7afy_xkLV+v{O` z`Yd(c{rlmbCo#9y>(_hS6~f)WYA^Rz?C;j5w^!g@_rBLX7W_EqbaTgV^+G`uO`ox& z&ajumKC#E`+S|U@<+UktCl39_ZN7Pz;dsyC5l!`aT@Bw$1$Dda*`I+;pQUKnzO;`J zgJ2_1Mt-lAuGvhqo41=qs3!eNcyiezKZRbz?5CQe#W}PrHqh#E9&Ii*(He09Z9cZp z;=2RQ-YJT|M1>AUI@%$VAERHS8TG9LPD2l7ykqsZGdLY^2NQ7mA;ID4z{~;n^9&AV zDt|ixms4=StYav+-)C?J;0TA&xrTxR?wt}IV1?c1vQqJlN<>BbUX&0XYk0=Xj)pV-K!22(_*%y{X*Y0wzRLv+Z6qv<@mkrP{K3m_0@#= z7&WBV-4-OgudiKfhk^Gx+Foma;Px&y_qi)CO1P+tK`^%Iw0|+PcX_d?JVL#{|8;4t z>&vJKr*`#yLWr<4wS(8Dd`8^?5`N@3^|fvYcPOu-Gg|il5~dI*+WH~{n%J$Ti!zQC z6*M&_s?;ay#5^?%XgGm^nan!~Cr(R*gTAxfbz6esLSps(zSs48Uagdpc><%triRo(oTTOyHKZ8g zS!&LpNlFMxK8F;z%=nkCCk`dnNIb9LORk~OiYA-*=Sr;1PO#J2Lthpv=~Y%@3%aSB z_&SNPle)z${owTRvLDNt6q(fI9Xyic6hVPI?2tk#+k~R#5D65cri3|`Xi{(-Kse5A z+JJV_*ukmULls0*%>sblGiP>KJaMKG1kKMAg9HP0QYxf)t4Ca=nF>(62V@@Ay)$wN&W#BeE^%BygCGsu$W|D)7g4v{; z+4)J%=X?9s-BKsZjHPe z<3{$*XK!;2c^gnJY|f~9V~#>DEC_*AHs%6X?u)O5LAL_&0 
z(1`TdAkm=Jm$XChV<#^V4m!CQL5*p<3wN}bQnTE_nVZUG*nZ%)JZVQ^Xv+ZUW2wXgm)dnFqhx@*hJq2S16 zz^IgEwk!L{T5Yh!53|ITGOt@HKOgu}dkVyE36D5vM{T!fU$P(g-L8G#v*D`&hgrC5 z`_fUW6-I4OJn*HLO|7O`6di#Gmgmua-xJ>f{i03{smErmkmxrKjYo$VjJ}XZ}fls_a&5Xl%5o3KksfqmZf&brKs`k8kBs1UY%A)th)h7r_NjT&BJW?0Fa@X0sjoO>acOGR+zN zFzn8dkW?I60k5NY83Rs|=4k9JJ9ut}j7i8TBF3(tZ~*ei3eaW&VDF`roI|Fx3Q43` z#Ym3A!;tNsAX`&j2f@iuF*R9$nfG1z29*uKLXySth;U&<7!p@;)--*TZOqa)=Qwbe ze&hv?^JA=#l&@%rz9ptBA4Bhhr5Q?(){N;SRk)AnOj2q-`Q_@+AIpt}fhy16wP7MuAkZPpnctq-PR9?fH zU*eexC5pzGWMWJsWt^mAxnSd?Cmc(ktiZgU{1asU17G<4?OL;t_SCh&LZ5gbuL6#4 zA}xJbfZz)|1@S_J!Ut3zmU_s(VCpV{(#In!(j(lCXq4_OL(9iahWNFki_aq?&PVw} z9XB-d3l9&K?N&b-*TWZ(xXeaTT zINxH!d{m6f*bVzrMdNXPSlN3!Dv{E`;lG7ltHb%d6^!K7h>ekj;bL5gtMS4%1%M(i zftvYv@euOc{p%fV_YVk#aQxn=>@g89HbhIN^$l|E^ve`e#0Q*=H(lI6 zNWWuyGwgZT5PM;(Y;+(!uIe{^ffxhuam8SSk?^{7hOJB#;ph+Pb ze5HZ)ORK9Z6a9&JJ0fR{=dyx*gBGB-TARk%4+l=v1{b%H(?=sQs1L&VR`?0z2O+n^ zF0b3SB0Ip9n1tdY*T9OW*oYX|8#4^NgDtPvh-`KfnS@$Lg0^SDQo5#bBT~j>tk&P0 zo|RyPM?sS^LZyCpqd~vx-2)M%0lvRl5;L`>r_kS`6X~+8YAE%zy#aRN$UzXeV*j9=DfYwR`op105UGIU?)mZ{zwt4~YL&!# z-`l?-L?{w7Ewzg8(^)U0L0LokJsG*MwZYR{yhjRIO?rh*n!RUv%SGK{ir@jfkok0$0}kKqH4J1MkG<|gVO zjZtx2UAr0uI>SNDoH>nRD{@lyHE=CcW}eojr@Q=+3_Tf#A;5WDW zn2{8rwP)#)RzQVv9aj4YoI7!K&TG9Xp>!1MS><-*+ zHiIGu51Wo7KB1K_QFEResz3-T1SsQ3lMlLz3Ci_V&MDre-a0juYlt6FL%f=3RAr3P z1TrnTipDx+=tdru$LwFaVCAyERkEzIW#Cn`N~3u}t80r%A(ZvL8?}+WqZLaSRmF@X z4{`PP(;L7-6pCiMK8XCTtg9@T6uwbXCB`~#WuDSPrF@Oz=)x_tqL=VmaE)SLP>$1> mPd`y8OF5?ynIKJ3nySNE3PD*=npc;oLdD8DiME`pvi|@wfI}z% literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/datasets/__pycache__/dataset_wrappers.cpython-36.pyc b/CDARTS_detection/mmdet/datasets/__pycache__/dataset_wrappers.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eac569da2f6560a7974dce6393258fc3d41bb3fd GIT binary patch literal 2170 zcmaJ?TW{Mo6ecBEmYp~)x-@IB4deDO#KYhKEwG1R=;|fKFklFVIl}-C1*3?xEJK!D zl17@qeVGIFU+jHdsBZHP$GO0oo4H4cD5z1icFX_;)cbg)D-AG1m zw^+c_?aBsM3piM2FuDQv@TbkaZT51yY5669MAzn*9Y6Y1<3dIB`P zAtNe0;on5V;0+1LumL&(C$XKa+mDI1lFWz#rC$upT4 zrec$owZ^_m^^v=b`fF=6JIFI_eyPl;<8uC@d&K$-573JhYp>Fbe&Pt~2jiqiKnyW- zL6+XcThJw?WI+YpClh}`xNm}LCA_7lE>iN^d-a)V2x>y)U3w?v#qMi=;Z2$gy71vv zgc4pp{h657q`mN#Ael#kK&1Usg?Y09y4>Be4Lyf&yYSnH^Q_1W=eBwLeSa``KCsPk ztYc%8ZK$|NrS)@J*bq}x2!(ZLy<%p!Zl$Ul*g(sCWTTbmwt>2|r5RL9i}NIpwdVYH z^6tCCaXFQbffni2JT4BCD-r8MJuCI$bSk8QEM#R-%@0@dRuf*=itf#>R1<7@0FT}Q zLg;z~eUO(^AZHoNsgfavtaPNt1I?CY(DJwo}`m z$UKL=T~Ovom#X)0{tyW!84c2y<){^SZf64@$np$X4uIUohr&Rq@HdFZjTai$sk=UR^DIlCOoiW?bNUdAFTQic+dDTf>*0G+&AgcAp6 zdK%|>kNu?a!iIw^0SlBA^F*#MZi>E;=2FVSiITH28SDQ;EIZi5DnJ!OQ`^H#iBVXE zr-Cfv-Gzl6WQBnXm4Ve>jp|jm!#6(1nAvZD03Hb^3lAt=P{20fo&iixpi2XmADdv( zz||xKl;UYmwd>3G!^t|Ja<pI6nqvJ-yt-qB-QqlzCNpx%`4eE$P6@rqV2$oT9L1Cx7)UjVm^gVhPfO*`mb-A>my&`X`Be>2QRUsc`jDMr$j0Th6@?eEZNcZdE3eb^#` literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/datasets/__pycache__/registry.cpython-36.pyc b/CDARTS_detection/mmdet/datasets/__pycache__/registry.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da2687960acf0f1e2609899f79f69b90ee814afd GIT binary patch literal 252 zcmX|5&1%9x5T1!CUA4YLuerETucaDj4nh%WJb7E(sV-!bkl7q^?$h*@_S#cl!BeMJ z9Qb}8rB zVJ*QW2!2K`{0x2qNG%RX9KctOIZ-vUdCj=X)jieKRn=cr?WJC?{lWd6=(9Fsf3wFP z7x^nV)Q3m}6Fg-ZAM(r^TAT?>*r}a4L#O7Q)Xlu1$C-3?{g0S%g?Ge+C%u_Hag?U&nA2x&f@>Q$Hny(c*h0Sj&1aupi*%%@T%&14 z5{gM0<hi**KJHUO+1YwB5&|zrWk7SHPJ2C80I+fP}3P1!nN~R;@?cIrBtniv!VHl^8 zQek+H-CtkZDKdExX+A!fM)_KNAR@J ztS%D9mw1~8G_1puk8XbTp`~`V@x|t`K+)qjG3LkrVzB?Aos9kUK}>GZLvRal&`qd) 
zl+&!VG@n`OthP#P#s%NA)Uvj}=cOgAQ>?=D5YE`D{L(7@Js+-UN4UE#$(uXM<^oA0C6g=%qs6;}{TxN;U(PE<=}zg#F!jL>U5uZKP^mP<2ho&C&j zIbX9||9#EM{%&9JQ;5_(hpfQ)g5aWmmscnEUw?SQ3(X1T$#S9AwLrRNyw-FBwX_hs zi<{u!B>?3IyoSVhP5C%n&ZLGUUiq$CNz@7f7@;{S05k51Y@+Qaaj0jIj9JNU_A81l zX48!SV%>7K7`%d?5SHDs)FCDV!g^F7Y^~Mvh9?6KFX8uE2wp|*etSJll#YvRa+!82 zN?MN{o_h>%ROqIWmaFS&fz?i3M&`kuIth)706zcTz%gBg0y2asA4_HYq?m?!2aG>Q zy7AItJeE(E2y$wRjwYnmPIW6@V!EGVjZ5f_x4m8LOVv1M!CNS4Z1k>igbc5}fi>jb z!7+CO-F+KphutR5SH7$857jx*exeTDjW21sh$*j2D|{fzq{3 zeG_~l4;s!J6_8Oet^G9aD8-5kXUK|6NQ{5=(~YgI>svs<1WG*9lKC`~fKjCC8BMTQ zXEMqMPu2Hfe2{9IYGu3z3ANd#H^CBLC~P>mS;0lpc7e1lV9zP;ND(&D%5>_o7m=~* zVP~o}4t=4*Jj$?5(19U#E*_fJScgqW>FBzJ!76FC@g&1{jg41ltO(|CYEVOfX*wXm z;aQxikC8CH%@?s!(c9ik(l1R zJ;X9i!|)Ma5t=WP5XH1^K^`)73IsOe^g33z({PN}5}SjTY1dt3H7-CthOPkQ7&8(M vfPVOx)_#_TN*|;WNUK?BK1-O}Tx)w|O2ajmyWV(pnQD}xz|-Y)?LPk>7R7Bc literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/datasets/__pycache__/wider_face.cpython-36.pyc b/CDARTS_detection/mmdet/datasets/__pycache__/wider_face.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37bb44a0163e619e740be360364b2f9396bb7128 GIT binary patch literal 1551 zcmZWp&5s*36u0L)lgW007J;}77o?zWBEbzIN|ap?(y~;|ilUK_<&K?XhWV=PY}<-O;h-?#JmXcRtpwWbeTg#Jah7Rcyp*zzF| zh8QkTjuWfn?A*!S#OruJ@tMW!A}9yR03+a?A}oi=5F16f^cwRGe2=& zpe1HDbDjr@+dE9W7sx@056%L8`1rfylSkjslPyfP@9Uu%?`F!s*ztz zr>jh@H%}&MT~1|vAt;dPvkbf+Ey3SWLuE>fql-E{qE%HZs=%=9ENI6dQd26kEZC+N zL)h|tAPOnmqMemnOVr{&Ff;){*7$5}YgcX>E`({O4hYGrOc64+v?F=3)ZQ=8s8~rT z`hbwMpi&a@H+nUnuIrM22DDmTZD=)3uNak6+0=4cmW(T&n&^6dy04V%vmqwdNpmFv zaQU7I(Zq}J^@ne_VDdVgv_k-B1lwGj!;%l+1abHDZTV28Fr8{`9KZVXh|4uE%eI3I!>?qxuC@~}ht zM%qTLl}BpSI-QrOb$3=9wnG;D4wf)_diODEEvWS1?ArTvJIR}s9?ou>*C{W{0*ako zbL~R6)mnRNo~_nOd&|0j?vEX9WlRUltl$+bxj2Np#XCU82ihsi^gXI0g zxoM_Q+qG=8lh;|L!>nA9M(|~JsjZW9?KV_MuKg8PqOKK8P*yQ*XBDuDUxIhqG#pe8 z%ThHSi2*p&0Zd-+VqolL)v}h_?^q_a*E8a72Zg*yLm?kW3C^`++=x3S_BRQLca0dm zA*Q|hu&5~`w?^Ddu>1rF!VV7iUj)Aa>_hk>&|C0@un(;3kN-DEZ(AH=;ek1L_Av3L zriAE_kg{f*!tfCxPdBvKpZJ8ZIt8^qKbfTlej5MPgcn)@~$nLbzLBE!hzD$OS4*;U?LG32c7|tgr1uuD}&P@s3PNj#pbmAl9 z;GVe@2)=?b5Ld#yeLXPsm+GqE!ZGrl{+Nq>nw|fffP1cMK=5T>h_B55i8S2_abyKn Jgd_U^{|DjLrJMi& literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/datasets/__pycache__/xml_style.cpython-36.pyc b/CDARTS_detection/mmdet/datasets/__pycache__/xml_style.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c48b40985bd2634071d09b617106037997828c1 GIT binary patch literal 2709 zcmZuzTW=$`6(;Adt7X}{Yd7g7ZQ4{tD+jwpAA;?s?yeKGNQ@RX&_GBq7>krVleuU) z_6(0}QvMRD?Q5BW4f+Ki#czSqpbb2zT z@2-E&#wdp{BpY$TFtgxmHsLgfmyMAv9O1rXBS(0`$J_myIc(%%#D;Eu_UR{^5st^! 
diff --git a/CDARTS_detection/mmdet/datasets/coco.py b/CDARTS_detection/mmdet/datasets/coco.py
new file mode 100644
--- /dev/null
+++ b/CDARTS_detection/mmdet/datasets/coco.py
+import logging
+import os.path as osp
+import tempfile
+
+import mmcv
+import numpy as np
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+
+from mmdet.core import eval_recalls
+from mmdet.utils import print_log
+from .custom import CustomDataset
+from .registry import DATASETS
+
+
+@DATASETS.register_module
+class CocoDataset(CustomDataset):
+
+    CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+               'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+               'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+               'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
+               'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
+               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball',
+               'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
+               'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup',
+               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
+               'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza',
+               'donut', 'cake', 'chair', 'couch', 'potted_plant', 'bed',
+               'dining_table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+               'keyboard', 'cell_phone', 'microwave', 'oven', 'toaster',
+               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
+               'teddy_bear', 'hair_drier', 'toothbrush')
+
+    def load_annotations(self, ann_file):
+        self.coco = COCO(ann_file)
+        self.cat_ids = self.coco.getCatIds()
+        self.cat2label = {
+            cat_id: i + 1
+            for i, cat_id in enumerate(self.cat_ids)
+        }
+        self.img_ids = self.coco.getImgIds()
+        img_infos = []
+        for i in self.img_ids:
+            info = self.coco.loadImgs([i])[0]
+            info['filename'] = info['file_name']
+            img_infos.append(info)
+        return img_infos
+
+    def get_ann_info(self, idx):
+        img_id = self.img_infos[idx]['id']
+        ann_ids = self.coco.getAnnIds(imgIds=[img_id])
+        ann_info = self.coco.loadAnns(ann_ids)
+        return self._parse_ann_info(self.img_infos[idx], ann_info)
+
+    def _filter_imgs(self, min_size=32):
+        """Filter images too small or without ground truths."""
+        valid_inds = []
+        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
+        for i, img_info in enumerate(self.img_infos):
+            if self.filter_empty_gt and self.img_ids[i] not in ids_with_ann:
+                continue
+            if min(img_info['width'], img_info['height']) >= min_size:
+                valid_inds.append(i)
+        return valid_inds
+
+    def _parse_ann_info(self, img_info, ann_info):
+        """Parse bbox and mask annotation.
+
+        Args:
+            img_info (dict): Meta info of an image.
+            ann_info (list[dict]): Annotation info of an image.
+
+        Returns:
+            dict: A dict containing the following keys: bboxes, bboxes_ignore,
+                labels, masks, seg_map. "masks" are raw annotations and not
+                decoded into binary masks.
+ """ + gt_bboxes = [] + gt_labels = [] + gt_bboxes_ignore = [] + gt_masks_ann = [] + + for i, ann in enumerate(ann_info): + if ann.get('ignore', False): + continue + x1, y1, w, h = ann['bbox'] + if ann['area'] <= 0 or w < 1 or h < 1: + continue + bbox = [x1, y1, x1 + w - 1, y1 + h - 1] + if ann.get('iscrowd', False): + gt_bboxes_ignore.append(bbox) + else: + gt_bboxes.append(bbox) + gt_labels.append(self.cat2label[ann['category_id']]) + gt_masks_ann.append(ann['segmentation']) + + if gt_bboxes: + gt_bboxes = np.array(gt_bboxes, dtype=np.float32) + gt_labels = np.array(gt_labels, dtype=np.int64) + else: + gt_bboxes = np.zeros((0, 4), dtype=np.float32) + gt_labels = np.array([], dtype=np.int64) + + if gt_bboxes_ignore: + gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) + else: + gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) + + seg_map = img_info['filename'].replace('jpg', 'png') + + ann = dict( + bboxes=gt_bboxes, + labels=gt_labels, + bboxes_ignore=gt_bboxes_ignore, + masks=gt_masks_ann, + seg_map=seg_map) + + return ann + + def xyxy2xywh(self, bbox): + _bbox = bbox.tolist() + return [ + _bbox[0], + _bbox[1], + _bbox[2] - _bbox[0] + 1, + _bbox[3] - _bbox[1] + 1, + ] + + def _proposal2json(self, results): + json_results = [] + for idx in range(len(self)): + img_id = self.img_ids[idx] + bboxes = results[idx] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = 1 + json_results.append(data) + return json_results + + def _det2json(self, results): + json_results = [] + for idx in range(len(self)): + img_id = self.img_ids[idx] + result = results[idx] + for label in range(len(result)): + bboxes = result[label] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = self.cat_ids[label] + json_results.append(data) + return json_results + + def _segm2json(self, results): + bbox_json_results = [] + segm_json_results = [] + for idx in range(len(self)): + img_id = self.img_ids[idx] + det, seg = results[idx] + for label in range(len(det)): + # bbox results + bboxes = det[label] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(bboxes[i][4]) + data['category_id'] = self.cat_ids[label] + bbox_json_results.append(data) + + # segm results + # some detectors use different scores for bbox and mask + if isinstance(seg, tuple): + segms = seg[0][label] + mask_score = seg[1][label] + else: + segms = seg[label] + mask_score = [bbox[4] for bbox in bboxes] + for i in range(bboxes.shape[0]): + data = dict() + data['image_id'] = img_id + data['bbox'] = self.xyxy2xywh(bboxes[i]) + data['score'] = float(mask_score[i]) + data['category_id'] = self.cat_ids[label] + if isinstance(segms[i]['counts'], bytes): + segms[i]['counts'] = segms[i]['counts'].decode() + data['segmentation'] = segms[i] + segm_json_results.append(data) + return bbox_json_results, segm_json_results + + def results2json(self, results, outfile_prefix): + """Dump the detection results to a json file. + + There are 3 types of results: proposals, bbox predictions, mask + predictions, and they have different data types. This method will + automatically recognize the type, and dump them to json files. + + Args: + results (list[list | tuple | ndarray]): Testing results of the + dataset. 
+            outfile_prefix (str): The filename prefix of the json files. If the
+                prefix is "somepath/xxx", the json files will be named
+                "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
+                "somepath/xxx.proposal.json".
+
+        Returns:
+            dict[str: str]: Possible keys are "bbox", "segm", "proposal", and
+                values are corresponding filenames.
+        """
+        result_files = dict()
+        if isinstance(results[0], list):
+            json_results = self._det2json(results)
+            result_files['bbox'] = '{}.{}.json'.format(outfile_prefix, 'bbox')
+            result_files['proposal'] = '{}.{}.json'.format(
+                outfile_prefix, 'bbox')
+            mmcv.dump(json_results, result_files['bbox'])
+        elif isinstance(results[0], tuple):
+            json_results = self._segm2json(results)
+            result_files['bbox'] = '{}.{}.json'.format(outfile_prefix, 'bbox')
+            result_files['proposal'] = '{}.{}.json'.format(
+                outfile_prefix, 'bbox')
+            result_files['segm'] = '{}.{}.json'.format(outfile_prefix, 'segm')
+            mmcv.dump(json_results[0], result_files['bbox'])
+            mmcv.dump(json_results[1], result_files['segm'])
+        elif isinstance(results[0], np.ndarray):
+            json_results = self._proposal2json(results)
+            result_files['proposal'] = '{}.{}.json'.format(
+                outfile_prefix, 'proposal')
+            mmcv.dump(json_results, result_files['proposal'])
+        else:
+            raise TypeError('invalid type of results')
+        return result_files
+
+    def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
+        gt_bboxes = []
+        for i in range(len(self.img_ids)):
+            ann_ids = self.coco.getAnnIds(imgIds=self.img_ids[i])
+            ann_info = self.coco.loadAnns(ann_ids)
+            if len(ann_info) == 0:
+                gt_bboxes.append(np.zeros((0, 4)))
+                continue
+            bboxes = []
+            for ann in ann_info:
+                if ann.get('ignore', False) or ann['iscrowd']:
+                    continue
+                x1, y1, w, h = ann['bbox']
+                bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
+            bboxes = np.array(bboxes, dtype=np.float32)
+            if bboxes.shape[0] == 0:
+                bboxes = np.zeros((0, 4))
+            gt_bboxes.append(bboxes)
+
+        recalls = eval_recalls(
+            gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
+        ar = recalls.mean(axis=1)
+        return ar
+
+    def evaluate(self,
+                 results,
+                 metric='bbox',
+                 logger=None,
+                 jsonfile_prefix=None,
+                 classwise=False,
+                 proposal_nums=(100, 300, 1000),
+                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
+        """Evaluation in COCO protocol.
+
+        Args:
+            results (list): Testing results of the dataset.
+            metric (str | list[str]): Metrics to be evaluated.
+            logger (logging.Logger | str | None): Logger used for printing
+                related information during evaluation. Default: None.
+            jsonfile_prefix (str | None): The prefix of the output json files,
+                including the file path, e.g., "a/b/prefix". If None, a
+                temporary directory is used. Default: None.
+            classwise (bool): Whether to evaluate the AP for each class.
+            proposal_nums (Sequence[int]): Proposal number used for evaluating
+                recalls, such as recall@100, recall@1000.
+                Default: (100, 300, 1000).
+            iou_thrs (Sequence[float]): IoU thresholds used for evaluating
+                recalls. If set to a list, the average recall of all IoUs will
+                also be computed. Default: np.arange(0.5, 0.96, 0.05).
+
+        Returns:
+            dict[str: float]
+        """
+        assert isinstance(results, list), 'results must be a list'
+        assert len(results) == len(self), (
+            'The length of results is not equal to the dataset len: {} != {}'.
+            format(len(results), len(self)))
+
+        metrics = metric if isinstance(metric, list) else [metric]
+        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
+        for metric in metrics:
+            if metric not in allowed_metrics:
+                raise KeyError('metric {} is not supported'.format(metric))
+
+        if jsonfile_prefix is None:
+            tmp_dir = tempfile.TemporaryDirectory()
+            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
+        else:
+            tmp_dir = None
+        result_files = self.results2json(results, jsonfile_prefix)
+
+        eval_results = {}
+        cocoGt = self.coco
+        for metric in metrics:
+            msg = 'Evaluating {}...'.format(metric)
+            if logger is None:
+                msg = '\n' + msg
+            print_log(msg, logger=logger)
+
+            if metric == 'proposal_fast':
+                ar = self.fast_eval_recall(
+                    results, proposal_nums, iou_thrs, logger='silent')
+                log_msg = []
+                for i, num in enumerate(proposal_nums):
+                    eval_results['AR@{}'.format(num)] = ar[i]
+                    log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
+                log_msg = ''.join(log_msg)
+                print_log(log_msg, logger=logger)
+                continue
+
+            if metric not in result_files:
+                raise KeyError('{} is not in results'.format(metric))
+            try:
+                cocoDt = cocoGt.loadRes(result_files[metric])
+            except IndexError:
+                print_log(
+                    'The testing results of the whole dataset are empty.',
+                    logger=logger,
+                    level=logging.ERROR)
+                break
+
+            iou_type = 'bbox' if metric == 'proposal' else metric
+            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
+            cocoEval.params.imgIds = self.img_ids
+            if metric == 'proposal':
+                cocoEval.params.useCats = 0
+                cocoEval.params.maxDets = list(proposal_nums)
+                cocoEval.evaluate()
+                cocoEval.accumulate()
+                cocoEval.summarize()
+                metric_items = [
+                    'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000',
+                    'AR_l@1000'
+                ]
+                for i, item in enumerate(metric_items):
+                    val = float('{:.3f}'.format(cocoEval.stats[i + 6]))
+                    eval_results[item] = val
+            else:
+                cocoEval.evaluate()
+                cocoEval.accumulate()
+                cocoEval.summarize()
+                if classwise:  # Compute per-category AP
+                    pass  # TODO
+                metric_items = [
+                    'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
+                ]
+                for i in range(len(metric_items)):
+                    key = '{}_{}'.format(metric, metric_items[i])
+                    val = float('{:.3f}'.format(cocoEval.stats[i]))
+                    eval_results[key] = val
+                eval_results['{}_mAP_copypaste'.format(metric)] = (
+                    '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
+                    '{ap[4]:.3f} {ap[5]:.3f}').format(ap=cocoEval.stats[:6])
+        if tmp_dir is not None:
+            tmp_dir.cleanup()
+        return eval_results
diff --git a/CDARTS_detection/mmdet/datasets/custom.py b/CDARTS_detection/mmdet/datasets/custom.py
new file mode 100644
index 0000000..aea338a
--- /dev/null
+++ b/CDARTS_detection/mmdet/datasets/custom.py
@@ -0,0 +1,211 @@
+import os.path as osp
+
+import mmcv
+import numpy as np
+from torch.utils.data import Dataset
+
+from mmdet.core import eval_map, eval_recalls
+from .pipelines import Compose
+from .registry import DATASETS
+
+
+@DATASETS.register_module
+class CustomDataset(Dataset):
+    """Custom dataset for detection.
+
+    Annotation format:
+        [
+            {
+                'filename': 'a.jpg',
+                'width': 1280,
+                'height': 720,
+                'ann': {
+                    'bboxes': (n, 4),
+                    'labels': (n, ),
+                    'bboxes_ignore': (k, 4), (optional field)
+                    'labels_ignore': (k, ) (optional field)
+                }
+            },
+            ...
+        ]
+
+    The `ann` field is optional for testing.
+ """ + + CLASSES = None + + def __init__(self, + ann_file, + pipeline, + data_root=None, + img_prefix='', + seg_prefix=None, + proposal_file=None, + test_mode=False, + filter_empty_gt=True): + self.ann_file = ann_file + self.data_root = data_root + self.img_prefix = img_prefix + self.seg_prefix = seg_prefix + self.proposal_file = proposal_file + self.test_mode = test_mode + self.filter_empty_gt = filter_empty_gt + + # join paths if data_root is specified + if self.data_root is not None: + if not osp.isabs(self.ann_file): + self.ann_file = osp.join(self.data_root, self.ann_file) + if not (self.img_prefix is None or osp.isabs(self.img_prefix)): + self.img_prefix = osp.join(self.data_root, self.img_prefix) + if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)): + self.seg_prefix = osp.join(self.data_root, self.seg_prefix) + if not (self.proposal_file is None + or osp.isabs(self.proposal_file)): + self.proposal_file = osp.join(self.data_root, + self.proposal_file) + # load annotations (and proposals) + self.img_infos = self.load_annotations(self.ann_file) + if self.proposal_file is not None: + self.proposals = self.load_proposals(self.proposal_file) + else: + self.proposals = None + # filter images too small + if not test_mode: + valid_inds = self._filter_imgs() + self.img_infos = [self.img_infos[i] for i in valid_inds] + if self.proposals is not None: + self.proposals = [self.proposals[i] for i in valid_inds] + # set group flag for the sampler + if not self.test_mode: + self._set_group_flag() + # processing pipeline + self.pipeline = Compose(pipeline) + + def __len__(self): + return len(self.img_infos) + + def load_annotations(self, ann_file): + return mmcv.load(ann_file) + + def load_proposals(self, proposal_file): + return mmcv.load(proposal_file) + + def get_ann_info(self, idx): + return self.img_infos[idx]['ann'] + + def pre_pipeline(self, results): + results['img_prefix'] = self.img_prefix + results['seg_prefix'] = self.seg_prefix + results['proposal_file'] = self.proposal_file + results['bbox_fields'] = [] + results['mask_fields'] = [] + results['seg_fields'] = [] + + def _filter_imgs(self, min_size=32): + """Filter images too small.""" + valid_inds = [] + for i, img_info in enumerate(self.img_infos): + if min(img_info['width'], img_info['height']) >= min_size: + valid_inds.append(i) + return valid_inds + + def _set_group_flag(self): + """Set flag according to image aspect ratio. + + Images with aspect ratio greater than 1 will be set as group 1, + otherwise group 0. 
+ """ + self.flag = np.zeros(len(self), dtype=np.uint8) + for i in range(len(self)): + img_info = self.img_infos[i] + if img_info['width'] / img_info['height'] > 1: + self.flag[i] = 1 + + def _rand_another(self, idx): + pool = np.where(self.flag == self.flag[idx])[0] + return np.random.choice(pool) + + def __getitem__(self, idx): + if self.test_mode: + return self.prepare_test_img(idx) + while True: + data = self.prepare_train_img(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def prepare_train_img(self, idx): + img_info = self.img_infos[idx] + ann_info = self.get_ann_info(idx) + results = dict(img_info=img_info, ann_info=ann_info) + if self.proposals is not None: + results['proposals'] = self.proposals[idx] + self.pre_pipeline(results) + return self.pipeline(results) + + def prepare_test_img(self, idx): + img_info = self.img_infos[idx] + results = dict(img_info=img_info) + if self.proposals is not None: + results['proposals'] = self.proposals[idx] + self.pre_pipeline(results) + return self.pipeline(results) + + def evaluate(self, + results, + metric='mAP', + logger=None, + proposal_nums=(100, 300, 1000), + iou_thr=0.5, + scale_ranges=None): + """Evaluate the dataset. + + Args: + results (list): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + logger (logging.Logger | None | str): Logger used for printing + related information during evaluation. Default: None. + proposal_nums (Sequence[int]): Proposal number used for evaluating + recalls, such as recall@100, recall@1000. + Default: (100, 300, 1000). + iou_thr (float | list[float]): IoU threshold. It must be a float + when evaluating mAP, and can be a list when evaluating recall. + Default: 0.5. + scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP. + Default: None. + """ + allowed_metrics = ['mAP', 'recall'] + if metric not in allowed_metrics: + raise KeyError('metric {} is not supported'.format(metric)) + annotations = [self.get_ann_info(i) for i in range(len(self))] + eval_results = {} + if metric == 'mAP': + assert isinstance(iou_thr, float) + mean_ap, _ = eval_map( + results, + annotations, + scale_ranges=scale_ranges, + iou_thr=iou_thr, + dataset=self.CLASSES, + logger=logger) + eval_results['mAP'] = mean_ap + elif metric == 'recall': + gt_bboxes = [ann['bboxes'] for ann in annotations] + if isinstance(iou_thr, float): + iou_thr = [iou_thr] + recalls = eval_recalls( + gt_bboxes, + results, + proposal_nums, + iou_thr, + print_summary=False) + for i, num in enumerate(proposal_nums): + for j, iou in enumerate(iou_thr): + eval_results['recall@{}@{}'.format(num, iou)] = recalls[i, + j] + if recalls.shape[1] > 1: + ar = recalls.mean(axis=1) + for i, num in enumerate(proposal_nums): + eval_results['AR@{}'.format(num)] = ar[i] + return eval_results diff --git a/CDARTS_detection/mmdet/datasets/dataset_wrappers.py b/CDARTS_detection/mmdet/datasets/dataset_wrappers.py new file mode 100644 index 0000000..e749cb0 --- /dev/null +++ b/CDARTS_detection/mmdet/datasets/dataset_wrappers.py @@ -0,0 +1,55 @@ +import numpy as np +from torch.utils.data.dataset import ConcatDataset as _ConcatDataset + +from .registry import DATASETS + + +@DATASETS.register_module +class ConcatDataset(_ConcatDataset): + """A wrapper of concatenated dataset. + + Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but + concat the group flag for image aspect ratio. + + Args: + datasets (list[:obj:`Dataset`]): A list of datasets. 
+ """ + + def __init__(self, datasets): + super(ConcatDataset, self).__init__(datasets) + self.CLASSES = datasets[0].CLASSES + if hasattr(datasets[0], 'flag'): + flags = [] + for i in range(0, len(datasets)): + flags.append(datasets[i].flag) + self.flag = np.concatenate(flags) + + +@DATASETS.register_module +class RepeatDataset(object): + """A wrapper of repeated dataset. + + The length of repeated dataset will be `times` larger than the original + dataset. This is useful when the data loading time is long but the dataset + is small. Using RepeatDataset can reduce the data loading time between + epochs. + + Args: + dataset (:obj:`Dataset`): The dataset to be repeated. + times (int): Repeat times. + """ + + def __init__(self, dataset, times): + self.dataset = dataset + self.times = times + self.CLASSES = dataset.CLASSES + if hasattr(self.dataset, 'flag'): + self.flag = np.tile(self.dataset.flag, times) + + self._ori_len = len(self.dataset) + + def __getitem__(self, idx): + return self.dataset[idx % self._ori_len] + + def __len__(self): + return self.times * self._ori_len diff --git a/CDARTS_detection/mmdet/datasets/loader/__init__.py b/CDARTS_detection/mmdet/datasets/loader/__init__.py new file mode 100644 index 0000000..87f88d6 --- /dev/null +++ b/CDARTS_detection/mmdet/datasets/loader/__init__.py @@ -0,0 +1,4 @@ +from .build_loader import build_dataloader, build_dataloader_arch +from .sampler import DistributedGroupSampler, GroupSampler + +__all__ = ['GroupSampler', 'DistributedGroupSampler', 'build_dataloader', 'build_dataloader_arch'] diff --git a/CDARTS_detection/mmdet/datasets/loader/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/datasets/loader/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9f6fb9052bd7d332e7d38f900bf1cf6bfe5bf33 GIT binary patch literal 361 zcmZ8d!A`^=5Cz(9o6T<0i}4$J5#wHsdN9$Gm%VWzY=>+~pfo@a{Um>>SL4B7*pmbGmz;nM@M#5R3LQ2xX+@x?(vjffXT0Y{a33)as0TCY|(S4@WS!g#J?&v;ejbE@)~&3Y_rf0N zHgz&5?}z<-5DxNTIBcVh!cj81+JiFQ;&318iPEW?>{I&)Ey$LDXP1K|UJ-P@M%G=lb~ z_lC^^EbUDnK<&Ud7^UtsfZ7F62LS4@cnDGd;GT{OF}9|LwFlNecUG&l_FIGpEy6?Y zYlH`)Cx$%W5S6-suuVIFu&ogu?R@(G>9Z$BVsD||2JcMMaV{;+;eWoZa34=0lFBR>O>@M}fooN~oip_Sg_AX=F(uNwJbv%vijZ7NEC;U}?dnwaChn z{U7efY@`D3DWMLR?JF?>5&C+K5sLvncA(7aiiRD{@Z{@F`HjLfUfEU1abr70^1|f zu-?OCJlWAc!IQhZgT3Ls<$u>^A3>xkVG8{7snOKzePuk7okd;Dd0A$aenF%KDOs)A zedrAjc?qjIu6deO$7(0XH!qf@ZSZPY_-RW0PHNfp1BiWl5$V zDLHsMcJqqq_iXpPNtY#6S+jW(cGTmSrFqK3?zg8;zJ78Zoqcoij13p+iH)##4`GD=1;g_5W&i*H literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/datasets/loader/__pycache__/sampler.cpython-36.pyc b/CDARTS_detection/mmdet/datasets/loader/__pycache__/sampler.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec07ce21fbfe5a90006466674826e96877d528ec GIT binary patch literal 6004 zcmb_g&5zs073c6nq&`+FuVs6^P7}uHN94NNz;Tn*brZXB;3RDqMH3)}(14*yN=q$8 zYKJ4oYe+!Rb>J9C02c*wkFZ6uYE-LtumnK+|ruf}x?^?I)^w3}L33GI7YXh*euqgO@S39D#VqdM9Rv}<7< z?K-ztLj6sx)A$rC&^n@U!uWQa$630C#>ya4UKr=fi_?Av!rBcqWPEF)qVP?BG)^K3 zsy(j;B;(*md!t|eO%##V6QPFv*!Q)*2#wIZSM3?06;|$PJ@dY1YCQ|9a3qb|dMmJK zB!2X!$H0?6fkJ8f+E7fzRG%85_{b32)SPM_6oujXR!1)y>15=|Xq?1>pBJX|)18h{ zSovfe$-?ollUR9PVdvYEem{viR$=B*(l6}LSAHHTS-~{YrlN)d+pBtBkobA-d4JMA zzqz@cjiO5^)4|@vPd9_T(9bvXah7k6Mq#9)KtX$DeRD1|-%K(;jO1os>a087lT`>` z!IQJ1#G1H!?u*Lp9;`PI=t@7*1g-EDJoz6`OtqmVFDo(B_q7=`Gc-Uk_hCe!S*n7T zy$?FHr4JqS(bG0K)dSAHD5=mWw20m{aP)n#uD!3H)m}jVvQ3;FJy)v!E7HC;Y!C&{ z%KOIIDuZ6b*~-Mn&^_o=1|-;8iIzS)L?mtf-S^ 
diff --git a/CDARTS_detection/mmdet/datasets/loader/sampler.py b/CDARTS_detection/mmdet/datasets/loader/sampler.py
new file mode 100644
--- /dev/null
+++ b/CDARTS_detection/mmdet/datasets/loader/sampler.py
+            if size > 0:
+                indice = np.where(self.flag == i)[0]
+                assert len(indice) == size
+                extra = int(
+                    math.ceil(
+                        size * 1.0 / self.samples_per_gpu / self.num_replicas)
+                ) * self.samples_per_gpu * self.num_replicas - len(indice)
+                indice = np.concatenate([indice, indice[:extra]])
+
+                if self.mode == 'train':
+                    split = int(size/sum(self.group_sizes)*self.split)
+                    if (size * self.split) % sum(self.group_sizes) != 0:
+                        size_flag += 1
+                    if i == len(self.group_sizes) - 1 and size_flag != 0:
+                        split += 1
+                    indice = indice[:split]
+                elif self.mode == 'val':
+                    split = int(size/sum(self.group_sizes)*self.split)
+                    if (size * self.split) % sum(self.group_sizes) != 0:
+                        size_flag += 1
+                    if i == len(self.group_sizes) - 1 and size_flag != 0:
+                        split += 1
+                    indice = indice[split:]
+                indice = indice[list(
+                    torch.randperm(int(len(indice)), generator=g))].tolist()
+                indices += indice
+        assert len(indices) == self.total_size
+
+        indices = [
+            indices[j] for i in list(
+                torch.randperm(
+                    len(indices) // self.samples_per_gpu, generator=g))
+            for j in range(i * self.samples_per_gpu, (i + 1) *
+                           self.samples_per_gpu)
+        ]
+        # subsample
+        offset = self.num_samples * self.rank
+        indices = indices[offset:offset + self.num_samples]
+        assert len(indices) == self.num_samples
+
+        return iter(indices)
+
+    def __len__(self):
+        return self.num_samples
+
+    def set_epoch(self, epoch):
+        self.epoch = epoch
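A hedged usage sketch for the sampler recovered above. The constructor arguments are inferred from the attributes `__iter__` consumes (`samples_per_gpu`, `num_replicas`, `rank`, `mode`, `split`) and should be verified against the full definition; `dataset`, `world_size`, `rank` and `total_epochs` are placeholders.

import torch
from mmdet.datasets.loader import DistributedGroupSampler

# assumed signature, mirroring the attributes used in __iter__
sampler = DistributedGroupSampler(dataset, samples_per_gpu=2,
                                  num_replicas=world_size, rank=rank)
loader = torch.utils.data.DataLoader(dataset,
                                     batch_size=2,
                                     sampler=sampler,
                                     num_workers=2)
for epoch in range(total_epochs):
    # reseeds the torch.Generator so all replicas draw the same permutation
    sampler.set_epoch(epoch)
    for batch in loader:
        ...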
diff --git a/CDARTS_detection/mmdet/datasets/pipelines/__init__.py b/CDARTS_detection/mmdet/datasets/pipelines/__init__.py
new file mode 100644
index 0000000..ae55b25
--- /dev/null
+++ b/CDARTS_detection/mmdet/datasets/pipelines/__init__.py
@@ -0,0 +1,16 @@
+from .compose import Compose
+from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
+                        Transpose, to_tensor)
+from .loading import LoadAnnotations, LoadImageFromFile, LoadProposals
+from .test_aug import MultiScaleFlipAug
+from .transforms import (Albu, Expand, MinIoURandomCrop, Normalize, Pad,
+                         PhotoMetricDistortion, RandomCrop, RandomFlip, Resize,
+                         SegResizeFlipPadRescale)
+
+__all__ = [
+    'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
+    'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
+    'LoadProposals', 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad',
+    'RandomCrop', 'Normalize', 'SegResizeFlipPadRescale', 'MinIoURandomCrop',
+    'Expand', 'PhotoMetricDistortion', 'Albu'
+]
diff --git a/CDARTS_detection/mmdet/datasets/pipelines/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/datasets/pipelines/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7369ac5d6485af87ae26b85f82037ecd133bd628
GIT binary patch
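For orientation, a typical pipeline config that these exports serve; the values are illustrative and the transform signatures are assumed to mirror upstream mmdetection. `Compose` looks each dict up in the `PIPELINES` registry and chains the resulting callables, each of which takes and returns the shared results dict.

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize',
         mean=[123.675, 116.28, 103.53],
         std=[58.395, 57.12, 57.375],
         to_rgb=True),
    dict(type='Pad', size_divisor=32),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]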
diff --git a/CDARTS_detection/mmdet/datasets/pipelines/__pycache__/formating.cpython-36.pyc b/CDARTS_detection/mmdet/datasets/pipelines/__pycache__/formating.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..844b362798f65223575709e5fd676340dede7712
GIT binary patch
diff --git a/CDARTS_detection/mmdet/datasets/pipelines/__pycache__/transforms.cpython-36.pyc b/CDARTS_detection/mmdet/datasets/pipelines/__pycache__/transforms.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b117f390491869be31f1a824d4ad0e0d61e57de
GIT binary patch
diff --git a/CDARTS_detection/mmdet/datasets/pipelines/transforms.py b/CDARTS_detection/mmdet/datasets/pipelines/transforms.py
new file mode 100644
--- /dev/null
+++ b/CDARTS_detection/mmdet/datasets/pipelines/transforms.py
+@PIPELINES.register_module
+class RandomFlip(object):
+    """Flip the image & bboxes & masks.
+
+    Args:
+        flip_ratio (float, optional): The flipping probability.
+        direction (str): The flipping direction, 'horizontal' or 'vertical'.
+    """
+
+    def __init__(self, flip_ratio=None, direction='horizontal'):
+        self.flip_ratio = flip_ratio
+        self.direction = direction
+        if flip_ratio is not None:
+            assert flip_ratio >= 0 and flip_ratio <= 1
+        assert direction in ['horizontal', 'vertical']
+
+    def bbox_flip(self, bboxes, img_shape, direction):
+        """Flip bboxes horizontally or vertically.
+
+        Args:
+            bboxes(ndarray): shape (..., 4*k)
+            img_shape(tuple): (height, width)
+        """
+        assert bboxes.shape[-1] % 4 == 0
+        flipped = bboxes.copy()
+        if direction == 'horizontal':
+            w = img_shape[1]
+            flipped[..., 0::4] = w - bboxes[..., 2::4] - 1
+            flipped[..., 2::4] = w - bboxes[..., 0::4] - 1
+        elif direction == 'vertical':
+            h = img_shape[0]
+            flipped[..., 1::4] = h - bboxes[..., 3::4] - 1
+            flipped[..., 3::4] = h - bboxes[..., 1::4] - 1
+        else:
+            raise ValueError(
+                'Invalid flipping direction "{}"'.format(direction))
+        return flipped
+
+    def __call__(self, results):
+        if 'flip' not in results:
+            flip = True if np.random.rand() < self.flip_ratio else False
+            results['flip'] = flip
+        if 'flip_direction' not in results:
+            results['flip_direction'] = self.direction
+        if results['flip']:
+            # flip image
+            results['img'] = mmcv.imflip(
+                results['img'], direction=results['flip_direction'])
+            # flip bboxes
+            for key in results.get('bbox_fields', []):
+                results[key] = self.bbox_flip(results[key],
+                                              results['img_shape'],
+                                              results['flip_direction'])
+            # flip masks
+            for key in results.get('mask_fields', []):
+                results[key] = [
+                    mmcv.imflip(mask, direction=results['flip_direction'])
+                    for mask in results[key]
+                ]
+        return results
+
+    def __repr__(self):
+        return self.__class__.__name__ + '(flip_ratio={})'.format(
+            self.flip_ratio)
+
+
+@PIPELINES.register_module
+class Pad(object):
+    """Pad the image & mask.
+
+    There are two padding modes: (1) pad to a fixed size and (2) pad to the
+    minimum size that is divisible by some number.
+
+    Args:
+        size (tuple, optional): Fixed padding size.
+        size_divisor (int, optional): The divisor of padded size.
+        pad_val (float, optional): Padding value, 0 by default.
+ """ + + def __init__(self, size=None, size_divisor=None, pad_val=0): + self.size = size + self.size_divisor = size_divisor + self.pad_val = pad_val + # only one of size and size_divisor should be valid + assert size is not None or size_divisor is not None + assert size is None or size_divisor is None + + def _pad_img(self, results): + if self.size is not None: + padded_img = mmcv.impad(results['img'], self.size) + elif self.size_divisor is not None: + padded_img = mmcv.impad_to_multiple( + results['img'], self.size_divisor, pad_val=self.pad_val) + results['img'] = padded_img + results['pad_shape'] = padded_img.shape + results['pad_fixed_size'] = self.size + results['pad_size_divisor'] = self.size_divisor + + def _pad_masks(self, results): + pad_shape = results['pad_shape'][:2] + for key in results.get('mask_fields', []): + padded_masks = [ + mmcv.impad(mask, pad_shape, pad_val=self.pad_val) + for mask in results[key] + ] + results[key] = np.stack(padded_masks, axis=0) + + def __call__(self, results): + self._pad_img(results) + self._pad_masks(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += '(size={}, size_divisor={}, pad_val={})'.format( + self.size, self.size_divisor, self.pad_val) + return repr_str + + +@PIPELINES.register_module +class Normalize(object): + """Normalize the image. + + Args: + mean (sequence): Mean values of 3 channels. + std (sequence): Std values of 3 channels. + to_rgb (bool): Whether to convert the image from BGR to RGB, + default is true. + """ + + def __init__(self, mean, std, to_rgb=True): + self.mean = np.array(mean, dtype=np.float32) + self.std = np.array(std, dtype=np.float32) + self.to_rgb = to_rgb + + def __call__(self, results): + results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std, + self.to_rgb) + results['img_norm_cfg'] = dict( + mean=self.mean, std=self.std, to_rgb=self.to_rgb) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += '(mean={}, std={}, to_rgb={})'.format( + self.mean, self.std, self.to_rgb) + return repr_str + + +@PIPELINES.register_module +class RandomCrop(object): + """Random crop the image & bboxes & masks. + + Args: + crop_size (tuple): Expected size after cropping, (h, w). 
+ """ + + def __init__(self, crop_size): + self.crop_size = crop_size + + def __call__(self, results): + img = results['img'] + margin_h = max(img.shape[0] - self.crop_size[0], 0) + margin_w = max(img.shape[1] - self.crop_size[1], 0) + offset_h = np.random.randint(0, margin_h + 1) + offset_w = np.random.randint(0, margin_w + 1) + crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0] + crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1] + + # crop the image + img = img[crop_y1:crop_y2, crop_x1:crop_x2, :] + img_shape = img.shape + results['img'] = img + results['img_shape'] = img_shape + + # crop bboxes accordingly and clip to the image boundary + for key in results.get('bbox_fields', []): + bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h], + dtype=np.float32) + bboxes = results[key] - bbox_offset + bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1] - 1) + bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0] - 1) + results[key] = bboxes + + # filter out the gt bboxes that are completely cropped + if 'gt_bboxes' in results: + gt_bboxes = results['gt_bboxes'] + valid_inds = (gt_bboxes[:, 2] > gt_bboxes[:, 0]) & ( + gt_bboxes[:, 3] > gt_bboxes[:, 1]) + # if no gt bbox remains after cropping, just skip this image + if not np.any(valid_inds): + return None + results['gt_bboxes'] = gt_bboxes[valid_inds, :] + if 'gt_labels' in results: + results['gt_labels'] = results['gt_labels'][valid_inds] + + # filter and crop the masks + if 'gt_masks' in results: + valid_gt_masks = [] + for i in np.where(valid_inds)[0]: + gt_mask = results['gt_masks'][i][crop_y1:crop_y2, + crop_x1:crop_x2] + valid_gt_masks.append(gt_mask) + results['gt_masks'] = valid_gt_masks + + return results + + def __repr__(self): + return self.__class__.__name__ + '(crop_size={})'.format( + self.crop_size) + + +@PIPELINES.register_module +class SegResizeFlipPadRescale(object): + """A sequential transforms to semantic segmentation maps. + + The same pipeline as input images is applied to the semantic segmentation + map, and finally rescale it by some scale factor. The transforms include: + 1. resize + 2. flip + 3. pad + 4. rescale (so that the final size can be different from the image size) + + Args: + scale_factor (float): The scale factor of the final output. + """ + + def __init__(self, scale_factor=1): + self.scale_factor = scale_factor + + def __call__(self, results): + if results['keep_ratio']: + gt_seg = mmcv.imrescale( + results['gt_semantic_seg'], + results['scale'], + interpolation='nearest') + else: + gt_seg = mmcv.imresize( + results['gt_semantic_seg'], + results['scale'], + interpolation='nearest') + if results['flip']: + gt_seg = mmcv.imflip(gt_seg) + if gt_seg.shape != results['pad_shape']: + gt_seg = mmcv.impad(gt_seg, results['pad_shape'][:2]) + if self.scale_factor != 1: + gt_seg = mmcv.imrescale( + gt_seg, self.scale_factor, interpolation='nearest') + results['gt_semantic_seg'] = gt_seg + return results + + def __repr__(self): + return self.__class__.__name__ + '(scale_factor={})'.format( + self.scale_factor) + + +@PIPELINES.register_module +class PhotoMetricDistortion(object): + """Apply photometric distortion to image sequentially, every transformation + is applied with a probability of 0.5. The position of random contrast is in + second or second to last. + + 1. random brightness + 2. random contrast (mode 0) + 3. convert color from BGR to HSV + 4. random saturation + 5. random hue + 6. convert color from HSV to BGR + 7. random contrast (mode 1) + 8. 
+
+    Args:
+        brightness_delta (int): delta of brightness.
+        contrast_range (tuple): range of contrast.
+        saturation_range (tuple): range of saturation.
+        hue_delta (int): delta of hue.
+    """
+
+    def __init__(self,
+                 brightness_delta=32,
+                 contrast_range=(0.5, 1.5),
+                 saturation_range=(0.5, 1.5),
+                 hue_delta=18):
+        self.brightness_delta = brightness_delta
+        self.contrast_lower, self.contrast_upper = contrast_range
+        self.saturation_lower, self.saturation_upper = saturation_range
+        self.hue_delta = hue_delta
+
+    def __call__(self, results):
+        img = results['img']
+        # random brightness
+        if random.randint(2):
+            delta = random.uniform(-self.brightness_delta,
+                                   self.brightness_delta)
+            img += delta
+
+        # mode == 0 --> do random contrast first
+        # mode == 1 --> do random contrast last
+        mode = random.randint(2)
+        if mode == 1:
+            if random.randint(2):
+                alpha = random.uniform(self.contrast_lower,
+                                       self.contrast_upper)
+                img *= alpha
+
+        # convert color from BGR to HSV
+        img = mmcv.bgr2hsv(img)
+
+        # random saturation
+        if random.randint(2):
+            img[..., 1] *= random.uniform(self.saturation_lower,
+                                          self.saturation_upper)
+
+        # random hue
+        if random.randint(2):
+            img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
+            img[..., 0][img[..., 0] > 360] -= 360
+            img[..., 0][img[..., 0] < 0] += 360
+
+        # convert color from HSV to BGR
+        img = mmcv.hsv2bgr(img)
+
+        # random contrast
+        if mode == 0:
+            if random.randint(2):
+                alpha = random.uniform(self.contrast_lower,
+                                       self.contrast_upper)
+                img *= alpha
+
+        # randomly swap channels
+        if random.randint(2):
+            img = img[..., random.permutation(3)]
+
+        results['img'] = img
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += ('(brightness_delta={}, contrast_range={}, '
+                     'saturation_range={}, hue_delta={})').format(
+                         self.brightness_delta,
+                         (self.contrast_lower, self.contrast_upper),
+                         (self.saturation_lower, self.saturation_upper),
+                         self.hue_delta)
+        return repr_str
+
+
+@PIPELINES.register_module
+class Expand(object):
+    """Randomly expand the image & bboxes.
+
+    Randomly place the original image on a canvas of 'ratio' x original image
+    size filled with mean values. The ratio is in the range of ratio_range.
+
+    Args:
+        mean (tuple): mean value of dataset.
+        to_rgb (bool): if need to convert the order of mean to align with RGB.
+        ratio_range (tuple): range of expand ratio.
+        seg_ignore_label (int | None): label of the ignore region in the
+            semantic segmentation map. Default: None.
+        prob (float): probability of applying this transformation.
+    """
+
+    def __init__(self,
+                 mean=(0, 0, 0),
+                 to_rgb=True,
+                 ratio_range=(1, 4),
+                 seg_ignore_label=None,
+                 prob=0.5):
+        self.to_rgb = to_rgb
+        self.ratio_range = ratio_range
+        if to_rgb:
+            self.mean = mean[::-1]
+        else:
+            self.mean = mean
+        self.min_ratio, self.max_ratio = ratio_range
+        self.seg_ignore_label = seg_ignore_label
+        self.prob = prob
+
+    def __call__(self, results):
+        if random.uniform(0, 1) > self.prob:
+            return results
+
+        img, boxes = [results[k] for k in ('img', 'gt_bboxes')]
+
+        h, w, c = img.shape
+        ratio = random.uniform(self.min_ratio, self.max_ratio)
+        expand_img = np.full((int(h * ratio), int(w * ratio), c),
+                             self.mean).astype(img.dtype)
+        left = int(random.uniform(0, w * ratio - w))
+        top = int(random.uniform(0, h * ratio - h))
+        expand_img[top:top + h, left:left + w] = img
+        boxes = boxes + np.tile((left, top), 2).astype(boxes.dtype)
+
+        results['img'] = expand_img
+        results['gt_bboxes'] = boxes
+
+        if 'gt_masks' in results:
+            expand_gt_masks = []
+            for mask in results['gt_masks']:
+                expand_mask = np.full((int(h * ratio), int(w * ratio)),
+                                      0).astype(mask.dtype)
+                expand_mask[top:top + h, left:left + w] = mask
+                expand_gt_masks.append(expand_mask)
+            results['gt_masks'] = expand_gt_masks
+
+        # not tested
+        if 'gt_semantic_seg' in results:
+            assert self.seg_ignore_label is not None
+            gt_seg = results['gt_semantic_seg']
+            expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
+                                    self.seg_ignore_label).astype(gt_seg.dtype)
+            expand_gt_seg[top:top + h, left:left + w] = gt_seg
+            results['gt_semantic_seg'] = expand_gt_seg
+        return results
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += '(mean={}, to_rgb={}, ratio_range={}, ' \
+                    'seg_ignore_label={})'.format(
+                        self.mean, self.to_rgb, self.ratio_range,
+                        self.seg_ignore_label)
+        return repr_str
+
+
+@PIPELINES.register_module
+class MinIoURandomCrop(object):
+    """Random crop the image & bboxes so that the cropped patches meet a
+    minimum IoU requirement with the original bboxes; the IoU threshold is
+    randomly selected from min_ious.
+
+    Args:
+        min_ious (tuple): minimum IoU threshold for all intersections with
+            bounding boxes
+        min_crop_size (float): minimum crop size (i.e. h, w := a*h, a*w,
+            where a >= min_crop_size).
+ """ + + def __init__(self, min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3): + # 1: return ori img + self.sample_mode = (1, *min_ious, 0) + self.min_crop_size = min_crop_size + + def __call__(self, results): + img, boxes, labels = [ + results[k] for k in ('img', 'gt_bboxes', 'gt_labels') + ] + h, w, c = img.shape + while True: + mode = random.choice(self.sample_mode) + if mode == 1: + return results + + min_iou = mode + for i in range(50): + new_w = random.uniform(self.min_crop_size * w, w) + new_h = random.uniform(self.min_crop_size * h, h) + + # h / w in [0.5, 2] + if new_h / new_w < 0.5 or new_h / new_w > 2: + continue + + left = random.uniform(w - new_w) + top = random.uniform(h - new_h) + + patch = np.array( + (int(left), int(top), int(left + new_w), int(top + new_h))) + overlaps = bbox_overlaps( + patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1) + if overlaps.min() < min_iou: + continue + + # center of boxes should inside the crop img + center = (boxes[:, :2] + boxes[:, 2:]) / 2 + mask = ((center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * + (center[:, 0] < patch[2]) * (center[:, 1] < patch[3])) + if not mask.any(): + continue + boxes = boxes[mask] + labels = labels[mask] + + # adjust boxes + img = img[patch[1]:patch[3], patch[0]:patch[2]] + boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:]) + boxes[:, :2] = boxes[:, :2].clip(min=patch[:2]) + boxes -= np.tile(patch[:2], 2) + + results['img'] = img + results['gt_bboxes'] = boxes + results['gt_labels'] = labels + + if 'gt_masks' in results: + valid_masks = [ + results['gt_masks'][i] for i in range(len(mask)) + if mask[i] + ] + results['gt_masks'] = [ + gt_mask[patch[1]:patch[3], patch[0]:patch[2]] + for gt_mask in valid_masks + ] + + # not tested + if 'gt_semantic_seg' in results: + results['gt_semantic_seg'] = results['gt_semantic_seg'][ + patch[1]:patch[3], patch[0]:patch[2]] + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += '(min_ious={}, min_crop_size={})'.format( + self.min_ious, self.min_crop_size) + return repr_str + + +@PIPELINES.register_module +class Corrupt(object): + + def __init__(self, corruption, severity=1): + self.corruption = corruption + self.severity = severity + + def __call__(self, results): + results['img'] = corrupt( + results['img'].astype(np.uint8), + corruption_name=self.corruption, + severity=self.severity) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += '(corruption={}, severity={})'.format( + self.corruption, self.severity) + return repr_str + + +@PIPELINES.register_module +class Albu(object): + + def __init__(self, + transforms, + bbox_params=None, + keymap=None, + update_pad_shape=False, + skip_img_without_anno=False): + """ + Adds custom transformations from Albumentations lib. + Please, visit `https://albumentations.readthedocs.io` + to get more information. 
+
+        Args:
+            transforms (list): list of albu transformations
+            bbox_params (dict): bbox_params for albumentations `Compose`
+            keymap (dict): contains {'input key': 'albumentation-style key'}
+            update_pad_shape (bool): whether to update the padded shape from
+                the augmented image
+            skip_img_without_anno (bool): whether to skip the image
+                if no annotations are left after augmentation
+        """
+
+        self.transforms = transforms
+        self.filter_lost_elements = False
+        self.update_pad_shape = update_pad_shape
+        self.skip_img_without_anno = skip_img_without_anno
+
+        # A simple workaround to remove masks without boxes
+        if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
+                and 'filter_lost_elements' in bbox_params):
+            self.filter_lost_elements = True
+            self.origin_label_fields = bbox_params['label_fields']
+            bbox_params['label_fields'] = ['idx_mapper']
+            del bbox_params['filter_lost_elements']
+
+        self.bbox_params = (
+            self.albu_builder(bbox_params) if bbox_params else None)
+        self.aug = Compose([self.albu_builder(t) for t in self.transforms],
+                           bbox_params=self.bbox_params)
+
+        if not keymap:
+            self.keymap_to_albu = {
+                'img': 'image',
+                'gt_masks': 'masks',
+                'gt_bboxes': 'bboxes'
+            }
+        else:
+            self.keymap_to_albu = keymap
+        self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
+
+    def albu_builder(self, cfg):
+        """Import a module from albumentations.
+        Inherits some of `build_from_cfg` logic.
+
+        Args:
+            cfg (dict): Config dict. It should at least contain the key "type".
+        Returns:
+            obj: The constructed object.
+        """
+        assert isinstance(cfg, dict) and 'type' in cfg
+        args = cfg.copy()
+
+        obj_type = args.pop('type')
+        if mmcv.is_str(obj_type):
+            obj_cls = getattr(albumentations, obj_type)
+        elif inspect.isclass(obj_type):
+            obj_cls = obj_type
+        else:
+            raise TypeError(
+                'type must be a str or valid type, but got {}'.format(
+                    type(obj_type)))
+
+        if 'transforms' in args:
+            args['transforms'] = [
+                self.albu_builder(transform)
+                for transform in args['transforms']
+            ]
+
+        return obj_cls(**args)
+
+    @staticmethod
+    def mapper(d, keymap):
+        """
+        Dictionary mapper. Renames keys according to the keymap provided.
+
+        Args:
+            d (dict): old dict
+            keymap (dict): {'old_key': 'new_key'}
+        Returns:
+            dict: new dict.
+ """ + updated_dict = {} + for k, v in zip(d.keys(), d.values()): + new_k = keymap.get(k, k) + updated_dict[new_k] = d[k] + return updated_dict + + def __call__(self, results): + # dict to albumentations format + results = self.mapper(results, self.keymap_to_albu) + + if 'bboxes' in results: + # to list of boxes + if isinstance(results['bboxes'], np.ndarray): + results['bboxes'] = [x for x in results['bboxes']] + # add pseudo-field for filtration + if self.filter_lost_elements: + results['idx_mapper'] = np.arange(len(results['bboxes'])) + + results = self.aug(**results) + + if 'bboxes' in results: + if isinstance(results['bboxes'], list): + results['bboxes'] = np.array( + results['bboxes'], dtype=np.float32) + + # filter label_fields + if self.filter_lost_elements: + + results['idx_mapper'] = np.arange(len(results['bboxes'])) + + for label in self.origin_label_fields: + results[label] = np.array( + [results[label][i] for i in results['idx_mapper']]) + if 'masks' in results: + results['masks'] = [ + results['masks'][i] for i in results['idx_mapper'] + ] + + if (not len(results['idx_mapper']) + and self.skip_img_without_anno): + return None + + if 'gt_labels' in results: + if isinstance(results['gt_labels'], list): + results['gt_labels'] = np.array(results['gt_labels']) + + # back to the original format + results = self.mapper(results, self.keymap_back) + + # update final shape + if self.update_pad_shape: + results['pad_shape'] = results['img'].shape + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += '(transformations={})'.format(self.transformations) + return repr_str diff --git a/CDARTS_detection/mmdet/datasets/registry.py b/CDARTS_detection/mmdet/datasets/registry.py new file mode 100644 index 0000000..974a4fb --- /dev/null +++ b/CDARTS_detection/mmdet/datasets/registry.py @@ -0,0 +1,4 @@ +from mmdet.utils import Registry + +DATASETS = Registry('dataset') +PIPELINES = Registry('pipeline') diff --git a/CDARTS_detection/mmdet/datasets/transforms.py b/CDARTS_detection/mmdet/datasets/transforms.py new file mode 100644 index 0000000..ff575db --- /dev/null +++ b/CDARTS_detection/mmdet/datasets/transforms.py @@ -0,0 +1,147 @@ +import mmcv +import numpy as np +import torch + +__all__ = [ + 'ImageTransform', 'BboxTransform', 'MaskTransform', 'SegMapTransform', + 'Numpy2Tensor' +] + + +class ImageTransform(object): + """Preprocess an image. + + 1. rescale the image to expected size + 2. normalize the image + 3. flip the image (if needed) + 4. pad the image (if needed) + 5. 
+    """
+
+    def __init__(self,
+                 mean=(0, 0, 0),
+                 std=(1, 1, 1),
+                 to_rgb=True,
+                 size_divisor=None):
+        self.mean = np.array(mean, dtype=np.float32)
+        self.std = np.array(std, dtype=np.float32)
+        self.to_rgb = to_rgb
+        self.size_divisor = size_divisor
+
+    def __call__(self, img, scale, flip=False, keep_ratio=True):
+        if keep_ratio:
+            img, scale_factor = mmcv.imrescale(img, scale, return_scale=True)
+        else:
+            img, w_scale, h_scale = mmcv.imresize(
+                img, scale, return_scale=True)
+            scale_factor = np.array(
+                [w_scale, h_scale, w_scale, h_scale], dtype=np.float32)
+        img_shape = img.shape
+        img = mmcv.imnormalize(img, self.mean, self.std, self.to_rgb)
+        if flip:
+            img = mmcv.imflip(img)
+        if self.size_divisor is not None:
+            img = mmcv.impad_to_multiple(img, self.size_divisor)
+            pad_shape = img.shape
+        else:
+            pad_shape = img_shape
+        img = img.transpose(2, 0, 1)
+        return img, img_shape, pad_shape, scale_factor
+
+
+def bbox_flip(bboxes, img_shape):
+    """Flip bboxes horizontally.
+
+    Args:
+        bboxes(ndarray): shape (..., 4*k)
+        img_shape(tuple): (height, width)
+    """
+    assert bboxes.shape[-1] % 4 == 0
+    w = img_shape[1]
+    flipped = bboxes.copy()
+    flipped[..., 0::4] = w - bboxes[..., 2::4] - 1
+    flipped[..., 2::4] = w - bboxes[..., 0::4] - 1
+    return flipped
+
+
+class BboxTransform(object):
+    """Preprocess gt bboxes.
+
+    1. rescale bboxes according to image size
+    2. flip bboxes (if needed)
+    3. pad the first dimension to `max_num_gts`
+    """
+
+    def __init__(self, max_num_gts=None):
+        self.max_num_gts = max_num_gts
+
+    def __call__(self, bboxes, img_shape, scale_factor, flip=False):
+        gt_bboxes = bboxes * scale_factor
+        if flip:
+            gt_bboxes = bbox_flip(gt_bboxes, img_shape)
+        gt_bboxes[:, 0::2] = np.clip(gt_bboxes[:, 0::2], 0, img_shape[1] - 1)
+        gt_bboxes[:, 1::2] = np.clip(gt_bboxes[:, 1::2], 0, img_shape[0] - 1)
+        if self.max_num_gts is None:
+            return gt_bboxes
+        else:
+            num_gts = gt_bboxes.shape[0]
+            padded_bboxes = np.zeros((self.max_num_gts, 4), dtype=np.float32)
+            padded_bboxes[:num_gts, :] = gt_bboxes
+            return padded_bboxes
+
+
+class MaskTransform(object):
+    """Preprocess masks.
+
+    1. resize masks to expected size and stack to a single array
+    2. flip the masks (if needed)
+    3. pad the masks (if needed)
+    """
+
+    def __call__(self, masks, pad_shape, scale_factor, flip=False):
+        masks = [
+            mmcv.imrescale(mask, scale_factor, interpolation='nearest')
+            for mask in masks
+        ]
+        if flip:
+            masks = [mask[:, ::-1] for mask in masks]
+        padded_masks = [
+            mmcv.impad(mask, pad_shape[:2], pad_val=0) for mask in masks
+        ]
+        padded_masks = np.stack(padded_masks, axis=0)
+        return padded_masks
+
+
+class SegMapTransform(object):
+    """Preprocess semantic segmentation maps.
+
+    1. rescale the segmentation map to expected size
+    2. flip the image (if needed)
+    3. pad the image (if needed)
+    """
+
+    def __init__(self, size_divisor=None):
+        self.size_divisor = size_divisor
+
+    def __call__(self, img, scale, flip=False, keep_ratio=True):
+        if keep_ratio:
+            img = mmcv.imrescale(img, scale, interpolation='nearest')
+        else:
+            img = mmcv.imresize(img, scale, interpolation='nearest')
+        if flip:
+            img = mmcv.imflip(img)
+        if self.size_divisor is not None:
+            img = mmcv.impad_to_multiple(img, self.size_divisor)
+        return img
+
+
+class Numpy2Tensor(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, *args):
+        if len(args) == 1:
+            return torch.from_numpy(args[0])
+        else:
+            return tuple([torch.from_numpy(np.array(array)) for array in args])
diff --git a/CDARTS_detection/mmdet/datasets/utils.py b/CDARTS_detection/mmdet/datasets/utils.py
new file mode 100644
index 0000000..9f4f46c
--- /dev/null
+++ b/CDARTS_detection/mmdet/datasets/utils.py
@@ -0,0 +1,68 @@
+from collections.abc import Sequence
+
+import matplotlib.pyplot as plt
+import mmcv
+import numpy as np
+import torch
+
+
+def to_tensor(data):
+    """Convert objects of various python types to :obj:`torch.Tensor`.
+
+    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
+    :class:`Sequence`, :class:`int` and :class:`float`.
+    """
+    if isinstance(data, torch.Tensor):
+        return data
+    elif isinstance(data, np.ndarray):
+        return torch.from_numpy(data)
+    elif isinstance(data, Sequence) and not mmcv.is_str(data):
+        return torch.tensor(data)
+    elif isinstance(data, int):
+        return torch.LongTensor([data])
+    elif isinstance(data, float):
+        return torch.FloatTensor([data])
+    else:
+        raise TypeError('type {} cannot be converted to tensor.'.format(
+            type(data)))
+
+
+def random_scale(img_scales, mode='range'):
+    """Randomly select a scale from a list of scales or scale ranges.
+
+    Args:
+        img_scales (list[tuple]): Image scale or scale range.
+        mode (str): "range" or "value".
+
+    Returns:
+        tuple: Sampled image scale.
+ """ + num_scales = len(img_scales) + if num_scales == 1: # fixed scale is specified + img_scale = img_scales[0] + elif num_scales == 2: # randomly sample a scale + if mode == 'range': + img_scale_long = [max(s) for s in img_scales] + img_scale_short = [min(s) for s in img_scales] + long_edge = np.random.randint( + min(img_scale_long), + max(img_scale_long) + 1) + short_edge = np.random.randint( + min(img_scale_short), + max(img_scale_short) + 1) + img_scale = (long_edge, short_edge) + elif mode == 'value': + img_scale = img_scales[np.random.randint(num_scales)] + else: + if mode != 'value': + raise ValueError( + 'Only "value" mode supports more than 2 image scales') + img_scale = img_scales[np.random.randint(num_scales)] + return img_scale + + +def show_ann(coco, img, ann_info): + plt.imshow(mmcv.bgr2rgb(img)) + plt.axis('off') + coco.showAnns(ann_info) + plt.show() diff --git a/CDARTS_detection/mmdet/datasets/voc.py b/CDARTS_detection/mmdet/datasets/voc.py new file mode 100644 index 0000000..0e3162f --- /dev/null +++ b/CDARTS_detection/mmdet/datasets/voc.py @@ -0,0 +1,66 @@ +from mmdet.core import eval_map, eval_recalls +from .registry import DATASETS +from .xml_style import XMLDataset + + +@DATASETS.register_module +class VOCDataset(XMLDataset): + + CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', + 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', + 'tvmonitor') + + def __init__(self, **kwargs): + super(VOCDataset, self).__init__(**kwargs) + if 'VOC2007' in self.img_prefix: + self.year = 2007 + elif 'VOC2012' in self.img_prefix: + self.year = 2012 + else: + raise ValueError('Cannot infer dataset year from img_prefix') + + def evaluate(self, + results, + metric='mAP', + logger=None, + proposal_nums=(100, 300, 1000), + iou_thr=0.5, + scale_ranges=None): + if not isinstance(metric, str): + assert len(metric) == 1 + metric = metric[0] + allowed_metrics = ['mAP', 'recall'] + if metric not in allowed_metrics: + raise KeyError('metric {} is not supported'.format(metric)) + annotations = [self.get_ann_info(i) for i in range(len(self))] + eval_results = {} + if metric == 'mAP': + assert isinstance(iou_thr, float) + if self.year == 2007: + ds_name = 'voc07' + else: + ds_name = self.dataset.CLASSES + mean_ap, _ = eval_map( + results, + annotations, + scale_ranges=None, + iou_thr=iou_thr, + dataset=ds_name, + logger=logger) + eval_results['mAP'] = mean_ap + elif metric == 'recall': + gt_bboxes = [ann['bboxes'] for ann in annotations] + if isinstance(iou_thr, float): + iou_thr = [iou_thr] + recalls = eval_recalls( + gt_bboxes, results, proposal_nums, iou_thr, logger=logger) + for i, num in enumerate(proposal_nums): + for j, iou in enumerate(iou_thr): + eval_results['recall@{}@{}'.format(num, iou)] = recalls[i, + j] + if recalls.shape[1] > 1: + ar = recalls.mean(axis=1) + for i, num in enumerate(proposal_nums): + eval_results['AR@{}'.format(num)] = ar[i] + return eval_results diff --git a/CDARTS_detection/mmdet/datasets/wider_face.py b/CDARTS_detection/mmdet/datasets/wider_face.py new file mode 100644 index 0000000..b83e3d6 --- /dev/null +++ b/CDARTS_detection/mmdet/datasets/wider_face.py @@ -0,0 +1,42 @@ +import os.path as osp +import xml.etree.ElementTree as ET + +import mmcv + +from .registry import DATASETS +from .xml_style import XMLDataset + + +@DATASETS.register_module +class WIDERFaceDataset(XMLDataset): + """ + Reader for the WIDER Face dataset in PASCAL VOC format. 
+    Conversion scripts can be found in
+    https://github.com/sovrasov/wider-face-pascal-voc-annotations
+    """
+    CLASSES = ('face', )
+
+    def __init__(self, **kwargs):
+        super(WIDERFaceDataset, self).__init__(**kwargs)
+
+    def load_annotations(self, ann_file):
+        img_infos = []
+        img_ids = mmcv.list_from_file(ann_file)
+        for img_id in img_ids:
+            filename = '{}.jpg'.format(img_id)
+            xml_path = osp.join(self.img_prefix, 'Annotations',
+                                '{}.xml'.format(img_id))
+            tree = ET.parse(xml_path)
+            root = tree.getroot()
+            size = root.find('size')
+            width = int(size.find('width').text)
+            height = int(size.find('height').text)
+            folder = root.find('folder').text
+            img_infos.append(
+                dict(
+                    id=img_id,
+                    filename=osp.join(folder, filename),
+                    width=width,
+                    height=height))
+
+        return img_infos
diff --git a/CDARTS_detection/mmdet/datasets/xml_style.py b/CDARTS_detection/mmdet/datasets/xml_style.py
new file mode 100644
index 0000000..39d5704
--- /dev/null
+++ b/CDARTS_detection/mmdet/datasets/xml_style.py
@@ -0,0 +1,86 @@
+import os.path as osp
+import xml.etree.ElementTree as ET
+
+import mmcv
+import numpy as np
+
+from .custom import CustomDataset
+from .registry import DATASETS
+
+
+@DATASETS.register_module
+class XMLDataset(CustomDataset):
+
+    def __init__(self, min_size=None, **kwargs):
+        super(XMLDataset, self).__init__(**kwargs)
+        self.cat2label = {cat: i + 1 for i, cat in enumerate(self.CLASSES)}
+        self.min_size = min_size
+
+    def load_annotations(self, ann_file):
+        img_infos = []
+        img_ids = mmcv.list_from_file(ann_file)
+        for img_id in img_ids:
+            filename = 'JPEGImages/{}.jpg'.format(img_id)
+            xml_path = osp.join(self.img_prefix, 'Annotations',
+                                '{}.xml'.format(img_id))
+            tree = ET.parse(xml_path)
+            root = tree.getroot()
+            size = root.find('size')
+            width = int(size.find('width').text)
+            height = int(size.find('height').text)
+            img_infos.append(
+                dict(id=img_id, filename=filename, width=width, height=height))
+        return img_infos
+
+    def get_ann_info(self, idx):
+        img_id = self.img_infos[idx]['id']
+        xml_path = osp.join(self.img_prefix, 'Annotations',
+                            '{}.xml'.format(img_id))
+        tree = ET.parse(xml_path)
+        root = tree.getroot()
+        bboxes = []
+        labels = []
+        bboxes_ignore = []
+        labels_ignore = []
+        for obj in root.findall('object'):
+            name = obj.find('name').text
+            label = self.cat2label[name]
+            difficult = int(obj.find('difficult').text)
+            bnd_box = obj.find('bndbox')
+            bbox = [
+                int(bnd_box.find('xmin').text),
+                int(bnd_box.find('ymin').text),
+                int(bnd_box.find('xmax').text),
+                int(bnd_box.find('ymax').text)
+            ]
+            ignore = False
+            if self.min_size:
+                assert not self.test_mode
+                w = bbox[2] - bbox[0]
+                h = bbox[3] - bbox[1]
+                if w < self.min_size or h < self.min_size:
+                    ignore = True
+            if difficult or ignore:
+                bboxes_ignore.append(bbox)
+                labels_ignore.append(label)
+            else:
+                bboxes.append(bbox)
+                labels.append(label)
+        if not bboxes:
+            bboxes = np.zeros((0, 4))
+            labels = np.zeros((0, ))
+        else:
+            bboxes = np.array(bboxes, ndmin=2) - 1
+            labels = np.array(labels)
+        if not bboxes_ignore:
+            bboxes_ignore = np.zeros((0, 4))
+            labels_ignore = np.zeros((0, ))
+        else:
+            bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
+            labels_ignore = np.array(labels_ignore)
+        ann = dict(
+            bboxes=bboxes.astype(np.float32),
+            labels=labels.astype(np.int64),
+            bboxes_ignore=bboxes_ignore.astype(np.float32),
+            labels_ignore=labels_ignore.astype(np.int64))
+        return ann
diff --git a/CDARTS_detection/mmdet/models/__init__.py b/CDARTS_detection/mmdet/models/__init__.py
new file mode 100644
index 0000000..c1776bf
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/__init__.py
@@ -0,0 +1,20 @@
+from .backbones import *  # noqa: F401,F403
+from .necks import *  # noqa: F401,F403
+from .roi_extractors import *  # noqa: F401,F403
+from .anchor_heads import *  # noqa: F401,F403
+from .shared_heads import *  # noqa: F401,F403
+from .bbox_heads import *  # noqa: F401,F403
+from .mask_heads import *  # noqa: F401,F403
+from .losses import *  # noqa: F401,F403
+from .detectors import *  # noqa: F401,F403
+from .registry import (BACKBONES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, HEADS,
+                       LOSSES, DETECTORS)
+from .builder import (build_backbone, build_neck, build_roi_extractor,
+                      build_shared_head, build_head, build_loss,
+                      build_detector)
+
+__all__ = [
+    'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
+    'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
+    'build_shared_head', 'build_head', 'build_loss', 'build_detector'
+]
diff --git a/CDARTS_detection/mmdet/models/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/models/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e7cd38a49c907c89b8e523e098aecf4a05c5670
GIT binary patch
diff --git a/CDARTS_detection/mmdet/models/__pycache__/builder.cpython-36.pyc b/CDARTS_detection/mmdet/models/__pycache__/builder.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d97d3586f7203285d57d121c996d003492a1de85
GIT binary patch
diff --git a/CDARTS_detection/mmdet/models/__pycache__/registry.cpython-36.pyc b/CDARTS_detection/mmdet/models/__pycache__/registry.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..05927f36d46d43b73e35496f627cfeced47acfb8
GIT binary patch
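builder.py ships in this patch only as a compiled .pyc, so the following sketch of the registry pattern behind these exports assumes the usual mmdetection `build_from_cfg` semantics; all config values are invented for illustration.

from mmdet.models import build_detector

model_cfg = dict(
    type='RetinaNet',  # resolved through the DETECTORS registry
    backbone=dict(type='ResNet', depth=50, num_stages=4,
                  out_indices=(0, 1, 2, 3)),
    neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048],
              out_channels=256, num_outs=5),
    bbox_head=dict(type='RetinaHead', num_classes=81, in_channels=256))
model = build_detector(model_cfg, train_cfg=None, test_cfg=None)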
diff --git a/CDARTS_detection/mmdet/models/anchor_heads/__init__.py b/CDARTS_detection/mmdet/models/anchor_heads/__init__.py
new file mode 100644
index 0000000..798b1bc
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/anchor_heads/__init__.py
@@ -0,0 +1,13 @@
+from .anchor_head import AnchorHead
+from .guided_anchor_head import GuidedAnchorHead, FeatureAdaption
+from .fcos_head import FCOSHead
+from .rpn_head import RPNHead
+from .ga_rpn_head import GARPNHead
+from .retina_head import RetinaHead
+from .ga_retina_head import GARetinaHead
+from .ssd_head import SSDHead
+
+__all__ = [
+    'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', 'RPNHead',
+    'GARPNHead', 'RetinaHead', 'GARetinaHead', 'SSDHead', 'FCOSHead'
+]
diff --git a/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0640674d319981fe7e7b3163949d0fbab3896c49
GIT binary patch
diff --git a/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0640674d319981fe7e7b3163949d0fbab3896c49
Binary files /dev/null and b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/__init__.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/fcos_head.cpython-36.pyc b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/fcos_head.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..709522007c3a5507cc32e2130e89f73a59ceb2b2
Binary files /dev/null and b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/fcos_head.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/ga_retina_head.cpython-36.pyc b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/ga_retina_head.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e28db348cdf96aa258437314a67983f0b180b9d
Binary files /dev/null and b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/ga_retina_head.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/guided_anchor_head.cpython-36.pyc b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/guided_anchor_head.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20ec01357fb2365f9307532668e1ca286d97028a
Binary files /dev/null and b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/guided_anchor_head.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/retina_head.cpython-36.pyc b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/retina_head.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f8fa0bcfb5884abcce7994c71741222122077d3
Binary files /dev/null and b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/retina_head.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/rpn_head.cpython-36.pyc b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/rpn_head.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74e5a9579971613b815033e5b6d1f121a1ca7314
Binary files /dev/null and b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/rpn_head.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/ssd_head.cpython-36.pyc b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/ssd_head.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..13757bed5ba80c423ecd5082ba31d9776a68f704
Binary files /dev/null and b/CDARTS_detection/mmdet/models/anchor_heads/__pycache__/ssd_head.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/anchor_heads/anchor_head.py b/CDARTS_detection/mmdet/models/anchor_heads/anchor_head.py
new file mode 100644
index 0000000..2b8b144
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/anchor_heads/anchor_head.py
@@ -0,0 +1,270 @@
+from __future__ import division
+
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn import normal_init
+
+from mmdet.core import (AnchorGenerator, anchor_target, delta2bbox,
+                        multi_apply, multiclass_nms, force_fp32)
+from ..builder import build_loss
+from ..registry import HEADS
+
+
+@HEADS.register_module
+class AnchorHead(nn.Module):
+    """Anchor-based head (RPN, RetinaNet, SSD, etc.).
+
+    Args:
+        in_channels (int): Number of channels in the input feature map.
+        feat_channels (int): Number of channels of the feature map.
+        anchor_scales (Iterable): Anchor scales.
+        anchor_ratios (Iterable): Anchor aspect ratios.
+        anchor_strides (Iterable): Anchor strides.
+        anchor_base_sizes (Iterable): Anchor base sizes.
+        target_means (Iterable): Mean values of regression targets.
+        target_stds (Iterable): Std values of regression targets.
+        loss_cls (dict): Config of classification loss.
+        loss_bbox (dict): Config of localization loss.
+ """ # noqa: W605 + + def __init__(self, + num_classes, + in_channels, + feat_channels=256, + anchor_scales=[8, 16, 32], + anchor_ratios=[0.5, 1.0, 2.0], + anchor_strides=[4, 8, 16, 32, 64], + anchor_base_sizes=None, + target_means=(.0, .0, .0, .0), + target_stds=(1.0, 1.0, 1.0, 1.0), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)): + super(AnchorHead, self).__init__() + self.in_channels = in_channels + self.num_classes = num_classes + self.feat_channels = feat_channels + self.anchor_scales = anchor_scales + self.anchor_ratios = anchor_ratios + self.anchor_strides = anchor_strides + self.anchor_base_sizes = list( + anchor_strides) if anchor_base_sizes is None else anchor_base_sizes + self.target_means = target_means + self.target_stds = target_stds + + self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) + self.sampling = loss_cls['type'] not in ['FocalLoss', 'GHMC'] + if self.use_sigmoid_cls: + self.cls_out_channels = num_classes - 1 + else: + self.cls_out_channels = num_classes + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.fp16_enabled = False + + self.anchor_generators = [] + for anchor_base in self.anchor_base_sizes: + self.anchor_generators.append( + AnchorGenerator(anchor_base, anchor_scales, anchor_ratios)) + + self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales) + self._init_layers() + + def _init_layers(self): + self.conv_cls = nn.Conv2d(self.feat_channels, + self.num_anchors * self.cls_out_channels, 1) + self.conv_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) + + def init_weights(self): + normal_init(self.conv_cls, std=0.01) + normal_init(self.conv_reg, std=0.01) + + def forward_single(self, x): + cls_score = self.conv_cls(x) + bbox_pred = self.conv_reg(x) + return cls_score, bbox_pred + + def forward(self, feats): + return multi_apply(self.forward_single, feats) + + def get_anchors(self, featmap_sizes, img_metas): + """Get anchors according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + img_metas (list[dict]): Image meta info. 
+ + Returns: + tuple: anchors of each image, valid flags of each image + """ + num_imgs = len(img_metas) + num_levels = len(featmap_sizes) + + # since feature map sizes of all images are the same, we only compute + # anchors for one time + multi_level_anchors = [] + for i in range(num_levels): + anchors = self.anchor_generators[i].grid_anchors( + featmap_sizes[i], self.anchor_strides[i]) + multi_level_anchors.append(anchors) + anchor_list = [multi_level_anchors for _ in range(num_imgs)] + + # for each image, we compute valid flags of multi level anchors + valid_flag_list = [] + for img_id, img_meta in enumerate(img_metas): + multi_level_flags = [] + for i in range(num_levels): + anchor_stride = self.anchor_strides[i] + feat_h, feat_w = featmap_sizes[i] + h, w, _ = img_meta['pad_shape'] + valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h) + valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w) + flags = self.anchor_generators[i].valid_flags( + (feat_h, feat_w), (valid_feat_h, valid_feat_w)) + multi_level_flags.append(flags) + valid_flag_list.append(multi_level_flags) + + return anchor_list, valid_flag_list + + def loss_single(self, cls_score, bbox_pred, labels, label_weights, + bbox_targets, bbox_weights, num_total_samples, cfg): + # classification loss + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + cls_score = cls_score.permute(0, 2, 3, + 1).reshape(-1, self.cls_out_channels) + loss_cls = self.loss_cls( + cls_score, labels, label_weights, avg_factor=num_total_samples) + # regression loss + bbox_targets = bbox_targets.reshape(-1, 4) + bbox_weights = bbox_weights.reshape(-1, 4) + bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) + loss_bbox = self.loss_bbox( + bbox_pred, + bbox_targets, + bbox_weights, + avg_factor=num_total_samples) + return loss_cls, loss_bbox + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + cfg, + gt_bboxes_ignore=None): + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == len(self.anchor_generators) + + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas) + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = anchor_target( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + self.target_means, + self.target_stds, + cfg, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels, + sampling=self.sampling) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + num_total_samples = ( + num_total_pos + num_total_neg if self.sampling else num_total_pos) + losses_cls, losses_bbox = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + num_total_samples=num_total_samples, + cfg=cfg) + return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) + + @force_fp32(apply_to=('cls_scores', 'bbox_preds')) + def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg, + rescale=False): + assert len(cls_scores) == len(bbox_preds) + num_levels = len(cls_scores) + + mlvl_anchors = [ + self.anchor_generators[i].grid_anchors(cls_scores[i].size()[-2:], + self.anchor_strides[i]) + for i in range(num_levels) + ] + result_list = [] + for img_id in range(len(img_metas)): + cls_score_list = [ + 
cls_scores[i][img_id].detach() for i in range(num_levels) + ] + bbox_pred_list = [ + bbox_preds[i][img_id].detach() for i in range(num_levels) + ] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list, + mlvl_anchors, img_shape, + scale_factor, cfg, rescale) + result_list.append(proposals) + return result_list + + def get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + img_shape, + scale_factor, + cfg, + rescale=False): + assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) + mlvl_bboxes = [] + mlvl_scores = [] + for cls_score, bbox_pred, anchors in zip(cls_scores, bbox_preds, + mlvl_anchors): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + cls_score = cls_score.permute(1, 2, + 0).reshape(-1, self.cls_out_channels) + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1) + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + nms_pre = cfg.get('nms_pre', -1) + if nms_pre > 0 and scores.shape[0] > nms_pre: + if self.use_sigmoid_cls: + max_scores, _ = scores.max(dim=1) + else: + max_scores, _ = scores[:, 1:].max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + anchors = anchors[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + bboxes = delta2bbox(anchors, bbox_pred, self.target_means, + self.target_stds, img_shape) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_bboxes = torch.cat(mlvl_bboxes) + if rescale: + mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) + mlvl_scores = torch.cat(mlvl_scores) + if self.use_sigmoid_cls: + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + mlvl_scores = torch.cat([padding, mlvl_scores], dim=1) + det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, + cfg.score_thr, cfg.nms, + cfg.max_per_img) + return det_bboxes, det_labels diff --git a/CDARTS_detection/mmdet/models/anchor_heads/fcos_head.py b/CDARTS_detection/mmdet/models/anchor_heads/fcos_head.py new file mode 100644 index 0000000..957906d --- /dev/null +++ b/CDARTS_detection/mmdet/models/anchor_heads/fcos_head.py @@ -0,0 +1,389 @@ +import torch +import torch.nn as nn +from mmcv.cnn import normal_init + +from mmdet.core import multi_apply, multiclass_nms, distance2bbox, force_fp32 +from ..builder import build_loss +from ..registry import HEADS +from ..utils import bias_init_with_prob, Scale, ConvModule + +INF = 1e8 + + +@HEADS.register_module +class FCOSHead(nn.Module): + + def __init__(self, + num_classes, + in_channels, + feat_channels=256, + stacked_convs=4, + strides=(4, 8, 16, 32, 64), + regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), + (512, INF)), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='IoULoss', loss_weight=1.0), + loss_centerness=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0), + conv_cfg=None, + norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)): + super(FCOSHead, self).__init__() + + self.num_classes = num_classes + self.cls_out_channels = num_classes - 1 + self.in_channels = in_channels + self.feat_channels = feat_channels + self.stacked_convs = stacked_convs + self.strides = strides + self.regress_ranges = regress_ranges + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + self.loss_centerness = build_loss(loss_centerness) + self.conv_cfg = conv_cfg + self.norm_cfg = 
norm_cfg + self.fp16_enabled = False + + self._init_layers() + + def _init_layers(self): + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.norm_cfg is None)) + self.reg_convs.append( + ConvModule( + chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bias=self.norm_cfg is None)) + self.fcos_cls = nn.Conv2d( + self.feat_channels, self.cls_out_channels, 3, padding=1) + self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) + self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) + + self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) + + def init_weights(self): + for m in self.cls_convs: + normal_init(m.conv, std=0.01) + for m in self.reg_convs: + normal_init(m.conv, std=0.01) + bias_cls = bias_init_with_prob(0.01) + normal_init(self.fcos_cls, std=0.01, bias=bias_cls) + normal_init(self.fcos_reg, std=0.01) + normal_init(self.fcos_centerness, std=0.01) + + def forward(self, feats): + return multi_apply(self.forward_single, feats, self.scales) + + def forward_single(self, x, scale): + cls_feat = x + reg_feat = x + + for cls_layer in self.cls_convs: + cls_feat = cls_layer(cls_feat) + cls_score = self.fcos_cls(cls_feat) + centerness = self.fcos_centerness(cls_feat) + + for reg_layer in self.reg_convs: + reg_feat = reg_layer(reg_feat) + # scale the bbox_pred of different level + # float to avoid overflow when enabling FP16 + bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp() + return cls_score, bbox_pred, centerness + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) + def loss(self, + cls_scores, + bbox_preds, + centernesses, + gt_bboxes, + gt_labels, + img_metas, + cfg, + gt_bboxes_ignore=None): + assert len(cls_scores) == len(bbox_preds) == len(centernesses) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, + bbox_preds[0].device) + labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes, + gt_labels) + + num_imgs = cls_scores[0].size(0) + # flatten cls_scores, bbox_preds and centerness + flatten_cls_scores = [ + cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) + for cls_score in cls_scores + ] + flatten_bbox_preds = [ + bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) + for bbox_pred in bbox_preds + ] + flatten_centerness = [ + centerness.permute(0, 2, 3, 1).reshape(-1) + for centerness in centernesses + ] + flatten_cls_scores = torch.cat(flatten_cls_scores) + flatten_bbox_preds = torch.cat(flatten_bbox_preds) + flatten_centerness = torch.cat(flatten_centerness) + flatten_labels = torch.cat(labels) + flatten_bbox_targets = torch.cat(bbox_targets) + # repeat points to align with bbox_preds + flatten_points = torch.cat( + [points.repeat(num_imgs, 1) for points in all_level_points]) + + pos_inds = flatten_labels.nonzero().reshape(-1) + num_pos = len(pos_inds) + loss_cls = self.loss_cls( + flatten_cls_scores, flatten_labels, + avg_factor=num_pos + num_imgs) # avoid num_pos is 0 + + pos_bbox_preds = flatten_bbox_preds[pos_inds] + pos_bbox_targets = flatten_bbox_targets[pos_inds] + pos_centerness = flatten_centerness[pos_inds] + pos_centerness_targets = self.centerness_target(pos_bbox_targets) + + 
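+        # Note: if there are no positive locations, the else-branch below
+        # reduces to empty-tensor sums, which are zero-valued losses that
+        # still keep the bbox and centerness heads in the autograd graph.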
if num_pos > 0: + pos_points = flatten_points[pos_inds] + pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds) + pos_decoded_target_preds = distance2bbox(pos_points, + pos_bbox_targets) + # centerness weighted iou loss + loss_bbox = self.loss_bbox( + pos_decoded_bbox_preds, + pos_decoded_target_preds, + weight=pos_centerness_targets, + avg_factor=pos_centerness_targets.sum()) + loss_centerness = self.loss_centerness(pos_centerness, + pos_centerness_targets) + else: + loss_bbox = pos_bbox_preds.sum() + loss_centerness = pos_centerness.sum() + + return dict( + loss_cls=loss_cls, + loss_bbox=loss_bbox, + loss_centerness=loss_centerness) + + @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) + def get_bboxes(self, + cls_scores, + bbox_preds, + centernesses, + img_metas, + cfg, + rescale=None): + assert len(cls_scores) == len(bbox_preds) + num_levels = len(cls_scores) + + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, + bbox_preds[0].device) + result_list = [] + for img_id in range(len(img_metas)): + cls_score_list = [ + cls_scores[i][img_id].detach() for i in range(num_levels) + ] + bbox_pred_list = [ + bbox_preds[i][img_id].detach() for i in range(num_levels) + ] + centerness_pred_list = [ + centernesses[i][img_id].detach() for i in range(num_levels) + ] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list, + centerness_pred_list, + mlvl_points, img_shape, + scale_factor, cfg, rescale) + result_list.append(det_bboxes) + return result_list + + def get_bboxes_single(self, + cls_scores, + bbox_preds, + centernesses, + mlvl_points, + img_shape, + scale_factor, + cfg, + rescale=False): + assert len(cls_scores) == len(bbox_preds) == len(mlvl_points) + mlvl_bboxes = [] + mlvl_scores = [] + mlvl_centerness = [] + for cls_score, bbox_pred, centerness, points in zip( + cls_scores, bbox_preds, centernesses, mlvl_points): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + scores = cls_score.permute(1, 2, 0).reshape( + -1, self.cls_out_channels).sigmoid() + centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid() + + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + nms_pre = cfg.get('nms_pre', -1) + if nms_pre > 0 and scores.shape[0] > nms_pre: + max_scores, _ = (scores * centerness[:, None]).max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + points = points[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + centerness = centerness[topk_inds] + bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_centerness.append(centerness) + mlvl_bboxes = torch.cat(mlvl_bboxes) + if rescale: + mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) + mlvl_scores = torch.cat(mlvl_scores) + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + mlvl_scores = torch.cat([padding, mlvl_scores], dim=1) + mlvl_centerness = torch.cat(mlvl_centerness) + det_bboxes, det_labels = multiclass_nms( + mlvl_bboxes, + mlvl_scores, + cfg.score_thr, + cfg.nms, + cfg.max_per_img, + score_factors=mlvl_centerness) + return det_bboxes, det_labels + + def get_points(self, featmap_sizes, dtype, device): + """Get points according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + dtype (torch.dtype): Type of points. 
+ device (torch.device): Device of points. + + Returns: + tuple: points of each image. + """ + mlvl_points = [] + for i in range(len(featmap_sizes)): + mlvl_points.append( + self.get_points_single(featmap_sizes[i], self.strides[i], + dtype, device)) + return mlvl_points + + def get_points_single(self, featmap_size, stride, dtype, device): + h, w = featmap_size + x_range = torch.arange( + 0, w * stride, stride, dtype=dtype, device=device) + y_range = torch.arange( + 0, h * stride, stride, dtype=dtype, device=device) + y, x = torch.meshgrid(y_range, x_range) + points = torch.stack( + (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2 + return points + + def fcos_target(self, points, gt_bboxes_list, gt_labels_list): + assert len(points) == len(self.regress_ranges) + num_levels = len(points) + # expand regress ranges to align with points + expanded_regress_ranges = [ + points[i].new_tensor(self.regress_ranges[i])[None].expand_as( + points[i]) for i in range(num_levels) + ] + # concat all levels points and regress ranges + concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) + concat_points = torch.cat(points, dim=0) + # get labels and bbox_targets of each image + labels_list, bbox_targets_list = multi_apply( + self.fcos_target_single, + gt_bboxes_list, + gt_labels_list, + points=concat_points, + regress_ranges=concat_regress_ranges) + + # split to per img, per level + num_points = [center.size(0) for center in points] + labels_list = [labels.split(num_points, 0) for labels in labels_list] + bbox_targets_list = [ + bbox_targets.split(num_points, 0) + for bbox_targets in bbox_targets_list + ] + + # concat per level image + concat_lvl_labels = [] + concat_lvl_bbox_targets = [] + for i in range(num_levels): + concat_lvl_labels.append( + torch.cat([labels[i] for labels in labels_list])) + concat_lvl_bbox_targets.append( + torch.cat( + [bbox_targets[i] for bbox_targets in bbox_targets_list])) + return concat_lvl_labels, concat_lvl_bbox_targets + + def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges): + num_points = points.size(0) + num_gts = gt_labels.size(0) + + areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * ( + gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1) + # TODO: figure out why these two are different + # areas = areas[None].expand(num_points, num_gts) + areas = areas[None].repeat(num_points, 1) + regress_ranges = regress_ranges[:, None, :].expand( + num_points, num_gts, 2) + gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) + xs, ys = points[:, 0], points[:, 1] + xs = xs[:, None].expand(num_points, num_gts) + ys = ys[:, None].expand(num_points, num_gts) + + left = xs - gt_bboxes[..., 0] + right = gt_bboxes[..., 2] - xs + top = ys - gt_bboxes[..., 1] + bottom = gt_bboxes[..., 3] - ys + bbox_targets = torch.stack((left, top, right, bottom), -1) + + # condition1: inside a gt bbox + inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 + + # condition2: limit the regression range for each location + max_regress_distance = bbox_targets.max(-1)[0] + inside_regress_range = ( + max_regress_distance >= regress_ranges[..., 0]) & ( + max_regress_distance <= regress_ranges[..., 1]) + + # if there are still more than one objects for a location, + # we choose the one with minimal area + areas[inside_gt_bbox_mask == 0] = INF + areas[inside_regress_range == 0] = INF + min_area, min_area_inds = areas.min(dim=1) + + labels = gt_labels[min_area_inds] + labels[min_area == INF] = 0 + bbox_targets = bbox_targets[range(num_points), min_area_inds] + + return labels, bbox_targets 
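+
+    # Worked example for centerness_target below (illustrative): a location
+    # whose distances to the gt box sides are (left, top, right, bottom) =
+    # (10, 20, 30, 40) gets sqrt((10 / 30) * (20 / 40)) ~= 0.41, while a
+    # perfectly centered location (equal opposite distances) scores 1.0.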
+ + def centerness_target(self, pos_bbox_targets): + # only calculate pos centerness targets, otherwise there may be nan + left_right = pos_bbox_targets[:, [0, 2]] + top_bottom = pos_bbox_targets[:, [1, 3]] + centerness_targets = ( + left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( + top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) + return torch.sqrt(centerness_targets) diff --git a/CDARTS_detection/mmdet/models/anchor_heads/ga_retina_head.py b/CDARTS_detection/mmdet/models/anchor_heads/ga_retina_head.py new file mode 100644 index 0000000..c39ab8d --- /dev/null +++ b/CDARTS_detection/mmdet/models/anchor_heads/ga_retina_head.py @@ -0,0 +1,107 @@ +import torch.nn as nn +from mmcv.cnn import normal_init + +from .guided_anchor_head import GuidedAnchorHead, FeatureAdaption +from ..registry import HEADS +from ..utils import bias_init_with_prob, ConvModule +from mmdet.ops import MaskedConv2d + + +@HEADS.register_module +class GARetinaHead(GuidedAnchorHead): + """Guided-Anchor-based RetinaNet head.""" + + def __init__(self, + num_classes, + in_channels, + stacked_convs=4, + conv_cfg=None, + norm_cfg=None, + **kwargs): + self.stacked_convs = stacked_convs + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + super(GARetinaHead, self).__init__(num_classes, in_channels, **kwargs) + + def _init_layers(self): + self.relu = nn.ReLU(inplace=True) + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule(chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.reg_convs.append( + ConvModule(chn, + self.feat_channels, + 3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + + self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1) + self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2, + 1) + self.feature_adaption_cls = FeatureAdaption( + self.feat_channels, + self.feat_channels, + kernel_size=3, + deformable_groups=self.deformable_groups) + self.feature_adaption_reg = FeatureAdaption( + self.feat_channels, + self.feat_channels, + kernel_size=3, + deformable_groups=self.deformable_groups) + self.retina_cls = MaskedConv2d(self.feat_channels, + self.num_anchors * + self.cls_out_channels, + 3, + padding=1) + self.retina_reg = MaskedConv2d(self.feat_channels, + self.num_anchors * 4, + 3, + padding=1) + + def init_weights(self): + for m in self.cls_convs: + normal_init(m.conv, std=0.01) + for m in self.reg_convs: + normal_init(m.conv, std=0.01) + + self.feature_adaption_cls.init_weights() + self.feature_adaption_reg.init_weights() + + bias_cls = bias_init_with_prob(0.01) + normal_init(self.conv_loc, std=0.01, bias=bias_cls) + normal_init(self.conv_shape, std=0.01) + normal_init(self.retina_cls, std=0.01, bias=bias_cls) + normal_init(self.retina_reg, std=0.01) + + def forward_single(self, x): + cls_feat = x + reg_feat = x + for cls_conv in self.cls_convs: + cls_feat = cls_conv(cls_feat) + for reg_conv in self.reg_convs: + reg_feat = reg_conv(reg_feat) + + loc_pred = self.conv_loc(cls_feat) + shape_pred = self.conv_shape(reg_feat) + + cls_feat = self.feature_adaption_cls(cls_feat, shape_pred) + reg_feat = self.feature_adaption_reg(reg_feat, shape_pred) + + if not self.training: + mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr + else: + mask = None + cls_score = self.retina_cls(cls_feat, mask) + bbox_pred = self.retina_reg(reg_feat, mask) + return 
cls_score, bbox_pred, shape_pred, loc_pred diff --git a/CDARTS_detection/mmdet/models/anchor_heads/ga_rpn_head.py b/CDARTS_detection/mmdet/models/anchor_heads/ga_rpn_head.py new file mode 100644 index 0000000..b7788b6 --- /dev/null +++ b/CDARTS_detection/mmdet/models/anchor_heads/ga_rpn_head.py @@ -0,0 +1,127 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import normal_init + +from mmdet.core import delta2bbox +from mmdet.ops import nms +from .guided_anchor_head import GuidedAnchorHead +from ..registry import HEADS + + +@HEADS.register_module +class GARPNHead(GuidedAnchorHead): + """Guided-Anchor-based RPN head.""" + + def __init__(self, in_channels, **kwargs): + super(GARPNHead, self).__init__(2, in_channels, **kwargs) + + def _init_layers(self): + self.rpn_conv = nn.Conv2d(self.in_channels, + self.feat_channels, + 3, + padding=1) + super(GARPNHead, self)._init_layers() + + def init_weights(self): + normal_init(self.rpn_conv, std=0.01) + super(GARPNHead, self).init_weights() + + def forward_single(self, x): + x = self.rpn_conv(x) + x = F.relu(x, inplace=True) + (cls_score, bbox_pred, shape_pred, + loc_pred) = super(GARPNHead, self).forward_single(x) + return cls_score, bbox_pred, shape_pred, loc_pred + + def loss(self, + cls_scores, + bbox_preds, + shape_preds, + loc_preds, + gt_bboxes, + img_metas, + cfg, + gt_bboxes_ignore=None): + losses = super(GARPNHead, self).loss(cls_scores, + bbox_preds, + shape_preds, + loc_preds, + gt_bboxes, + None, + img_metas, + cfg, + gt_bboxes_ignore=gt_bboxes_ignore) + return dict(loss_rpn_cls=losses['loss_cls'], + loss_rpn_bbox=losses['loss_bbox'], + loss_anchor_shape=losses['loss_shape'], + loss_anchor_loc=losses['loss_loc']) + + def get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + mlvl_masks, + img_shape, + scale_factor, + cfg, + rescale=False): + mlvl_proposals = [] + for idx in range(len(cls_scores)): + rpn_cls_score = cls_scores[idx] + rpn_bbox_pred = bbox_preds[idx] + anchors = mlvl_anchors[idx] + mask = mlvl_masks[idx] + assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] + # if no location is kept, end. + if mask.sum() == 0: + continue + rpn_cls_score = rpn_cls_score.permute(1, 2, 0) + if self.use_sigmoid_cls: + rpn_cls_score = rpn_cls_score.reshape(-1) + scores = rpn_cls_score.sigmoid() + else: + rpn_cls_score = rpn_cls_score.reshape(-1, 2) + scores = rpn_cls_score.softmax(dim=1)[:, 1] + # filter scores, bbox_pred w.r.t. mask. + # anchors are filtered in get_anchors() beforehand. + scores = scores[mask] + rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, + 4)[mask, :] + if scores.dim() == 0: + rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0) + anchors = anchors.unsqueeze(0) + scores = scores.unsqueeze(0) + # filter anchors, bbox_pred, scores w.r.t. scores + if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: + _, topk_inds = scores.topk(cfg.nms_pre) + rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] + anchors = anchors[topk_inds, :] + scores = scores[topk_inds] + # get proposals w.r.t. 
anchors and rpn_bbox_pred + proposals = delta2bbox(anchors, rpn_bbox_pred, self.target_means, + self.target_stds, img_shape) + # filter out too small bboxes + if cfg.min_bbox_size > 0: + w = proposals[:, 2] - proposals[:, 0] + 1 + h = proposals[:, 3] - proposals[:, 1] + 1 + valid_inds = torch.nonzero((w >= cfg.min_bbox_size) & + (h >= cfg.min_bbox_size)).squeeze() + proposals = proposals[valid_inds, :] + scores = scores[valid_inds] + proposals = torch.cat([proposals, scores.unsqueeze(-1)], dim=-1) + # NMS in current level + proposals, _ = nms(proposals, cfg.nms_thr) + proposals = proposals[:cfg.nms_post, :] + mlvl_proposals.append(proposals) + proposals = torch.cat(mlvl_proposals, 0) + if cfg.nms_across_levels: + # NMS across multi levels + proposals, _ = nms(proposals, cfg.nms_thr) + proposals = proposals[:cfg.max_num, :] + else: + scores = proposals[:, 4] + num = min(cfg.max_num, proposals.shape[0]) + _, topk_inds = scores.topk(num) + proposals = proposals[topk_inds, :] + return proposals diff --git a/CDARTS_detection/mmdet/models/anchor_heads/guided_anchor_head.py b/CDARTS_detection/mmdet/models/anchor_heads/guided_anchor_head.py new file mode 100644 index 0000000..c3cc705 --- /dev/null +++ b/CDARTS_detection/mmdet/models/anchor_heads/guided_anchor_head.py @@ -0,0 +1,609 @@ +from __future__ import division + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import normal_init + +from mmdet.core import (AnchorGenerator, anchor_target, anchor_inside_flags, + ga_loc_target, ga_shape_target, delta2bbox, + multi_apply, multiclass_nms, force_fp32) +from mmdet.ops import DeformConv, MaskedConv2d +from ..builder import build_loss +from .anchor_head import AnchorHead +from ..registry import HEADS +from ..utils import bias_init_with_prob + + +class FeatureAdaption(nn.Module): + """Feature Adaption Module. + + Feature Adaption Module is implemented based on DCN v1. + It uses anchor shape prediction rather than feature map to + predict offsets of deformable conv layer. + + Args: + in_channels (int): Number of channels in the input feature map. + out_channels (int): Number of channels in the output feature map. + kernel_size (int): Deformable conv kernel size. + deformable_groups (int): Deformable conv group size. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + deformable_groups=4): + super(FeatureAdaption, self).__init__() + offset_channels = kernel_size * kernel_size * 2 + self.conv_offset = nn.Conv2d( + 2, deformable_groups * offset_channels, 1, bias=False) + self.conv_adaption = DeformConv( + in_channels, + out_channels, + kernel_size=kernel_size, + padding=(kernel_size - 1) // 2, + deformable_groups=deformable_groups) + self.relu = nn.ReLU(inplace=True) + + def init_weights(self): + normal_init(self.conv_offset, std=0.1) + normal_init(self.conv_adaption, std=0.01) + + def forward(self, x, shape): + offset = self.conv_offset(shape.detach()) + x = self.relu(self.conv_adaption(x, offset)) + return x + + +@HEADS.register_module +class GuidedAnchorHead(AnchorHead): + """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.). + + This GuidedAnchorHead will predict high-quality feature guided + anchors and locations where anchors will be kept in inference. + There are mainly 3 categories of bounding-boxes. + - Sampled (9) pairs for target assignment. (approxes) + - The square boxes where the predicted anchors are based on. + (squares) + - Guided anchors. + Please refer to https://arxiv.org/abs/1901.03278 for more details. 
+
+    Args:
+        num_classes (int): Number of classes.
+        in_channels (int): Number of channels in the input feature map.
+        feat_channels (int): Number of channels of the feature map.
+        octave_base_scale (int): Base octave scale of each level of
+            feature map.
+        scales_per_octave (int): Number of octave scales in each level of
+            feature map.
+        octave_ratios (Iterable): Octave aspect ratios.
+        anchor_strides (Iterable): Anchor strides.
+        anchor_base_sizes (Iterable): Anchor base sizes.
+        anchoring_means (Iterable): Mean values of anchoring targets.
+        anchoring_stds (Iterable): Std values of anchoring targets.
+        target_means (Iterable): Mean values of regression targets.
+        target_stds (Iterable): Std values of regression targets.
+        deformable_groups (int): Group number of DCN in
+            FeatureAdaption module.
+        loc_filter_thr (float): Threshold to filter out unconcerned regions.
+        loss_loc (dict): Config of location loss.
+        loss_shape (dict): Config of anchor shape loss.
+        loss_cls (dict): Config of classification loss.
+        loss_bbox (dict): Config of bbox regression loss.
+    """
+
+    def __init__(
+            self,
+            num_classes,
+            in_channels,
+            feat_channels=256,
+            octave_base_scale=8,
+            scales_per_octave=3,
+            octave_ratios=[0.5, 1.0, 2.0],
+            anchor_strides=[4, 8, 16, 32, 64],
+            anchor_base_sizes=None,
+            anchoring_means=(.0, .0, .0, .0),
+            anchoring_stds=(1.0, 1.0, 1.0, 1.0),
+            target_means=(.0, .0, .0, .0),
+            target_stds=(1.0, 1.0, 1.0, 1.0),
+            deformable_groups=4,
+            loc_filter_thr=0.01,
+            loss_loc=dict(
+                type='FocalLoss',
+                use_sigmoid=True,
+                gamma=2.0,
+                alpha=0.25,
+                loss_weight=1.0),
+            loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
+            loss_cls=dict(
+                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
+            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):
+        super(AnchorHead, self).__init__()
+        self.in_channels = in_channels
+        self.num_classes = num_classes
+        self.feat_channels = feat_channels
+        self.octave_base_scale = octave_base_scale
+        self.scales_per_octave = scales_per_octave
+        self.octave_scales = octave_base_scale * np.array(
+            [2**(i / scales_per_octave) for i in range(scales_per_octave)])
+        self.approxs_per_octave = len(self.octave_scales) * len(octave_ratios)
+        self.octave_ratios = octave_ratios
+        self.anchor_strides = anchor_strides
+        self.anchor_base_sizes = list(
+            anchor_strides) if anchor_base_sizes is None else anchor_base_sizes
+        self.anchoring_means = anchoring_means
+        self.anchoring_stds = anchoring_stds
+        self.target_means = target_means
+        self.target_stds = target_stds
+        self.deformable_groups = deformable_groups
+        self.loc_filter_thr = loc_filter_thr
+        self.approx_generators = []
+        self.square_generators = []
+        for anchor_base in self.anchor_base_sizes:
+            # Generators for approxs
+            self.approx_generators.append(
+                AnchorGenerator(anchor_base, self.octave_scales,
+                                self.octave_ratios))
+            # Generators for squares
+            self.square_generators.append(
+                AnchorGenerator(anchor_base, [self.octave_base_scale], [1.0]))
+        # one anchor per location
+        self.num_anchors = 1
+        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+        self.cls_focal_loss = loss_cls['type'] in ['FocalLoss']
+        self.loc_focal_loss = loss_loc['type'] in ['FocalLoss']
+        if self.use_sigmoid_cls:
+            self.cls_out_channels = self.num_classes - 1
+        else:
+            self.cls_out_channels = self.num_classes
+
+        # build losses
+        self.loss_loc = build_loss(loss_loc)
+        self.loss_shape = build_loss(loss_shape)
+        self.loss_cls = build_loss(loss_cls)
+        self.loss_bbox = build_loss(loss_bbox)
+
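+        # fp16_enabled is flipped to True by mmdet's fp16 utilities when
+        # mixed-precision training is enabled; the force_fp32-decorated
+        # loss() below then casts the listed tensor args back to fp32.
+        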
self.fp16_enabled = False + + self._init_layers() + + def _init_layers(self): + self.relu = nn.ReLU(inplace=True) + self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1) + self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2, + 1) + self.feature_adaption = FeatureAdaption( + self.feat_channels, + self.feat_channels, + kernel_size=3, + deformable_groups=self.deformable_groups) + self.conv_cls = MaskedConv2d(self.feat_channels, + self.num_anchors * self.cls_out_channels, + 1) + self.conv_reg = MaskedConv2d(self.feat_channels, self.num_anchors * 4, + 1) + + def init_weights(self): + normal_init(self.conv_cls, std=0.01) + normal_init(self.conv_reg, std=0.01) + + bias_cls = bias_init_with_prob(0.01) + normal_init(self.conv_loc, std=0.01, bias=bias_cls) + normal_init(self.conv_shape, std=0.01) + + self.feature_adaption.init_weights() + + def forward_single(self, x): + loc_pred = self.conv_loc(x) + shape_pred = self.conv_shape(x) + x = self.feature_adaption(x, shape_pred) + # masked conv is only used during inference for speed-up + if not self.training: + mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr + else: + mask = None + cls_score = self.conv_cls(x, mask) + bbox_pred = self.conv_reg(x, mask) + return cls_score, bbox_pred, shape_pred, loc_pred + + def forward(self, feats): + return multi_apply(self.forward_single, feats) + + def get_sampled_approxs(self, featmap_sizes, img_metas, cfg): + """Get sampled approxs and inside flags according to feature map sizes. + + Args: + featmap_sizes (list[tuple]): Multi-level feature map sizes. + img_metas (list[dict]): Image meta info. + + Returns: + tuple: approxes of each image, inside flags of each image + """ + num_imgs = len(img_metas) + num_levels = len(featmap_sizes) + + # since feature map sizes of all images are the same, we only compute + # approxes for one time + multi_level_approxs = [] + for i in range(num_levels): + approxs = self.approx_generators[i].grid_anchors( + featmap_sizes[i], self.anchor_strides[i]) + multi_level_approxs.append(approxs) + approxs_list = [multi_level_approxs for _ in range(num_imgs)] + + # for each image, we compute inside flags of multi level approxes + inside_flag_list = [] + for img_id, img_meta in enumerate(img_metas): + multi_level_flags = [] + multi_level_approxs = approxs_list[img_id] + for i in range(num_levels): + approxs = multi_level_approxs[i] + anchor_stride = self.anchor_strides[i] + feat_h, feat_w = featmap_sizes[i] + h, w, _ = img_meta['pad_shape'] + valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h) + valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w) + flags = self.approx_generators[i].valid_flags( + (feat_h, feat_w), (valid_feat_h, valid_feat_w)) + inside_flags_list = [] + for i in range(self.approxs_per_octave): + split_valid_flags = flags[i::self.approxs_per_octave] + split_approxs = approxs[i::self.approxs_per_octave, :] + inside_flags = anchor_inside_flags( + split_approxs, split_valid_flags, + img_meta['img_shape'][:2], cfg.allowed_border) + inside_flags_list.append(inside_flags) + # inside_flag for a position is true if any anchor in this + # position is true + inside_flags = ( + torch.stack(inside_flags_list, 0).sum(dim=0) > 0) + multi_level_flags.append(inside_flags) + inside_flag_list.append(multi_level_flags) + return approxs_list, inside_flag_list + + def get_anchors(self, + featmap_sizes, + shape_preds, + loc_preds, + img_metas, + use_loc_filter=False): + """Get squares according to feature map sizes and guided + anchors. 
+
+        Args:
+            featmap_sizes (list[tuple]): Multi-level feature map sizes.
+            shape_preds (list[tensor]): Multi-level shape predictions.
+            loc_preds (list[tensor]): Multi-level location predictions.
+            img_metas (list[dict]): Image meta info.
+            use_loc_filter (bool): Use loc filter or not.
+
+        Returns:
+            tuple: square approxs of each image, guided anchors of each image,
+                loc masks of each image
+        """
+        num_imgs = len(img_metas)
+        num_levels = len(featmap_sizes)
+
+        # since feature map sizes of all images are the same, we compute
+        # the squares only once
+        multi_level_squares = []
+        for i in range(num_levels):
+            squares = self.square_generators[i].grid_anchors(
+                featmap_sizes[i], self.anchor_strides[i])
+            multi_level_squares.append(squares)
+        squares_list = [multi_level_squares for _ in range(num_imgs)]
+
+        # for each image, we compute multi level guided anchors
+        guided_anchors_list = []
+        loc_mask_list = []
+        for img_id, img_meta in enumerate(img_metas):
+            multi_level_guided_anchors = []
+            multi_level_loc_mask = []
+            for i in range(num_levels):
+                squares = squares_list[img_id][i]
+                shape_pred = shape_preds[i][img_id]
+                loc_pred = loc_preds[i][img_id]
+                guided_anchors, loc_mask = self.get_guided_anchors_single(
+                    squares,
+                    shape_pred,
+                    loc_pred,
+                    use_loc_filter=use_loc_filter)
+                multi_level_guided_anchors.append(guided_anchors)
+                multi_level_loc_mask.append(loc_mask)
+            guided_anchors_list.append(multi_level_guided_anchors)
+            loc_mask_list.append(multi_level_loc_mask)
+        return squares_list, guided_anchors_list, loc_mask_list
+
+    def get_guided_anchors_single(self,
+                                  squares,
+                                  shape_pred,
+                                  loc_pred,
+                                  use_loc_filter=False):
+        """Get guided anchors and loc masks for a single level.
+
+        Args:
+            squares (tensor): Squares of a single level.
+            shape_pred (tensor): Shape predictions of a single level.
+            loc_pred (tensor): Loc predictions of a single level.
+            use_loc_filter (bool): Use loc filter or not.
+ + Returns: + tuple: guided anchors, location masks + """ + # calculate location filtering mask + loc_pred = loc_pred.sigmoid().detach() + if use_loc_filter: + loc_mask = loc_pred >= self.loc_filter_thr + else: + loc_mask = loc_pred >= 0.0 + mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors) + mask = mask.contiguous().view(-1) + # calculate guided anchors + squares = squares[mask] + anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view( + -1, 2).detach()[mask] + bbox_deltas = anchor_deltas.new_full(squares.size(), 0) + bbox_deltas[:, 2:] = anchor_deltas + guided_anchors = delta2bbox( + squares, + bbox_deltas, + self.anchoring_means, + self.anchoring_stds, + wh_ratio_clip=1e-6) + return guided_anchors, mask + + def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts, + anchor_weights, anchor_total_num): + shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2) + bbox_anchors = bbox_anchors.contiguous().view(-1, 4) + bbox_gts = bbox_gts.contiguous().view(-1, 4) + anchor_weights = anchor_weights.contiguous().view(-1, 4) + bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0) + bbox_deltas[:, 2:] += shape_pred + # filter out negative samples to speed-up weighted_bounded_iou_loss + inds = torch.nonzero(anchor_weights[:, 0] > 0).squeeze(1) + bbox_deltas_ = bbox_deltas[inds] + bbox_anchors_ = bbox_anchors[inds] + bbox_gts_ = bbox_gts[inds] + anchor_weights_ = anchor_weights[inds] + pred_anchors_ = delta2bbox( + bbox_anchors_, + bbox_deltas_, + self.anchoring_means, + self.anchoring_stds, + wh_ratio_clip=1e-6) + loss_shape = self.loss_shape( + pred_anchors_, + bbox_gts_, + anchor_weights_, + avg_factor=anchor_total_num) + return loss_shape + + def loss_loc_single(self, loc_pred, loc_target, loc_weight, loc_avg_factor, + cfg): + loss_loc = self.loss_loc( + loc_pred.reshape(-1, 1), + loc_target.reshape(-1, 1).long(), + loc_weight.reshape(-1, 1), + avg_factor=loc_avg_factor) + return loss_loc + + @force_fp32( + apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) + def loss(self, + cls_scores, + bbox_preds, + shape_preds, + loc_preds, + gt_bboxes, + gt_labels, + img_metas, + cfg, + gt_bboxes_ignore=None): + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == len(self.approx_generators) + + # get loc targets + loc_targets, loc_weights, loc_avg_factor = ga_loc_target( + gt_bboxes, + featmap_sizes, + self.octave_base_scale, + self.anchor_strides, + center_ratio=cfg.center_ratio, + ignore_ratio=cfg.ignore_ratio) + + # get sampled approxes + approxs_list, inside_flag_list = self.get_sampled_approxs( + featmap_sizes, img_metas, cfg) + # get squares and guided anchors + squares_list, guided_anchors_list, _ = self.get_anchors( + featmap_sizes, shape_preds, loc_preds, img_metas) + + # get shape targets + sampling = False if not hasattr(cfg, 'ga_sampler') else True + shape_targets = ga_shape_target( + approxs_list, + inside_flag_list, + squares_list, + gt_bboxes, + img_metas, + self.approxs_per_octave, + cfg, + sampling=sampling) + if shape_targets is None: + return None + (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num, + anchor_bg_num) = shape_targets + anchor_total_num = ( + anchor_fg_num if not sampling else anchor_fg_num + anchor_bg_num) + + # get anchor targets + sampling = False if self.cls_focal_loss else True + label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 + cls_reg_targets = anchor_target( + guided_anchors_list, + inside_flag_list, + gt_bboxes, + 
img_metas, + self.target_means, + self.target_stds, + cfg, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=label_channels, + sampling=sampling) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + num_total_samples = ( + num_total_pos if self.cls_focal_loss else num_total_pos + + num_total_neg) + + # get classification and bbox regression losses + losses_cls, losses_bbox = multi_apply( + self.loss_single, + cls_scores, + bbox_preds, + labels_list, + label_weights_list, + bbox_targets_list, + bbox_weights_list, + num_total_samples=num_total_samples, + cfg=cfg) + + # get anchor location loss + losses_loc = [] + for i in range(len(loc_preds)): + loss_loc = self.loss_loc_single( + loc_preds[i], + loc_targets[i], + loc_weights[i], + loc_avg_factor=loc_avg_factor, + cfg=cfg) + losses_loc.append(loss_loc) + + # get anchor shape loss + losses_shape = [] + for i in range(len(shape_preds)): + loss_shape = self.loss_shape_single( + shape_preds[i], + bbox_anchors_list[i], + bbox_gts_list[i], + anchor_weights_list[i], + anchor_total_num=anchor_total_num) + losses_shape.append(loss_shape) + + return dict( + loss_cls=losses_cls, + loss_bbox=losses_bbox, + loss_shape=losses_shape, + loss_loc=losses_loc) + + @force_fp32( + apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) + def get_bboxes(self, + cls_scores, + bbox_preds, + shape_preds, + loc_preds, + img_metas, + cfg, + rescale=False): + assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len( + loc_preds) + num_levels = len(cls_scores) + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + # get guided anchors + _, guided_anchors, loc_masks = self.get_anchors( + featmap_sizes, + shape_preds, + loc_preds, + img_metas, + use_loc_filter=not self.training) + result_list = [] + for img_id in range(len(img_metas)): + cls_score_list = [ + cls_scores[i][img_id].detach() for i in range(num_levels) + ] + bbox_pred_list = [ + bbox_preds[i][img_id].detach() for i in range(num_levels) + ] + guided_anchor_list = [ + guided_anchors[img_id][i].detach() for i in range(num_levels) + ] + loc_mask_list = [ + loc_masks[img_id][i].detach() for i in range(num_levels) + ] + img_shape = img_metas[img_id]['img_shape'] + scale_factor = img_metas[img_id]['scale_factor'] + proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list, + guided_anchor_list, + loc_mask_list, img_shape, + scale_factor, cfg, rescale) + result_list.append(proposals) + return result_list + + def get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + mlvl_masks, + img_shape, + scale_factor, + cfg, + rescale=False): + assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) + mlvl_bboxes = [] + mlvl_scores = [] + for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds, + mlvl_anchors, + mlvl_masks): + assert cls_score.size()[-2:] == bbox_pred.size()[-2:] + # if no location is kept, end. + if mask.sum() == 0: + continue + # reshape scores and bbox_pred + cls_score = cls_score.permute(1, 2, + 0).reshape(-1, self.cls_out_channels) + if self.use_sigmoid_cls: + scores = cls_score.sigmoid() + else: + scores = cls_score.softmax(-1) + bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) + # filter scores, bbox_pred w.r.t. mask. + # anchors are filtered in get_anchors() beforehand. 
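+            # mask is the flattened (H * W * num_anchors) boolean tensor built
+            # in get_guided_anchors_single(), so boolean indexing below keeps
+            # the score and bbox_pred rows aligned with the surviving anchors.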
+ scores = scores[mask, :] + bbox_pred = bbox_pred[mask, :] + if scores.dim() == 0: + anchors = anchors.unsqueeze(0) + scores = scores.unsqueeze(0) + bbox_pred = bbox_pred.unsqueeze(0) + # filter anchors, bbox_pred, scores w.r.t. scores + nms_pre = cfg.get('nms_pre', -1) + if nms_pre > 0 and scores.shape[0] > nms_pre: + if self.use_sigmoid_cls: + max_scores, _ = scores.max(dim=1) + else: + max_scores, _ = scores[:, 1:].max(dim=1) + _, topk_inds = max_scores.topk(nms_pre) + anchors = anchors[topk_inds, :] + bbox_pred = bbox_pred[topk_inds, :] + scores = scores[topk_inds, :] + bboxes = delta2bbox(anchors, bbox_pred, self.target_means, + self.target_stds, img_shape) + mlvl_bboxes.append(bboxes) + mlvl_scores.append(scores) + mlvl_bboxes = torch.cat(mlvl_bboxes) + if rescale: + mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) + mlvl_scores = torch.cat(mlvl_scores) + if self.use_sigmoid_cls: + padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) + mlvl_scores = torch.cat([padding, mlvl_scores], dim=1) + # multi class NMS + det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, + cfg.score_thr, cfg.nms, + cfg.max_per_img) + return det_bboxes, det_labels diff --git a/CDARTS_detection/mmdet/models/anchor_heads/retina_head.py b/CDARTS_detection/mmdet/models/anchor_heads/retina_head.py new file mode 100644 index 0000000..73d598b --- /dev/null +++ b/CDARTS_detection/mmdet/models/anchor_heads/retina_head.py @@ -0,0 +1,111 @@ +import numpy as np +import torch.nn as nn +from mmcv.cnn import normal_init + +from .anchor_head import AnchorHead +from ..registry import HEADS +from ..utils import bias_init_with_prob, ConvModule + +from ..bbox_heads.auto_head.build_head import build_search_head + + +@HEADS.register_module +class RetinaHead(AnchorHead): + + def __init__(self, + num_classes, + in_channels, + stacked_convs=4, + octave_base_scale=4, + scales_per_octave=3, + search_head=None, + conv_cfg=None, + norm_cfg=None, + **kwargs): + self.stacked_convs = stacked_convs + self.search_head = search_head + self.octave_base_scale = octave_base_scale + self.scales_per_octave = scales_per_octave + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + octave_scales = np.array( + [2**(i / scales_per_octave) for i in range(scales_per_octave)]) + anchor_scales = octave_scales * octave_base_scale + super(RetinaHead, self).__init__( + num_classes, in_channels, anchor_scales=anchor_scales, **kwargs) + + def _init_layers(self): + self.relu = nn.ReLU(inplace=True) + self.cls_convs = nn.ModuleList() + self.reg_convs = nn.ModuleList() + + if self.search_head is not None: + if 'cls' in self.search_head.branch: + self.cls_convs = build_search_head(self.search_head) + else: + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, + conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) + if 'reg' in self.search_head.branch: + self.reg_convs = build_search_head(self.search_head) + else: + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.reg_convs.append( + ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, + conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) + else: + for i in range(self.stacked_convs): + chn = self.in_channels if i == 0 else self.feat_channels + self.cls_convs.append( + ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, + conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) + self.reg_convs.append( + 
ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, + conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) + + self.retina_cls = nn.Conv2d( + self.feat_channels, + self.num_anchors * self.cls_out_channels, + 3, + padding=1) + self.retina_reg = nn.Conv2d( + self.feat_channels, self.num_anchors * 4, 3, padding=1) + + def init_weights(self): + for m in self.cls_convs.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, std=0.01) + + for m in self.reg_convs.modules(): + if isinstance(m, nn.Conv2d): + normal_init(m, std=0.01) + + bias_cls = bias_init_with_prob(0.01) + normal_init(self.retina_cls, std=0.01, bias=bias_cls) + normal_init(self.retina_reg, std=0.01) + + def forward_single(self, x): + cls_feat = x + reg_feat = x + if self.search_head is not None: + if 'cls' in self.search_head.branch: + cls_feat = self.cls_convs(cls_feat)[0] + else: + for cls_conv in self.cls_convs: + cls_feat = cls_conv(cls_feat) + if 'reg' in self.search_head.branch: + reg_feat = self.reg_convs(reg_feat)[0] + else: + for reg_conv in self.reg_convs: + reg_feat = reg_conv(reg_feat) + else: + for cls_conv in self.cls_convs: + cls_feat = cls_conv(cls_feat) + for reg_conv in self.reg_convs: + reg_feat = reg_conv(reg_feat) + cls_score = self.retina_cls(cls_feat) + bbox_pred = self.retina_reg(reg_feat) + return cls_score, bbox_pred diff --git a/CDARTS_detection/mmdet/models/anchor_heads/rpn_head.py b/CDARTS_detection/mmdet/models/anchor_heads/rpn_head.py new file mode 100644 index 0000000..0a5fd37 --- /dev/null +++ b/CDARTS_detection/mmdet/models/anchor_heads/rpn_head.py @@ -0,0 +1,104 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import normal_init + +from mmdet.core import delta2bbox +from mmdet.ops import nms +from .anchor_head import AnchorHead +from ..registry import HEADS + + +@HEADS.register_module +class RPNHead(AnchorHead): + + def __init__(self, in_channels, **kwargs): + super(RPNHead, self).__init__(2, in_channels, **kwargs) + + def _init_layers(self): + self.rpn_conv = nn.Conv2d( + self.in_channels, self.feat_channels, 3, padding=1) + self.rpn_cls = nn.Conv2d(self.feat_channels, + self.num_anchors * self.cls_out_channels, 1) + self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) + + def init_weights(self): + normal_init(self.rpn_conv, std=0.01) + normal_init(self.rpn_cls, std=0.01) + normal_init(self.rpn_reg, std=0.01) + + def forward_single(self, x): + x = self.rpn_conv(x) + x = F.relu(x, inplace=True) + rpn_cls_score = self.rpn_cls(x) + rpn_bbox_pred = self.rpn_reg(x) + return rpn_cls_score, rpn_bbox_pred + + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + img_metas, + cfg, + gt_bboxes_ignore=None): + losses = super(RPNHead, self).loss( + cls_scores, + bbox_preds, + gt_bboxes, + None, + img_metas, + cfg, + gt_bboxes_ignore=gt_bboxes_ignore) + return dict( + loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox']) + + def get_bboxes_single(self, + cls_scores, + bbox_preds, + mlvl_anchors, + img_shape, + scale_factor, + cfg, + rescale=False): + mlvl_proposals = [] + for idx in range(len(cls_scores)): + rpn_cls_score = cls_scores[idx] + rpn_bbox_pred = bbox_preds[idx] + assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] + anchors = mlvl_anchors[idx] + rpn_cls_score = rpn_cls_score.permute(1, 2, 0) + if self.use_sigmoid_cls: + rpn_cls_score = rpn_cls_score.reshape(-1) + scores = rpn_cls_score.sigmoid() + else: + rpn_cls_score = rpn_cls_score.reshape(-1, 2) + scores = rpn_cls_score.softmax(dim=1)[:, 1] + 
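+            # with use_sigmoid_cls the RPN emits a single objectness logit per
+            # anchor (cls_out_channels == 1), so a plain sigmoid suffices;
+            # otherwise it emits two logits (bg, fg) and the softmax
+            # foreground column is taken as the score.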
rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) + if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: + _, topk_inds = scores.topk(cfg.nms_pre) + rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] + anchors = anchors[topk_inds, :] + scores = scores[topk_inds] + proposals = delta2bbox(anchors, rpn_bbox_pred, self.target_means, + self.target_stds, img_shape) + if cfg.min_bbox_size > 0: + w = proposals[:, 2] - proposals[:, 0] + 1 + h = proposals[:, 3] - proposals[:, 1] + 1 + valid_inds = torch.nonzero((w >= cfg.min_bbox_size) & + (h >= cfg.min_bbox_size)).squeeze() + proposals = proposals[valid_inds, :] + scores = scores[valid_inds] + proposals = torch.cat([proposals, scores.unsqueeze(-1)], dim=-1) + proposals, _ = nms(proposals, cfg.nms_thr) + proposals = proposals[:cfg.nms_post, :] + mlvl_proposals.append(proposals) + proposals = torch.cat(mlvl_proposals, 0) + if cfg.nms_across_levels: + proposals, _ = nms(proposals, cfg.nms_thr) + proposals = proposals[:cfg.max_num, :] + else: + scores = proposals[:, 4] + num = min(cfg.max_num, proposals.shape[0]) + _, topk_inds = scores.topk(num) + proposals = proposals[topk_inds, :] + return proposals diff --git a/CDARTS_detection/mmdet/models/anchor_heads/ssd_head.py b/CDARTS_detection/mmdet/models/anchor_heads/ssd_head.py new file mode 100644 index 0000000..db86c47 --- /dev/null +++ b/CDARTS_detection/mmdet/models/anchor_heads/ssd_head.py @@ -0,0 +1,193 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import xavier_init + +from mmdet.core import AnchorGenerator, anchor_target, multi_apply +from .anchor_head import AnchorHead +from ..losses import smooth_l1_loss +from ..registry import HEADS + + +# TODO: add loss evaluator for SSD +@HEADS.register_module +class SSDHead(AnchorHead): + + def __init__(self, + input_size=300, + num_classes=81, + in_channels=(512, 1024, 512, 256, 256, 256), + anchor_strides=(8, 16, 32, 64, 100, 300), + basesize_ratio_range=(0.1, 0.9), + anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), + target_means=(.0, .0, .0, .0), + target_stds=(1.0, 1.0, 1.0, 1.0)): + super(AnchorHead, self).__init__() + self.input_size = input_size + self.num_classes = num_classes + self.in_channels = in_channels + self.cls_out_channels = num_classes + num_anchors = [len(ratios) * 2 + 2 for ratios in anchor_ratios] + reg_convs = [] + cls_convs = [] + for i in range(len(in_channels)): + reg_convs.append( + nn.Conv2d( + in_channels[i], + num_anchors[i] * 4, + kernel_size=3, + padding=1)) + cls_convs.append( + nn.Conv2d( + in_channels[i], + num_anchors[i] * num_classes, + kernel_size=3, + padding=1)) + self.reg_convs = nn.ModuleList(reg_convs) + self.cls_convs = nn.ModuleList(cls_convs) + + min_ratio, max_ratio = basesize_ratio_range + min_ratio = int(min_ratio * 100) + max_ratio = int(max_ratio * 100) + step = int(np.floor(max_ratio - min_ratio) / (len(in_channels) - 2)) + min_sizes = [] + max_sizes = [] + for r in range(int(min_ratio), int(max_ratio) + 1, step): + min_sizes.append(int(input_size * r / 100)) + max_sizes.append(int(input_size * (r + step) / 100)) + if input_size == 300: + if basesize_ratio_range[0] == 0.15: # SSD300 COCO + min_sizes.insert(0, int(input_size * 7 / 100)) + max_sizes.insert(0, int(input_size * 15 / 100)) + elif basesize_ratio_range[0] == 0.2: # SSD300 VOC + min_sizes.insert(0, int(input_size * 10 / 100)) + max_sizes.insert(0, int(input_size * 20 / 100)) + elif input_size == 512: + if basesize_ratio_range[0] == 0.1: # SSD512 COCO + min_sizes.insert(0, 
int(input_size * 4 / 100)) + max_sizes.insert(0, int(input_size * 10 / 100)) + elif basesize_ratio_range[0] == 0.15: # SSD512 VOC + min_sizes.insert(0, int(input_size * 7 / 100)) + max_sizes.insert(0, int(input_size * 15 / 100)) + self.anchor_generators = [] + self.anchor_strides = anchor_strides + for k in range(len(anchor_strides)): + base_size = min_sizes[k] + stride = anchor_strides[k] + ctr = ((stride - 1) / 2., (stride - 1) / 2.) + scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] + ratios = [1.] + for r in anchor_ratios[k]: + ratios += [1 / r, r] # 4 or 6 ratio + anchor_generator = AnchorGenerator( + base_size, scales, ratios, scale_major=False, ctr=ctr) + indices = list(range(len(ratios))) + indices.insert(1, len(indices)) + anchor_generator.base_anchors = torch.index_select( + anchor_generator.base_anchors, 0, torch.LongTensor(indices)) + self.anchor_generators.append(anchor_generator) + + self.target_means = target_means + self.target_stds = target_stds + self.use_sigmoid_cls = False + self.cls_focal_loss = False + self.fp16_enabled = False + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform', bias=0) + + def forward(self, feats): + cls_scores = [] + bbox_preds = [] + for feat, reg_conv, cls_conv in zip(feats, self.reg_convs, + self.cls_convs): + cls_scores.append(cls_conv(feat)) + bbox_preds.append(reg_conv(feat)) + return cls_scores, bbox_preds + + def loss_single(self, cls_score, bbox_pred, labels, label_weights, + bbox_targets, bbox_weights, num_total_samples, cfg): + loss_cls_all = F.cross_entropy( + cls_score, labels, reduction='none') * label_weights + pos_inds = (labels > 0).nonzero().view(-1) + neg_inds = (labels == 0).nonzero().view(-1) + + num_pos_samples = pos_inds.size(0) + num_neg_samples = cfg.neg_pos_ratio * num_pos_samples + if num_neg_samples > neg_inds.size(0): + num_neg_samples = neg_inds.size(0) + topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) + loss_cls_pos = loss_cls_all[pos_inds].sum() + loss_cls_neg = topk_loss_cls_neg.sum() + loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples + + loss_bbox = smooth_l1_loss( + bbox_pred, + bbox_targets, + bbox_weights, + beta=cfg.smoothl1_beta, + avg_factor=num_total_samples) + return loss_cls[None], loss_bbox + + def loss(self, + cls_scores, + bbox_preds, + gt_bboxes, + gt_labels, + img_metas, + cfg, + gt_bboxes_ignore=None): + featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] + assert len(featmap_sizes) == len(self.anchor_generators) + + anchor_list, valid_flag_list = self.get_anchors( + featmap_sizes, img_metas) + cls_reg_targets = anchor_target( + anchor_list, + valid_flag_list, + gt_bboxes, + img_metas, + self.target_means, + self.target_stds, + cfg, + gt_bboxes_ignore_list=gt_bboxes_ignore, + gt_labels_list=gt_labels, + label_channels=1, + sampling=False, + unmap_outputs=False) + if cls_reg_targets is None: + return None + (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, + num_total_pos, num_total_neg) = cls_reg_targets + + num_images = len(img_metas) + all_cls_scores = torch.cat([ + s.permute(0, 2, 3, 1).reshape( + num_images, -1, self.cls_out_channels) for s in cls_scores + ], 1) + all_labels = torch.cat(labels_list, -1).view(num_images, -1) + all_label_weights = torch.cat(label_weights_list, + -1).view(num_images, -1) + all_bbox_preds = torch.cat([ + b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) + for b in bbox_preds + ], -2) + all_bbox_targets = 
torch.cat(bbox_targets_list,
+                                     -2).view(num_images, -1, 4)
+        all_bbox_weights = torch.cat(bbox_weights_list,
+                                     -2).view(num_images, -1, 4)
+
+        losses_cls, losses_bbox = multi_apply(
+            self.loss_single,
+            all_cls_scores,
+            all_bbox_preds,
+            all_labels,
+            all_label_weights,
+            all_bbox_targets,
+            all_bbox_weights,
+            num_total_samples=num_total_pos,
+            cfg=cfg)
+        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
diff --git a/CDARTS_detection/mmdet/models/backbones/__init__.py b/CDARTS_detection/mmdet/models/backbones/__init__.py
new file mode 100644
index 0000000..ba515f0
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/backbones/__init__.py
@@ -0,0 +1,12 @@
+from .resnet import ResNet, make_res_layer
+from .resnext import ResNeXt
+from .ssd_vgg import SSDVGG
+from .hrnet import HRNet
+from .mobilenetv2 import MobileNetV2
+from .detnas import DetNas
+from .fbnet import FBNet
+from .mnasnet import MnasNet
+from .mobilenetv3 import SSDMobilenetV3
+from .efficientnet import SSDEFFB0
+
+__all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG', 'HRNet', 'MobileNetV2', 'DetNas', 'FBNet', 'MnasNet', 'SSDMobilenetV3', 'SSDEFFB0']
diff --git a/CDARTS_detection/mmdet/models/backbones/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/models/backbones/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..87e7c27b15671a4f8be0b151f79381cec3b39c9f
Binary files /dev/null and b/CDARTS_detection/mmdet/models/backbones/__pycache__/__init__.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/backbones/__pycache__/builder.cpython-36.pyc b/CDARTS_detection/mmdet/models/backbones/__pycache__/builder.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..42a8004aea84632731c27ad7e01cff15935e7695
Binary files /dev/null and b/CDARTS_detection/mmdet/models/backbones/__pycache__/builder.cpython-36.pyc differ
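For reference, the per-level anchor base sizes that SSDHead.__init__ in ssd_head.py above derives can be reproduced standalone. A minimal sketch, assuming the SSD300-COCO setting (input_size=300, basesize_ratio_range=(0.15, 0.9), six feature levels); the printed values follow directly from the arithmetic in that constructor:

import numpy as np

input_size = 300
num_levels = 6                        # len(in_channels)
min_ratio, max_ratio = 15, 90         # basesize_ratio_range scaled by 100
step = int(np.floor(max_ratio - min_ratio) / (num_levels - 2))  # 18

min_sizes = [input_size * r // 100
             for r in range(min_ratio, max_ratio + 1, step)]
max_sizes = [input_size * (r + step) // 100
             for r in range(min_ratio, max_ratio + 1, step)]
# the extra low-resolution level prepended for SSD300 COCO
min_sizes.insert(0, input_size * 7 // 100)    # 21
max_sizes.insert(0, input_size * 15 // 100)   # 45

print(min_sizes)  # [21, 45, 99, 153, 207, 261]
print(max_sizes)  # [45, 99, 153, 207, 261, 315]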
diff --git a/CDARTS_detection/mmdet/models/backbones/__pycache__/dropblock.cpython-36.pyc b/CDARTS_detection/mmdet/models/backbones/__pycache__/dropblock.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..645be306d7679ac6f246da77868f9fca657e815b
Binary files /dev/null and b/CDARTS_detection/mmdet/models/backbones/__pycache__/dropblock.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/backbones/__pycache__/efficientnet.cpython-36.pyc b/CDARTS_detection/mmdet/models/backbones/__pycache__/efficientnet.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe8a3afac310f03e8860f976d97a1290d60c2ca4
Binary files /dev/null and b/CDARTS_detection/mmdet/models/backbones/__pycache__/efficientnet.cpython-36.pyc differ
HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/backbones/__pycache__/fbnet.cpython-36.pyc b/CDARTS_detection/mmdet/models/backbones/__pycache__/fbnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7d08fbb396411c21186b608ca22fc35f3de09bb GIT binary patch literal 2694 zcmZ8jOOG4J5$>Khhr`{~ih6CFgb54-G@RW;34|aB5@nT@BS&Tg?b-qcHU`7#COO=B zse5QGnPCp00-t=zPsq)GBmW@RzUH)lzz1KFsvho2c9UIQ-Cg~v`l@Pv+3Weg|8^Gt z$0y{!;}+zM^YJE5anH*|IDg&k%vGxN^~v{u&5 zgD{}&4!y7kGIq9;?}od2%*poh8{v(7Kir4D!QAX--Vgi0d#sb)%5R6aDdD&0cRnP{ zXWiF?b-6LWyD>L3yvOJ%83vbdPcpisK!;S(_krwIf-^oz3m(NHnMoM(qvNiG$QTtz#31u$xUUx#1b(sH}gciHW0@iy?L3w?)!*+plz9be2U3khZ zWj{V1@mjf)alz{-xx}3>J4kLIxeY|%T@nij7HrLJm45=JCN*uy#+Vx$LP>2hI=33S zfgdG}(U8b)OrTB!QjyDW#B0dB1J#A ze0W)~F~m2GKkqRI_j~!xixx(~NOh*XDC=bfws&PMQU*+b07+(XQGiu0QLO?UScCga zS%PN^WsHlVr@CozEhq*cqq^r@z_J!QPP?OWtC%rxVP%ceScVC<^5Rx)&LLno7WJ7B9Jw%89Fr7fkt&%HoUT(R2Rv6J@T_N_j9UU#0ad+&lf` z*~#B(v6zu}b&ID<_k;9I6T#3WqxPM1Y;9ez|lk`+(V$ueTBil)^f zDi-;$D-KWr8uNFN`w@~KBN+fu0r=CkB0}#QjF>7b;!rEuG*Fazg^gMw5%(?^P z{=|Tp2Y@n@nHZd2K6pV2uMQaC_V2JGgerUV7-G6cLV~s|r&EYfc-PecGC?h3$OLSV zFJwI!^FcfS%nV8~7-6vKiTfa^Oc+xxE=3RlWb3ExHXh81Um$rO>RDR?T9F z@&HJw%mk7GO_i-$f3AzQqP1v`N)w$az37VX1peY#_;dhI*6QeRu5ho@qg7Q0CH zfL#6$%Fq-Tjl~lr9|0NC5&k{QP(_?`$WVS1wY~{_5JfK+an{c01fU&6JjC^wMOAN` zX%e1gQS|2GLn?lNLfWY?3t)PZh!Joc-@D`YbY>hLD7!8(&w08zED8;u@a9$baIz?L zeu^{Yzs1wX%7P3!Q&yekT-l)UY6PNoO jO3R^6l=s{AV&6kW-ZB|A0=36K{gfzAzzBOVPyQqeF&L#IZObk~x`_O3=NXU3lrCO^O* zP}1@XYN`ASskm2b$1a%Y%$_~>=&p3`tl#f z<^@-^qpC6o6g@es!X%4?;1d<}p_BzVWmM%d=qVnRNfxRiO8CPNe!aCE%n!>8ZGWVR z8k=TAdvQ*Qu89~WViu9h*uV^&Q7m{B#z9AmEU&%|jC#8$Iq*>~cu0R$KUsN#=Nr2J z>Sc1uFOWq#pGCq;<`5}Q6;gTADe#IXm{@4-O(ozsUK}OkSPHH@{UiG9+hSh3pRi#z c;h$~%z6y{m!(GFCtTK(ZZLWKbw)uGV4+@mx-v9sr literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/backbones/__pycache__/fbnet_blocks.cpython-36.pyc b/CDARTS_detection/mmdet/models/backbones/__pycache__/fbnet_blocks.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e60cc85684fe59e6819aad8f0dc9ac4a4c157376 GIT binary patch literal 8652 zcmb_hOK%(36`mJ|LyCIK@-vAN=f$+O5gAF^G)NOWvFjpM1IuXvA^i3;_h@zQBlsd#cGxyA$`#9%2k9&D*db;|X zAGZC?R}AA{#?W;c&mW=04^6`ejKJ&~jr4AItcE2VyJK~nMoFIChAa1S!waoW1*M8I z6cX`wShXNArQofo_dQIpH-(70V-ZO$1gO@O`moV$Of!&w~XC-(UoR>4s0ywV( z*TK1-aTdXOHFyo2*D}r$IIjn9fb&Mixd6_aK^>fW##s)`b)){)GYWgHZfooIns(O@ zq89Gfy)|Wl-*WgC7w<009h7(uB!MdWM&ImPeS1(sN7kBl;+^(h-PGRwAdHfBa!_}4 zwH@v4C(XEh80x8Z)NF10Q51G#UF(5e)Z92x?I8Szp{CIJ4-al`_jbco&}i#m-;Zv# z4gx>E8SnMto4dO~n1rpQ-HX!ao4dULJ>J~#TRR)QD2#7zZbV_y-01dNJMoRZ1MPj# z^>;S{|HB1NAH`79ASYH=218cIHi?n04QsRmYv5vi_S!Qb(0FEpV2$Mmw{Nrmx}*4| z@<=KqRT8cuB{e>Av_%vd`^2#PM220)!^vR-J8+(sF?4B7&ETz?1$jobP{u*K>r)1G zQ&AtMpT`nfPc{!Luw@?3&^$0K1vT;L7sihM%e&W$6)JfKA219G5xYc~R z$TO|=;>4q4e37(WtoKX#yxCQ`SK?-=xbv~{%LKCQ+Zax*Av^-3uA|z#FtSX`Et*M z@fZI;Q=pNbRpl>@ox&*uFP?=Gm{{;DXB|@TeWoE_9y{bIbzKa5E?pxjJg0LJxSdDW z*T+ub^yWQ`_sPur)v+U=0%er_Gq#%;Wv`7La%8c2{udwv4D8I8X*Uk{nmf0S6Bgzw z>*3yn$UgUWH6cZ)Xf4!1$2XcModJ-$9zX{BhB_x&E)C}xa`8rG#2^!0XnyVU}xxa{mHrfG&o!wigu^{ugK2FTx@cnz3C z;5;!J&bQb%HMlt{>GH>qw?6Onx-CkO7w_zdC0+qZj08KXfz`1FhRL0kV__H4v{9VE zKquxJ`YFZxd!bT{e%fm`+fh4dHnroctyp#*XyU|ScT>@A4{c3|UVyODY___795+Wcl@Z=}ncJ@vJ(f>rh?!39`hgFFB!jD%{Q$?kFZ1>gOaY zAllthz5Try)=O~?DVPh%S0}qqSt7rA59L_37f;UZc@5Le7hC;JRG?dI_IE5b(>K=v z%#JxQzI2YPBm2lXD)lknj&a2}a`DE(aR6$4x9{{zgVIsCU+!Q74SO)fLzX(uzz{B5 z+S1BleZ+H0wvg_3seyV)ad|*Cj}e^5?eJ?|h8gVcC!wywPLuZ5es4dnd#O~YEN!l7 z^Nu!utIa<)?AG3vwsGPx0zLuDuhCl0Gz_z9I_4bclDUlg;qr*Aax|Kv(YmYXo)!J0 
zdW&R*g!ofGBPk96a=nO3sLOXzGStmDqTTbOPP}>wHIBoJcTPf}2B$LWW4xfY%42Lv=uP<-XUi^sD`;&FYs`dLNV$ii=W4@lYx#d^d$sLz$)vXwD?Fow;1G=xGXl zRdIO0FXr)Xp|c2h%y*Um1~Vu^c7CGw(zZ75>~af#VB8k_IHb8 ztw&&#aRAo!TVdTwL+~&0N&SV?tQm(>v!3}ljzh)soKKbNspI1$J+%`m9GJ-oT6(th zD}MDph^{cbXeI}Hp)PMk@*oqJg@|v^RupNMna66N9Y!?mFlOl49lyPcH8-Q4+V#6l z?S37$x3&{qL8CbFqXdqY@0wlz00D=YgRb18^H~G8;cZ1taYfo;Mw3dlx*mSD&!a!T zTd$2eDDD{4Z4wGy6{}EpSp9^AMxGxlUFY*jM|Kyim`>Y3jH+25nXYvWhFUWhObv=#^_t1DKgR+m^jG}ivw)8Zx z&;>NuKm91Z3Vg}J(4=xd0+)*S=W!-P(5EQ5Gg%TZ&;qBpxL95jWGjmEZpq~Fzs&KV>qzKQpXj0Ommv@aJoDsVfTL{ zrCv@Ah*L|wu9%XjiQ-Vd2Z4W%;r;e39o z{XC|awVc2dw+9_FK}w{S1y#TkL+84Y2X-4L<8fOotre#D;u7E`B(A@X=W(cNgP)&4kxnfc|rp^|WD9AstZ-!f>LS701REleSDpZzKi1{v4At?Gp-s>suyi%0YgS14Z zc%Zl$ohY^V?;s@k%>Qp?4XPm_#;SK%!szU45K^>PWMrO-zgj!w}49A{cMU> zk;MwRWfCS@>0bs#XSx!8v*$-K;y~&Q#Vv}f6oB)nFWr%>)~8aWd_JbXhvZSz=`_Wy z7Cnte{8wmQAiie3Z>^Yz^ZCC0dBjt1MExCp#v~RJ2EGd~3J(f@n)y#lao%bjq<$LL zdB{$QNV?7{b|b|+a*$_^laM-N7Dip{q2!LUB1mn4Q(WT#?+M_U^AqrHP(A_piu@TP zfWIp-vqhxyn$C(K{@wnBgkXO3U(dv>Dgb4&s!Z4Sy&y>3rP=I9W!9WVQj^j!AL%XSe!jMfrY&J z6TB@6Pogx?!K}wETG3N7zwY8*k&L{F-B6dX zHNc<|MzG Pvrt)>UAVe1w=nl#0iwQ& literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/backbones/__pycache__/feature_hooks.cpython-36.pyc b/CDARTS_detection/mmdet/models/backbones/__pycache__/feature_hooks.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01de8650c2ececbae740471d2b3d7ca6c9b6f8bb GIT binary patch literal 1516 zcmZux&2Aev5a!SRS(aTNJs3p+A;Ds^r0li#&m`B4 zh1DgSJV6hAhQ1Q7J^2-UX=iq&T1i8JBXWl1%s2DRe!sIb_~G^}{?j4kZ?Z8stRKVF zV-SJ}nvtB&XwGJg;+|zZcV-SH&xzo|c}s-TvY9K~6B2qiaEFA&%EWF?RbG zC8Uz#7&albeibW|##w@A*Mq_0F--jp2uWsCkQozHu(xE!dp|QrxWa?ID}3-3*x@r7 zn_9`A%JM=d7&ndpCSW2@Vd^&^`d1ouOj^3+uSG+T$;%f@r*Q;7AWOI5jSK&tAq_dF zjoY%N4>sq32={`ipBwL-HGWGexg4$Z9_R(=`C2c0Ju;36TKa}wj(#J?HC{^_68VBa ztXtEBLL@jaObqV?rv4N}3T|KSzF)XL07Ah(8>Szn&!fJSA5pE`U97So7nkX6{W@Qp*`e=N2%Q{7zOY4~HO4{vtseX@@ zh$(Z-v- zY5#vFeADhe(OLq(%Hl_*N~I8r!lc{1XpOt~pn*b;mf768T_WrbVh|CetInjh{(3>r z>=vGgI*c^8ez%(!cNI|s^bo95a@@%E)r>7 zQ&*W(BQRS}$g4Dwp`-Tk$`ExTRoTa*7O zKJpO6s^{ndfM}sV;rjw#n0MHh%<#7FNPyKafW@ckOSq%H0s-+pz;%z|D$Z)DyE2Eo z2l6#ot=Cuj0~~g6D%Ui-2gfva8lm@~LW|Gd19Nw78hTh>WIzf=t`2neA*Xt5Z!Tu-ds=j`L<yX8PPDta;-bUaaS?D&n{Zl@W>AUXAQFK+BqJF@-602h$* z>u=w9fBnPi&AZ@O-t094Q4djH54?R}jwl%Ep`tv|sh^QJ>t57stOuROF7ll?4*U=! zHE439pOt2!8Zy3C(}jj5)N2`Gh>S3Ola}eRzGePI+tNi&*q`TXS&^FA2aNBpms(FOvGIP1NQB92XDi+y{_q%?>Ntc7(Bxe~sQ5Q+9#ri-yG+M@? 
zrfUPOl^N)R%)lIE4^5QKfmSy`(dyZOh1444k>&`OG*{Ij`rJTWJ61&b4RIewqN_y&QW&9{9 zG((WQF!X~cDTclFq-w6+3*u(f@B+Wy>BZe%oY*W!jjip(X8EC9?ol(|sW-YwsowT> z{Yig&Jjk4?+3H2U5;=@JT`i+%*YS6fb4t3STZW|`9dDFvJO^_l z5ndjxe}jaCesqpZJU8&Lzzp@voFlP1OT=o73_4PY%S8v?9dH2E(h86ePb(d+kKfLiiQx48Qug#5H zZ&#}~Z>v_XJ+IY!;5}(q$s`#lmBhlyZF_OzeBTRt{!J-6vSP_suok@K@>R;w9?SDc zl04+NBkS#GTfRm_PQF386G$%NiD=I?qo|vDQMZr150)nTz`AoRqLR*SW2DpaC=h}7 z6^15tbw+g#Hgg&T8%>zHNqF6SCpO~9X zFRGY{BRY@5$ZK~4UmZL;bA0+Lnd5|lIc+O2qG?ee@0z%`$O>ZM_VEGVs_CD+7NP-etpNs*MuSs8lo-^o22%C}rc|^hE4f zF;2!AtAQMbqK8H9bz~q4+5T%#1^S_mO*}&ah(oNk4A=>>1?C|RJhrTaEz7jXo{$Hj zS1Vb01^wkJC2vro7Fe0e+s!Dh0oqOC(9dHSt7j>2%X5;oBCC zgHy-8ZKUcO07Y|U+SN34zp-ufw}=IuMDVf(xDk-cTM@Lfz>kOY|D!2n9woG(GeYFPh&)~ljNE_ z&c({6GF=@$^eKoeaaqz%Xu=Y?$HpO)CDymfybbwJ#9L^JhFRGCuP_JnSpp3^^ykAo zY&VyMzNGCMU+VJWz<%z0Y&b~z^X7911AwuA^PK+pDtMYt_XmCaJw@=&Ki1Q)_{{@t zv^E3#i}Wi%gXG}bX9gAw1C975q>0M-c1V+>6o90?+^Cq6G%O#eB-izXfI}*miA0Js z$%VcreTbKl6qT>A#I4ctsPf}fTCf-J(w~{hDFBA0z<*<8P@?WHp@UV5(bO*9e zrrp#_`aIHk=mez)Oi-03ApJmfOQbKLCFZ%LG{|%tOJ7KJ$kD!}T}Y2C7zbyLov+FK zqXxP8^rPjchmY9bBSVLZkJw9eu!Mm_!0DmBjcx5;fFFStsbZl#+)HXh=^?y}fpJaS z`{n~uLYaOT8xVA@Mg9c|`X@s}7(=taKE-8in5AQeL;Z8IY;@q>(1zw-mmFLuC*ls8 zsyWnE9g81XON$OTi{B?iV<(m^4+|dD6}f+sRTB%!1uD`mD<$V5mwF9}-e>Jqv-)*f zsYKtG-=&7HjT3Sgk{tYwN1hbFLP0ri*w+Q4te?Stkp3=X=Ss#)$RCu)JCJsQ6q8b` z6v>?qy|xc`+Nsx7#DRRVUf=6^L0Xfm*F~oRvMHL4m|aHpR+MfgwJKS{$l4?`sh8UD zgP>lY%o;=f4z*-ohdgo-t2{*AoN=u42Mqcz|3 z;g|fGftW1g=LDj*$2zG$k=x30Np{j#Chp)&N()g_?UJ!F){&T%H zBVH6I!~$|9aaNoZi^$E2Q(_5GU0J-uJWq==;5o-J-aK~u0vGm*vw1JhYF5&~P8Ti75 z^4Mx__ask(=iqyd9T#ys0>{o)8j_B=i~^+zBk;rYJOrq5hq%G7>NR4Xx#CU4hc5dq zD(Lu`VrpfSte`+U};mLsJKS0p}a8a-6P6#i(W1-@g0ajheIm z;k|peZ&q*Kc)u#i>4M&v#p~WigPcqH5tS?}nd4{;7UUQ}K|n$Vx}0DMXnnvE1UbO@ zM4DrW!e;l0oDYg;GM`XeXQ-SiqMBoBVQ9xi(*NwLAZkGma;*r~m;t?n#~I`f4P8sy zk&||`-E7*fFtE8@xiv>18v`EmsLKckc15e7K>ma%z%eSG*5Ha!X$CombET9{(8CJo7(`{Mj-8v(Myz;wk>$u404MQQv(Jv8q~Pg@wPwSJr30@0{|@-g$$*ttAPb)%6!8@=zsP@b3C8_)>=h?YWvSi&ZCk{f1)O%8-RSsDt zyZA1+)Ep%(bwU{nN;B5ko1(o=3MzV6pti*?FjYdtT^3aXC~+VTWN}jfyfC3$9&zDr zvdVMk{1r9or0J@>GpSuWUVBB}MJ*znSMZYeC>g_$@;((An*2U;NtPE!y;k`s)Exf0 zI$_EjmDf(BpH8972;!{G!~3ao#h3;$A`fvSP3$Pr-u)|{Xn;h^l{h}3_yd2LMMUp} zSt#d`QU@{p)09%Ch)BnQH|!vtM+tA_Dej@jr#wZmo$fDA(fQQ_3{w6wB3eO#86qqwK5mjXYVc57BK$wk)>OTSQAO_miShb6@gi~;~chd8D1g^L;F`Z310 zgmDT;W(r|^2Bc6IHrqOzW<)TC{1-J|XbkeOrnw{Y$q#@vB3d{)59ir)*!xRFcj@;K z!pISy%kULu$ACk=%GM)=9j5??r%edknkWB-U!@>9BK%*12uiOobWmVvsx1;$Fgjdy zI$Nwnj?o)EnbH|s7y(^%&=uM+rhr3jZlW;Kn+^r5y#>ioAt^;~NpbpIPvSil=zWG3 zZ<4s(Mn*Gm-VmKqWJ+k{v(SX1K5r1-u!xpR<2I8MZs8;|s_(8xJOk`!Gt~qd@B)7+bf)E3iQrFxcQEYoJ?SMB<|!oj-c+e z-_Yn^iu2%VGPr$?!R=As>3$Z(WHjvVVk!d$ zB!c$X(wDIJ(;A&hq^}ivO>Kw*L>d=TbXz)QI2+_)(T}w7SbhBh1*imEB@6-UkYWMQ z1Jn|4C?jFwO{$050Nz2#hHA<8??AOMX8Y*|R7R!>Q0J$Q5jOg0M)CM8xLP|c5qY0x6qNo zq)riJ5!au(Og+BQo8a>^+Gv7XHEgc>D`9g>#yoBpP2?tr`gz;R{=pa{r0pGJg#Um7 zS)&+sR-UIDpF?eE(CrmA62}l1%D9{ke~FT|YsR?ykr#swj5JBJhpE)+48TQdY!)9xYS z-bC~aU_uf?P{l1wWj=A5012>EVZ%={s7i9g2BChEhvRWuv82UIa-+@U`xNU2eAiVW z$?8jj*OL6ZveWBU;qOX{CM3Cdl1`Iy@UBR53?#V_N&X)EJ9XP>vUDZRWU8`DP4xCL zu}F%99@HQC_!o#c`U{c|7YzxJw%HjgrANI4c$ytHa@UN5d3DsMJCB8e=1mW~DmAdg z_CN#>#`2BKIA@G=eCxus&@Kf={1 zh3QP8ek?{crlxro8;35#*=aKRx0z>Q%=0SqERK05DNm+A-(aki?^j#{NP{R4vagu; z%oOjBn0IN+`#STU9r4C(&YLNBrhz-OpW+^tj&lD}-*cFMS@9nrI;~`@N=vYKwy@}u zlFFR?eae#uD_u(dfD)1t`G=ISQ&Xp$N69KB6rIQhC4v$PN2E{57A1F);1ci^%C93K zES#cOrG%_~RXFBBzk`NfO zbW~hmPeRlr%V%5#S3Qg%_bA~rX;O#^3&4wUjmSShlEzBvfN`WW1ohwO^J#Z9*h>}f zi~|MJS61dd%26Pe*j?E{R2T27&E_rs zjB3fS%S}osPLs41k|rd7kCO35jb_agrr7)+@kFGI?0M60%#j;V{$9~6n)b5us&m#^ 
zu+MWGUs;q9S|&Cw<3uQ`MS35K6w4AEIWU|C|*>3;?J=--6}CzsyY5Cb2$X%c%Y{DW2Q!}`Zy;4|^4OuNi^A+*G9%Tfh^M9nhQPTe#c2o3^ literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/backbones/__pycache__/mnasnet.cpython-36.pyc b/CDARTS_detection/mmdet/models/backbones/__pycache__/mnasnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7a25d5e59b857f9f3092e0591a23e41c7363cbe GIT binary patch literal 5047 zcmb_gOK%(36`nh94j-Z<#+L24VH39q)6`0w!hNN&Y$r~ehmuBa46rGtBi<`&C~`>M z89I_W1d7l=Hnove&|SMN3Uu9NQFPUxa91t5;;!qg`kgxzCD}=V0v%$`+&TB$*Li(s z_)4`J-1uTQI=EyQ|1wTpF4`No(>gN@ZgAE&+QpN#nSM8WR@*|$?ArsU?J$E|-0r&r zuk8)|wvTrjRce>;bo#-d+%C^~m7dkBwrje7slBA1_4YD%xz}GAthQH~A*^15`@9sh z9g_#V{D8HeI>W2*DtPD4@M?Ssyz_4wyv~;&7%Gl4wv-nqqkhy8cRLO(f{UId+_!P3FM?!7kKs0RBWIlDCO1j<4_)xAoK0D7 z?HIZJh)pf*)!Cd`7$b??!BX~i9HlK=nMtC8EwMiqNft+a<=!49_pf2Ii_Kl*Ek{|{ zjSXc#D2s=f0PS!T$!L)N!H`d3i7$S*;8it`7!WWCFv7Nzq;(47up1>)TOhZ+n;sh$K;(i0ta9_@}z99;DLFZ$u^3H_MRkG0T`R*fY4K|ivLXUmoqJYAz+ zdt&>k1DkuBGQf;;o14n*N?0(}XVVA$Kb*6?B}T^vdJc=FbQl=OJO=~$W`oI#c<&JN z_&Rpz^fhpVL5ZYo(oqqTHXRk=Zhva*FdWK#P-;88LP-EY?dl|8w7pMZ?Y562R#p8c z$$cR+!9Ngb%*Ro`d%TWr{QA9>o08n(ZijS17d^>%6bk8!FnDHUEH|cR&zu?zE|D9d zm0P^L=cdif{setJi?cm*_)-q#@4+CUles+)w+{}>?FN~O(cZZOBe=a%!JV=!$jZ4( zN$sJ@{oKtxUV3QfK3uQ#h|Oof_(DE+u)4#|i}QEP7d)hO{`dJvqi1(ye${TKWt^yj zQ#we3&ws^^jj+lMXuPsn1d0dkor#tiagh2-9H}oH$>FvNwkZRKX?!FWR%u!7a+*al z3-3q$v1kWr2JxcpCo)VkF-lveyg>8oC^SzNY}J&Ljz>aD3M=J@VVuNS7|IoTt=&vh zxB}A38&VFXS{kP!lp+l~!z4kz`Lbx1lr|)}q_QD)UzO6mcofnZT7j}t(ch6L;jWjd zh=Wm-@K93nQ|_LSiRjDAG({;H4~i3cf*Ca~-GR?&Q0ZjK?GL+MSeHWcs5z1%lTn-q z-W-h6thp_kQ4{df9LnYgLhqC(L+y?81^{QL?7C zQvd2@8dzT?xZ|Bt8t^h%ydM*e;hXyJix3L3F#Q3EU=DSmj%EZ)_5VP>fF!)cX3f>qAT41Ss+fG z@P&ICb&4X>)u=;-W`R1@2L>>Q3d+-%v#h*#QJ!qVIW5d5btWWGnUOK(iSFMw78y@B zPT>&A(?w~8N(-7K{q2)D1QP;>98ep$0n9-@r>qVv;&#^sB@SR;zj?sO({Cm81SktF zNP4^s429(Ut%{y`UzcWfa|C?oWj?RvW&;j0@lZ}K?b-78;MEIWVxnb)rHn>;94Oht zC?o?10q*hT#k<`lgskx8iFfchxGs{#Z$5G-{-iVsCgn+GQbn>T%~z@&Bv~~NrVLWs zlcju#+qxLPj**pKEnCXHyq1@9Kde?Hl)ueRYq* zf#$AbZUfkQd=jK3W@O8~73zNkDV&1BWu+*jW>mQq=B6@nMjS@E&CkdITC4IIJmdzECWtBz$CPGx+(Cj`)R*dMBrYL~QiniI z*E(S;vJ6>E(pIQQ&r}6KlOfqffV{r0kaY#7{2dWxV+>S|jFPTUZZsN+gzIRL`ech@ zw^i4f+YJZfex^ZAuXSo;ReTy?$MY2x%M|M)!QZ=0Dj7WSO z#AnfPAIqiJK{OhgE_0n4(qi#zn66o27p)a`j@cGrWDV(Z4e4@?B&?^cqxw877ofMu zloWClX18&tWG94*(FfAyW{*+P*EJ>xtBHMZEras{1<4(+d}L0XiHqPv9-S}>#h$(A z$}73mbBG5-y8>sT?<;e&sHk0KjrcaI2$6*VFThespePV17KD5cD{H07pQ6?B3N3%1 zxF3Ni8;VMm)k!i*0HK_IbOG}8`D!zyURx^*5j@2+v#~pH3 zx<~^Hzs_bS0I{G=wFB4Jj{%Z?ynkudh?%04{o%U<;ys)e36KDa}E0CqIS~-s;W@j&Am8P zZUNNF=1~?YFF_R$^+RP3qHGss;l9jtajKjnA%~&=Be0oDToL2{bgj`DBO$J;WPLCfft6iRG1y#b)rDH39$#_&HWa|@T z(s`&=l0PKwt3-Z8L~DY+!V2&u+?4|&H;61YdK|49ayQU5#9crxLbCB~zoOF@A^rg6 z8qiv!;XfeNdRCG(l#>l*XIJ_9aWzTw((PLD8jZ7IS4Lb?L4$vaP$j`h^bZQzMpta@)rG*$IMM$v;2(|PRGZyVb`}qWauAdFZ^6XB8om_W7^UN( z^s$K4>YzUsikwJRQ0}|BUMxO3mI`?x=S5ACx=FLj!e1838A-~+lx2C+_71{$5Fy>N z!xM!TEmEiQUoFn^8U<3YQ(QA?3a zdUohY>hcmANG~dK3UcV7rvmv4`Y-gH<6L`*fF63zrTx8Gic;b%v2SPI%)FU-^WOWt z;VZRT`1*JI@t0RE>)+Oe$3yuJ-s~SB+~RCtb&4X)d7yRcvgnSu={6L$Nk)L zt&WFPmv!Ua^r?_a@Q*~6@JT#4bue6#u$^uQN>6qEQd9OXtXf%$<<`vZ+cS$-@Uso&e#VrSv)pZ2Gd2x#tM71jV2^HtXCK(;>*p?U?{5Ac@r5I+wRSRQhR zyC)?D&Wxe0!aeSvP>FikoZ(kOpWwttAA81T9wb)3VCiPhq7vq-_G_j;NxRU0#<13+ z)kwckU(7?i!$Vci{TX}9`eZPzT#z?krCQ28&epBG!r3LO1=G8Vofb8-{_;s+WCvd` zdHcB;%ktZW%=zx9_mgiQSvf{z>GLDN{mYMyS@53I&4s-cTEYy_NkAk1YV^b{7(iv) z9JMUrWbeUT{hl06#+i0^lQ`RfZI+UBJczqO7Pcyv(6R06(m3WEcB})Q3}TgxQdvh! z(QKc~Y&W!*O~yh>vLPKr#_*#^`glkBZ!UcGFG? 
zH7N!Y9quQbi!|cN5OVn;bXJa{?jX*xDEiv^?!k@y(NJuGroE#{oZjdj@i@DYjYrvy z;gAc3-C_4d`NnX>#UQ(}8+Q+ON2x%U(QYyjsZdY1HpfS@h4o&-o4p2Nu{!fv)vmIZ z9Wd8kLoKvhtl{|f3#`FfPMtN`8eShYm#v$x4?eXXU;d%%Z=Rh_mjVzy^d+Z%6L0n+ z2+YTlEjY5(XE~d}{2vm}lpiAU!lc-C+m`F-D_;OXnZ{{$C|@F;A-^I{X>TMyjU_j- zeQ+!+uWlc&Kew!T$F?VHB!Xg75ukS3KABV+4+Tsrj3TISGN5ubik={t6)i?<^4A!q zYxAS&ioqa?&X?3?vQ1)XWyVz3P{>@6fYq-XP8XHML)J`B_84RlY#FyHdhyG=%&!n| z5Loc~iOp+!p$WuGyn#UM3Xd=I<}*Z%j?Y(k3qiTWukuyC_KbA`eu=MxQ#N5bgi5dJ z$_K^{wuSoRR*xpOtpC1#x`9t(K_7);$cax$JEyd~(`67i>X91B?!Hl2ff_sTb5|XW zg|10BR6=gnlQHjBFn}I1Fbe&2@)_B{_rtw%rWNJpcFs(I#}~oem0RFB1uwO8XX+Sb z*xc^9gg?L@-;_!`n3u{x*n(F9ujp4@wAL^lyk2Qq&0Pd`4*)^X;e(d)5@?VkoJHll z3`(KdiYf&J^sH$uuK@(={Uy8&9soEnDLB zbwJ*{C2vkK@=5)X_2fSY`4`p+DWo-Mja$l}QHh4=%4kAq z7Eih&1MH}y0Ze>Pj*bzYk%RTX4G0^e?jC^400))IrX-#MRm#JsTLqZNpOVJ4+CCE6T>A zo9rD$oR6d4coIRRu19-Ph+{Edb^B5gL&HxKuJ)thWT12n5Qg1$_haDWKv#h`i<;4f ze1q0`mk32z`4*A4iQEIx-tJ)3J?LBmEDBj#S*gzWrRw|(Hi;)j}Rek}qo zuZjM3^sWQ;%-5z?)8;h6ud*iib=I(tTNgs+IkHh2Ll1o^zL9P+3YQ8NPe?Yi=NP6- zJ)vTyWRYXsG`qr<2+q!d!ytzC$b@cwMY0esft$z67iTxE-Lt95F(^u#slPx0)8dkI z;K{qRfFh?pypA&R>OSk)aMx>7cDR4A?b|eyC7}1P}(On&@+8yhZ>DxoLk3-;QbA6M$T;+n`q)J z>qu-G3^>e=uUypX_nl;svygfzG8y&eDBxt7f&%2v74D5QYic8|gNMro)Og||R_~x@ z9RzZG_#Tw#86}efakWu?zl5>BS4NFU({CAW8T@e1M$0W#LDY6A;y$_c$VvlM}zyb98ZAEl8&W{qiP`QJYK(eJQs&84u@7uD>R4m9twbJ z`|vh7A6$=aT$_M2XX+~46LS|!gVTm7`F2>M*uz!;`{u9j^U2rTDZ9*2<6oZae zE^hcNO?Ay}O1_HT@;5|oft>yi59P)cxkz_p%lsubznZ5 zX=?09uNBpaN(R~H`QH-SGaMaWVA0ps&<2}Q4V2A2t)~-=Bst~f1oaNxBvhE literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/backbones/__pycache__/mobilenetv3.cpython-36.pyc b/CDARTS_detection/mmdet/models/backbones/__pycache__/mobilenetv3.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11562e9df9f5e424366b9b81b3584f03927ff422 GIT binary patch literal 11069 zcmcgyTWllOd7c}G!;2_NtCe=OYlq(0j_G)H+tprtTd((GyRl_ATF1>K_GC0?NR31e zxo1XNiJ^ita+6*n!|j8ErY?#$O^~2{=u?6Ap?wbEpf3fA4p1aOQEUaYK%Ucw_WS=K zDM~A+FHT6DGiT2I{OAAw^Zoys3nL@>ue`G6e*dPT{JS#r8%6pBgs815ico~wQ0(-n zu4%T09Ic@?b=y!WS8rsRrfni^h)g4UM-isTHmqjO&LQuTofntwg1BNA#Z`MmJnff6 z`9oEViSc(c_UKEBI3Xt9Rm6m^t(A9mdki@z#UyejdpYA`N}PIEu}_F+#I!i`u4+#R z17#;`nm8*S!u#al{bBJbyifTP;6-KVWss;Wu8wh?-9&iulaKg!?LG~yC_lS)8{KAuu-$My-=ogyM^7E$212xe zsH@7eU1bd%va7DCLKFI~wnj155$kJ47bR25t}661$})Hbt237BXo#QnLNB2xy~L>( zs+q)ag>VwnYq-s}lNch{bQ1M;l97I6<5E>i)GbNu`6ES6;_-`DXIEQIe*tk=-`;S; zS#Mjo(QMRiMYGMO@ME6|n5Ji&E#Wt!*%jAYUulItsxhPpm00a6MB80O1=YKXqcb(trK*u+qK&pM z#nHZYfOuxei zX2)DmM3b0jw5INASQ=)OlBzcWc@{M!^`8zo7&pn*TJj#KK=Y26#njG;;gNggRrPi? 
zOA8BKaKDq}9S8fk(V%qEaqe!ojb4ed(rPt$J@R36nvC?8>-mjFf5|jH==~5)h?h^p zPayE}Ep@_RLLS;^CJa@18qr{*Ra+M$yv?+|Ic#%QShUR|C-Qijq9BTRW_jB!(5943 zEc*>``Y zawD#+`!e(!l_===`3ioLCYk8&X8pF;pD#U$hnSDPiNtYy)YZUEMps9i>1sk<*W|_6 z>_VcElMR=Ury=j<*c#+LjXWKB@5H&a{2=#HNac#M^EE>B?hKbwp01SUyS-acxp) zZNwl3B(~!xIii_EmBCsKH;(<#p6FBh`eyw#cPpiLVo*(cs$X`)jSm%TVVI2d3lCbA zu1AqCNgdykvLz3ypmEx9)kwMhGc>^`D5@grbK8LPdG4P=Nkq#bi8J|csYNvnCCyUH zYUiQjxaD|(B>7>1B*rI|Oa$M-7Otssx=YGAeWwFPTViH`oIZ)l=^V9>l~Wd_SyYTH zM2ecnT=Qz@?6Gs}iL6Hl6aDBpm|7k$&=r^xoe!)j>?yY=3J&jH&39v`xzUK3e4rd_ z52oCJd`WIlYY{#6Rma1}PpIGFp(&9-Ftuoth+0ISDZrX3C5AYx;Q+&%K1F2)QV5uE zW~rl9p6C>ZnC`hw@hN6f=DX*9p;oJzRJYLnCVCW0IpBtmSq?Ot^xM*Lt_)VAf9%$$ zHLE)BU@9%uoTR~Ig`!6gC0WN2Ef3w2knsLV+CWK1w)`|j1B&*UlT{%J77;w6ow77T zEtM_P(plg$NT^+XJsTBc1){EYwOHGQ3K1ITp5t677aUqzP4DXO zsXIpaP8Z;SPRCtsC)3Tym%D1HiOi14txVbJ;tV|vAh~YHPMqnt#z(Y~W`FQ=fU zdi@?Q=^EIrZ0DzylqD)|WkFQDR=C+}Y>@m`f@ZtnH~lbn`E|vOd{IHZE9*d5VH~ka zy&=IoepGQoQIUSUA;Soj{7S1=%)y0Kx4bIvR%1Vs|8+wp_lv^Juoama`QrObm%iQ0#LcvlIv00Ph!?( zYoi_6g>a+k_*(!0ag^kQ-;P)B1(AO{WfBI7S$Q5?mE?mc2&32yJwGuTK@{7Cq1h&S zyVbVy2lKVZJf?x;@iLvZD{vH>K?o2U*;xnzC6YpV7^Q01T*skaj}o;hNj&96iU$0~ zXKpfbsN3gIBcdZ%$rPbliu$-#gesg+r?pe6q|!tgh%afA0s^yNs_cX+@;Ga83e)*U zr?AL9|e_cx~0Kg#hU76%)kxmr*%_Pq#DX0a$ ztmYGaLpBnoF=zV`$Vg}K@UcTDkZpOg)ppumoQwdS`?B4_wt?Rgqv^XLX#)XFAIJz~ z!&verrT|gChKGEiA3-f&r!+;tZzo@Q?fOf%mTui~Zr-|m{mobJIIrEhzLZQnkiC58 zCYs)&rq57xgye?f)#|^A0v2us2aJKgrOMZl=k=j1JxTa)P{kQUF;pW0lwubulCXe) z5DQ~3i(H*gb%wWl>aI>gx0J4i6vS;ppDTeIF$>&*rXk>tGxD=|B>83k?gscha2tsU zj)N2>g%{n}TU~0&=7NAJpo7YZ4h~OD=+Qv<+)-k>UhLGuqYKRP_N|0AdhZ=ohB`?r z{4L}h!bHDhh2SMxA&|__3U%VGgB8-^BuN!kDBqwL;_)L3ltgR!d5V@1VcB;m{U${$ z5^quJ$R-td4f9*`&Pup~^26)+B62`Q+TtX;5_lU3oz2xaZbwhg&enkuH&$l6R&!R~ z3+k(J6V_0Bn>E$>py}5A^F8wirj!qj7tM;+y|B@8#q8$X%>4O#KD9bz6FCitvhU2# z%{kINUt3(faOrYwaR&MYD}RL+K$Gb-f2DK4=;tpiUc9<^adDwHG>yvR{|c5L*I-@@ ztim1z<}NRupI@Bw{VOYTJO@3MwBMkKkSh9hO@xSXapCgiOYj}gL^@yl#A$yl0h+KX zn{ICivCUI~|vq?{e+pRexdS*F*1g z++liOU0850U%3bm1>~+^pFEa5kITm74`&Z(d0aNtE)DlB@7br0Y(}aNuo)jyLVZ+@ zZ8Nl>2ihJ?>*#ocO?aT!2gu|>3J1S(FBJ(S4lH)k?c}8-52a5K)FP~a6x-~;5QSQq zE&8GufkkhMk{BfmUQCLz7=vfg664|oa&lrqoWwKFwtWF7-&A7VSPdEipp5|spM(1} zk3hkxdZ&jtetu0pkG$R++50>PDN8i&kblbxJuJ0one12@A!H@kT< z)7gBtx@DwualUJGvq08HT!8&zbqzYyx|Y!Qs70|m(lu&oxAYzv_#@pM5oC|jCA3o< zD<17;K_EqByH+>XrTV3AAubDRudp+Y8H{zuxnDSZ-h+kYjCaS^PH@YK?x@IdUnhHG zD{H36qaQd!c=XAk@utw%Df9#szjku%^e)^mNM&}(Au-)8zpu(XYMc>yc17f3xEi}- zqPVA#+ZiMu>46RM(cwJ$0v<^g5LLi&(m##&$!OiLPz^XVH+{L%ihLZRWcc;1;I}DF z=Qyh5JHBi=b+_4cs}H5_hA&YuTT%>MMwYijZva%}ZImWO0^nqoZoo$i1Wqte0(sQ@KzGsO7xX*9_~*sp0`K)tn}grSjv})3H0~kn=dh_ zNX(T+%Uh4+RhrnN)U=R#jo5~UONDUks_zP$dRs`$R~g`@HsDRJKbZf#uuuDa+XLoB z>OJg1f$9mizmq%<=n$;u_1YG%$_=qr!pjVVB?(}Xqj4QVefD^7jcm^c?FPWKZDD&- z7fp^(9DBDU$!JnyqP5+4l_`ggeWzzh{Rv*lM6$WbEGUtpb*qGlf?@n3qzZPHp?QU(L6~Os#fY&W0De2GfJ21l=3KI2Ldq< z3>J3Ea3!vO1jLph1BX`kcWBjfq=U*@=k#EOM{5pz%p}qV0X+UN?61Evy|aj9vAn^7gqeLlu&nq1Ot&&UI75hY8BS6QwG5pMFb$D?wwgR3>$f zNt(evO<|u>)hx@XtRXE)c_%p^GVha+%YfcZx83$bk!0cx_!g?>fj=!FI~nF;WcTDK z5L+;hgCxUZb<_qZ_k3sree#d!K4SA&%fmd*s|L*D2^cn(TGEGzd9>Ytn8a8Vn3!~{ zF+|LsBTkiHM?RG3$9e17i}X*Z6A?|w3)m?SwsGL7=fkX8JmQZ^4L;-Z{;O}YEtD$L zO>`sgQS^0+$dvzOzA4he-$saN0Tw7xRL?$c>3P#CODY?P2hC4M;MI~=r%R~Q$_|;W zX+$h{I&X7CAE{S`??}-+`(?FHLIzfv!AGTo=?YR^AJCyncMMuPH4Axsv+jb6g z@js(h#K1wfV9)2l5P8kg%4G9?6J;I({|HpjMNI)A`XlHoq1GWS6cDpbk0;2J{_95X zsZ+grc8BT#B%lUx6Y5(ZYQLv2PNJI*>vt7VIIY}-uOCua+|grY#}Fgvy@W6-M$w{7 zf3zP<{}^GM{>VAOBV-`K=GNcGD%`JX7Z89Y_lJ<&EP}O@gFNTx9ueF1QP+fC!y6>` z2X}vfmiaBoEdf`0Ex&?R1%x7lrQ&G?>C(<NrB%F+sYgycFly;#D 
z0GdX6B_>LyxMXxtlE=6sJnqllQg{}CCNvA^4}x7LfQl|o5%fXIKwfceBrf4D8u!+F z2HjTUk2~tU{LWZBS_7PcBNOlmOjTK8m%@SjWFM$8^#aK)N^K*8fD)%MlMFm!#Ozd; zb~!qSs17!LdwNI3z9_UWZx?rns@Um2)bEz>l5NcSyZ?zS8 zev~pGou9wRKTEzsxE9*a|2K_7+Qd{kk2rOo;0-Vepu^0wfF*7tdLD^>ZJsUz;Ld3g zIda}vzu-jkjz8~s^Ou~+pPN}&?3@l{zi7enFFM}B#iK=wPLvj1oI6^C4pDT;@va+RK6wO~<==V6~g*=aWnj+ft`%6@Z&x4M>{?(n1?tgVhei;22d-XX`i%;Fq%V(ia<^?=%CEWO08TL@miCLH)abOurvrOVwmPm3op=8B!EeM14s~ z(QS$ZMRzHpqqbU3GRtu7uF4MH61@|^L5BNl`Bi$&L;zWFlEobXUE3!n^m5aU>I0|%7zRvqEj>`@%St;VJghf|902;<7%rid<{S+xQNK0{sPxD|Vii#;B3 z=q&{ZOayDtu@(CFco_LLw6%*2^}s8{T6R8tqsn%UD)M`xuGz&@z4hz$m7TYG!1##; z{}p@blOlVx`ZzJeZ>dUECyCe%sI(2bLZW@sjKr-{QA zy7VzaJBtH`j#@)aQ`71+p+il>^_Qkkk8ga3F(38 zeQ5W?CLYgHwe9aIvF0wE#tHxE1OEpz09HdbW5&sKG4}vKk(Wg(+}C_87?{6k9Ewr zC5-yX^pZPfE;;NG`~=6WDK|W7Jhn#Wagj~qBBShLiYZwSLiN6djoK%(ft3n$`oJF? z5Ki0wI+edb(f{Hn{~RfZ;4~icPbfM=(MyPuB5rf)_z)t3-u09Gy+FWx;_Fl5M%-=m zK6FVTszHWfLf4lA^D}{5(rrX)jV1-$N=cV5CnDA|Lq(c~ z{2n5E6b4;mN+(OK!~m&>cZ@yyA8GpG_s!n5!r6Stp+0^ax;X*7@SOe zO8dxrnF0921sYU6DO9)_(#r`i?o2GTDs9{oN*Cs~sIW461lJfDcERJi4W4%Sml!)4 zr6g{e`n@EXw#0=|&$3GL2ffH2q5e@MSmZB!dN%qV(UQO&eWe5lsEq>(cFr83$Z(4R zQA{)MjdH;|>X3jzMU4|J1poj5 literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/backbones/__pycache__/resnet.cpython-36.pyc b/CDARTS_detection/mmdet/models/backbones/__pycache__/resnet.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90a4bf304a5c09135bccd2ab648fd593efd70a3e GIT binary patch literal 17963 zcmcJ132Y=+dR|p^^~vsL^B9uDxq1$bre`V63`dd`MdwJOR=ZlRqGLzfmeSMHRV2IF z2j^9BhU}^KI--nSEHcW#0er#P7`wKEB*^K&h&N8M3F1Y9B#x5+3#8x##>P;<1aJ() zKmx(==KKCveUReJ+6K}@y?XWP-N*lb|53b}%VnPbWWD;G+lKMqjgemt@fUD;Pnm|{ z7>?OA%KC1WO?kE&v2sjeb|cYXib(U5l=YD=2UCCJT385Gu}Eg&zQ!mhLd(O9~w@^jWuR>W92!d zj5%4PWTTX$XfM~aTJz;&l0WZ^XaJ$-2Y{xh<5`K*WlWh zEyu*ln-15wQ;=TZvs~a z7xQ|*gTOa@Gcb0ohP7*$Mv!%kjg)uNN2hnq56mDh&x?cSoIF3*fA;NQEU+-V*uB_$ zR$x@(z40i`9;79D1wR>N0uwpoK`e*|ahZS=^5bfxam9o2UCT5ipT&HrGvTD}C6Ff* zD+01RSr;1qo|re zfS%Z%+guG((zUkhg$Z;KcgYiuZMdrKHY*-Zf)r5aq*c$~K}*Ryb$`85+YHCpi=~RU z*~IBf$ziCiVmjmyFcx!a?J%}hyBX%z+;*kfKT}~`2B&XJJY1DZty%TFO66}FpS*f$ zz0-1+5VqHLwyNz*wH>GGUGlIPms%~y_1zlhO2;o@_-@m?bhBF9xY@x7E-BY*V+}WV z)I2)PmGCYjFvjy{fq&MtIc-_y{=bZwN6NIBxAJB}{<>4cGql)Wsu~Xyf4uXYyoAf+ zdih3xzlOPMVPBampb^P6*T;yJI*!*RQ$2#4dVCN(!Ppf9FW~Y>WDKiY7->_c=ugq0 zdJ4Tu4Py2n z76(m8tIwfCT*@MMVEu?8b@WTd^o%9cNwg$WqRui_WWY|#?km|cgQ2;NUF(oG`#WK* zv*jO{(=Z95f2XRP&!VKq-ENq+l`-+hP_7j2-N});j*67Vcq)11a4IvE=x8CHtyFGp zRh!Y9w7b1oZF_RyoqElejTWXhRR@RL-_b*mjz5jo!(6nvYi_eysSLY|Wx9&@N_GNC z!Xz%Qh#+O!@%{VLGE=f+3n#L6))oR%L&m{xz+jq4+Q(o(e?c$FUK04T!C;c0ohDN7 z7cm$D1I=Ir5JU_n=j1hm0kau*3KCB{6VBv^U^BvO#;|fzU^X4!Z-RB!Huku4gnA0K zTu}u)FDr5ujF#gYa2ry`0k?S}_9c1wj6W+}5$h3g#W`?A zot7A+9R*jEG}JghZD7UvxsUZG{NoY7L>rAG1P8(iPvGe!t~t#uom6kq;kZsAt#IE| z-5}d20zjm8t=^=>#~APD$Qt)e@9V+jeM>E)L>5oqk(R)51GY5dpAN8N=8PbY9wm+e zhJaV6oa{Zea;E)ZKev-WPRmI$$Fwnr-m|oD_I~cOI`1%FKaXS2qJD;hn$JBjZ$k1S z9OQkZy%}V<{`W2KUEq+M;1rHK@7Ngk6nb(B?{53&d(+N%??^l4KZ;nPcf`MdaZUwC zINO8%PRPubf@!?Ni2CWl6k43r<3<{1{b+2%0P2D5%xESR-7EB0E`AlPh zuvu`*V}er_kz?|HOr7tS?#=j@Rt;w+n5kp0|FrcH_wZ%pnntc?{40UA3JfNxOAcx1 zT=&<9C$V^wptcCYR(#X-R=^N5#pw6iYEc+@vF;VmM2PUrLeXD$kxpdun182J6j;kF zOuxE)wMhQ1Qx^tR)mMvkz$esGtFErPKctq`y7K(umF+7dbq%{Ev;r`O6|F=FOih7G zjH0iYy1*1cGNd5k8G<0Q?ukBNDYmxIt($If z1^7W`?B$nM!U;#Dhw9CyTUk?`txa!_^lR@J0`Q_?HwM*~5{IR#<$jC6g;FZQg=&s3 z&a*m8#;(YmOIOuVre9+~0HfGon5lbpk#%aWdY-8)rygZ6Ky|9b$BPU;!(f5IV+d2sc4!$vIVqd5vsoV3BdFBYt8GAVubjmaK~T`}ybl=!Mhb4=7Ws>~yx&4# zOy$k2Im16oquBjn2FO+}prC}AVjiMlAd9&XL~J8Z-n5wmwGlOI?4c2fZB8P87WwCp 
zBa3(e`3uo~(wamX@C$0TO?!-G61Z$!8C?B3^j)KF#@Pa%h`ueLGjlq|6kzZHwD_}N za1Dun2>$+k&YmD&;_qR5|6lO;A3mh9r9U;;{~~I80NDRgy!yWnY3%Agun+j9#r|gy z5uF3{5+9~|*4F^~!II2|sm}Y*Re?D{#OcM_DTzTCK#o4#CliXT3`RkrnJ1Ps+%2X3!x>Ujw4*Pj~cz?;JqE?+V{SZ+ckPA)P^1b zGp2T9Xk%tjd$RwAZ6ltSHW?d^YaAFXiC>PHxViou^l5GEj?fuxu9 zeJQ$MEssTHu2uCmkV#W*n@{`pGf7;S2l<0ut`Z4pe~dJb`*bMRk;-K$*^zP}o-vE& zIY{~$&?3rwl=u2~q*y7ddvWBXM-?wV1IScude1HHZxA4YOf}3Y_tsV&>IL9|suNn! zfMjaxka{*dWMX77k&voqp@syR1rhoB;}WV(x$uv0iR!?xvKK*T(JPTkhZLwUm1fnS zXH^`tdX@pXt%d=D5MJOTRS9~XWMSBXdXa(f3I=`VAw3|zU`CJY_7HjVD@L3WCuB~R zfT||5y44o_OSQ=X4t{nN^9VKu5-60CP+&s63HdTc85FXkNT9gKMTSgZ4`MgcIki!>-NvyGX?EW)`UNn!k+)i?@qJhN*-L`b7XqcR_< zBn9s7anzXYxI-1KqPhuBF{j=d&eAd#)WM5AocAA;j%nN|6Dhmm7+!^0p1(ep2*>K} z!NZ|gm4GQszfo0Cf%s5VzR%~x+iu(Is4#KIt*@>7f}XmGD~ zE!SILg9w4wo11Ri32mz6!c=w3@5rG^eW|MI#MDrQ`atcWeY@`73DdPsM?sZY-7KXv z;%cE~MPap0FpvwPw&6!~e4 zL0rJi_Yy9T_}?*mCfK2|D%!q}Xr8cEy0`9*%xrI=cz1N#@jRkWefI9i172x7{p?+R zM?$1ke)et@K~i78{OsL+tOo^S0`#GUxrfS%TBu3|as-u(q3I0|y=a%h46v@MR+PJ` zUgPx0VTD?(d+N|>80H?JgbXU?sg#MhkBCq9jk5t3X8;%Q6p=lGQST|U&A z_u@UU?uhw<34`HUN6EUWmZHKxu^Y!30t<(Ngd#Rn_&7fh-B&HJ(^QZGJB>m3W=?xh z6NG+U)2z`;2Wc!rdPS|HUqflJJKnB0*6%DdHXeJfUEf&0BebRd2~LaJKu(F3#zS*C zOb~uAIck#?P(IUA)e**s^RZjkt2l>UCg?-qLLS>{sk?k@`JFJ~xVP&yXve`Im={D% z7Q@=ElyeccgJ@fCyP??*&01)#hvuDfPK%k9O`bWgJb`UM;RaTyiY}z8Y;!=wwYHGd zeT55|MZ_>qf_t1X=fU&3#}8f5J}0q<)I%mif>QDze-Zle%lAFib5z?!a znV(L@g_hUwIQR`%!+9hPSVKZs1NMNh2GB&Z24M+ttY$Jye&L#kcA6uQ6Yxj#2QMB< zZyPNi;0UDFM5(0zr1^1C@g+#%L93zmb4*RQ9cvyt$4@p=xYBr^qRkSl3aU}i==eRR zLp8W-;ywmi4XfNPO-b278SG8$N*jtpC{|Ja!DVtIWx%rzX@{JHz0Plc6K7z27n=I_ zf2x=93qb~D)4LWR;044c8k0e$F@=7ncyCNQ=1~Kxwipa_8K?s_SlpDHGm-)hWE!`^ zs3ve66%O7`5iaj5d1LVIh=W#Xc0-%2Y1WBqx(*;srX2xKVoqUD%K%E#d>9;?h*fCy z2~1(4@>oL_{C@xWwZ~pM{=dHevMGmIwbgFZ zp4@V)?b6smz9b+_x;In@u%qQ{s}9p}B)`jKgl6k-UiD=j9inCOsy?4e0lC_cu5piz z-t)0Ma-9UGrHoyWt6PXp$&huCW%0)o!uU(LycZB)UugplDRv}!Ar^@!hzS*k`uZ8f zaZV%#Q3DiKiyBs>mWwPkMbxQ9f&I%>is`8mQGimwVbtr#IC4$BG;W{4c)R2M(ew$H zHA1EmhmxruMxnT=-eqH?LZDEODA{It_KOzXLrfGjSMbNhWn53;d1A?aT|M)@xqaDB5CiX8FGGRrR-pD~E8mY=N#6tS1;zLVROyuJ5jU{ERk%pBU{%pEc! 
zl>60#<*vh}0Xu&3BT}#ly`Pt{VdV(RVHWO>wKhumgG$!nIX{Q@4_CJi)qZZ&aE_=<*?+Rkrj>pjU5WOJcdr&;}9 zVL&#j^|)Jzg>eD&>Mt;rN2D~ZevWZ6ZP7G`329IJ2z7Z7zOw&>yEPArJ@ULiZXKUi-$E~I>}vnV&BmYi z9RveV1eYLKXp5^B!qXC-K^S+=N_bAfM^-H-^Ye)^?1j#G2$PRG7vKz3a&m5xu0S|w z7Q&o!5!Yv&+$v-h`LkiWoj}_7>a;*FIBx-ZrQ9?vQDWWA(0vOqY)ss?z{#8iaoI8f zQ;$0*@Fwp(CS^}Lr%-lW;93DkX)#Q^DQ2eXuYk_#XtD2;v6vBq(Ti%$dwO6xa@J2SM?X4Ef z5!E%<>(_&ZQ5-TA6Bmk_$2WJ1j}HrJISVB;lBl^qiY~m5J^(nw_T?4P3!1#Nu}XG4 zliX@;UDrFTsxOQkQrXLqN6>?cQa-G=A-1va2Yd3xEq`;%*K=@Zz20xw8@8SMLHif)ExnZJ@l8y>PVk4GK1V275FF>FHm;IEb<1Aj;h+>oCyk~Bqq(`X|=v4+ey#Uuw|)7 zx^&@Yr_)45*VkRn7Hi=GFoP}xQ);I;=sk8Plq}7ip)r+%kbb+x2zheD`Pd*FmvrQI zRh1TDr}W*Yi?6^=ivD%lMZv#FRjqx?zzM6PI11I_B#E8fZte_w9(i#U`~7|gt21Ep(4Li_9rjS~!f2PVc^+)Y~K_nqP6+n>-5Z zZVOv$on5bb#l91f?uf>j11_zI`9zDQxU5F>o3D3||mB z2&P;Sx0kn}l%d{8GC(?2c7Hewq|7+nB20TQ8B6ki64w|zer zC^|Rd>Karcu?^E(_EYeQp@5ne!8Zd~1g{n!-X{=Yvq5~9gB^;GaRsle;Da>EC_N>dp0z;<+y% ze-zB|sd3i9yllkK{?}NljhSd1>CIvu=U^3@#~kv045@RBA>ZFee3o#$2k)T9u_*Ps zh|fzZK)aI&=BSeH9rw>6ot`+;Oy2dewQ^71|HG^5p2L%9s)D9KHMDGN0 zqDq{s{3 zsto)oF0=EOm~-?E-Cid@P1^)ihgn@k1gHtI!vfUbK)0UnMjqs021}ult{0|V3>nVp z(8>!!{}$U6Ku=Pl1i|MknteokHB7C#RaoEZt_Z*KV^|@WmG&Mc-MF+`5~!$0NynJU1J`SP3ax?sJBOmHOilHX-ljD7C^-ls-k#y(Sp;GF3)Ss6fXkK~XaTF+n}lfE zN}>&>k29wPPcZh+5ZuRfd2@&uverYXE`#;w%64b_D|+C-4kjETkP*iv6Ha3ZCXcEZ za%)Vy{+?oc0dY>Zxd!FU`^S6c_6#gbwy1tHJ#hEHs>E+yzHZz)bKThfEEGM=jZcEW z3s~auUJTzv7>-S23f0GAnUZfc4y~z+n%<6-x9}PuP+1l-KcvQKCFlPNUWGQHs;E-K z%n(5fj!}ON8T95PRqk88f11VOl2?6`IS$P3zd+hIIJ?t`7_`jebYVszhttOhGkc^z zv-=3yzzD^8rJ+IQ^^Xz3MmBwnT%DtH3VbvyDBF+*r4Z)to(*-=_wa=8R@5JCzku0` z%j|OI4488hjU>vx0ksw84=S%GB{hxo42@6fNq-FLG_Cf2Qur&sfk~^swS9R|*SV;! z9P)nv421(FF_dhK16@%0{(+P&s86;(J1Dm_Qto?f2c~76``glsN!0RfNuTPcUpL^d zIt?WpzP^z|mRwn;9gw`Q=0k7NSp$pKoclUvN0zu-+*GbWZ+zU+_jIh%>_~`Dm&ei( ztr(9XEljc!aNaPD&lg^o3d3=jc&X7IxH|kKE0>eT&8@gB3hRpF_9{IkDT$~U6NPhJ z>e|5@FsXwQw-GuUz5Z5*ELIE4sLpnSj zGEE;K=vMGGN#>w8&%Z`j)OQ&Ca|TZ`5GF1_AntGBdms3t(gG>YC@{ot`JO(NzrnS4ID=%s`h4?G+U8^elSs7ZcY^M-scbu1sXjh3=#(v+G|rB z%x$&2O@OWspooVOMtVucAv4`CvFl_*FmQWK3&WIMjbM{Yv(ZxBQ_ zh|s2@K=#e2LBoGRzW>DGYBK~$5Qd0tGGL?95OyQk(}4h~WT^)Nq*BK_lpf{rPmuac zEXAVzyuZwZzcG?3ec0b5@*dtZl6L({3P_hWr1E9_O16ZURrBrA!~vs9n1nY$r-tuT za;)SUgTKe%c?Lx2>IDX55yCF@I((0h+&_x5`Y?;}FXDZYv(BnhvvzC<$FiUL=w;|K zN;ceoDy(GfR6do<%%^gS^}#h}?28Hujn6wl32WSl{1JEXlGFR!h-jw;oK)M#R}DTW zyMj#Ci?=VzGb#6z;2kzjK=zBb@g0d{0RZ7-lJbJ*#<@XZ)QG{6Aq{$Os;2gB z!kY{^3|feTc0-~=jb9TPEQj;XFP_Mg-hlK-K*cX8)9SlU%7-l6$TtcwxtdOfvfeB{ z&Yd*|=`ce}S{BAQk?C}8Fg)k(#c`-?(24v?n4!YR6?c$sX$1S^xwG@pp9q>BR-!=p_4M^{0QfU6Gb_x=p%7{3Ms#e+%8q#XWNty^s0Qr|_U6`?Bl z{MK5$9GV-UdHVpF`6TlUb#1jyYxBAP&5eKy?R3u`;4(vZykY(@p-s8!XHiD|T?RY> zp}E-2Eka9odudT~0_=!%rv;S~m2BAL?=zRp7KGO+ZU))XVfM`}n3G$Y^C-d@iEXz*c(ll(Lxh+(4H6ZP--Qh3=a zWB--G?=m<*ivJ_h{u?K!h=`G(`(gs@ECaXeEa1{4o+n{)$XJ(=s+hY*z{%ePE>bkv z^xs4sU`6!HF#wOSiSdadeAZw&{6oY67R7G(F%~riZ-`is_T@WtY}@C8bR*eI<2a-? 
z5}h{yM-$xk8wn^u?!zkwZwQTg8Cc&kI0*3H!L}cZp71#pKIk2Tt1Z53-XGH8mQ3#9 zIbhMtJYz)d!0rfjOCrb)+Tq8&eBx(YWBAL>(qZ=@ZT+xZXfHcx5689#by$!ew1D~# zjzhOL9F6Y9gL9#U{4>U_B2;RL4Xl9rWsr+3JxsvsiaQHPx5bwFFZ~?UK6$_d0RwIJ zB&B=8ynyjV8>C?)gO9ZmCO9R$G(b8~g1(K+2_l5mL%0iI`+@molz0~xK*fY!MW_T} zYu03J&ghL}j}(G9>U%jT;1lW$NYfZRO#oeVAA)hbyCLtg(K{&X^gAbqoCU0XAt)f% zHv$XckHg;tX&;#FlYqo=xG-DlrvP$cYvvdLWOF*^(Ay{9{k7L|Hb?0dl(to>`SESD z*3j-ZA8D*xyzq4AX5;A_5olf1fc3^gv9l@4eCp$Mnka!gYodq-M((J6>SFjUdN3J@ z)r95)XsB!T)w=7z9Z$hNRO~yw7Df9F`vla{kqTQCaoD*By(}FUPxx`T5fKD$!v)+Lk92y;XWWrbh zCALT{l2|Kj$;cpw{UZ}AU*CD|rnGMH>gN30S z1U?Px1odUUOW(Wxc=_dTeRU_ag=qgTawt-5Z*pAA-cLG_Ex3k ztsSK2{}WaG5$XB6h!_bQSd$8+JVX&HmGb6A^Bj~#d2`Xcfcv6(5fonI9SAS@sD>Zd z*s(MCP)C25L-iJg8|~i8yje@Jm6El*Qi?6Ftd!2H|A1%B`cmTGzu0bTH>w%^xaHTI z-r{gigmFnx!kb(aF3@?laI u^IV8QF^E+v#3o;x{EG2qsQS)LUL7imbgci;RCW`q^cCZstPPRs*#849pDx${ literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/backbones/__pycache__/resnext.cpython-36.pyc b/CDARTS_detection/mmdet/models/backbones/__pycache__/resnext.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ee91e6cdab60c61de08af51135a8048e854f7ad GIT binary patch literal 5428 zcmb7IOLODK5e7gI1i^=>hxg?d%9XgpM3yYccDc3^+pFF6BXQ)4H}<+1J6MP_lpuis zJpfk^gH@@>6`!=_V-CqJm;8fVlB0jboLrTYF8KjIr1JF)ACkS!gDf;>8Z$jTJw4rD zbFuJ;?AM!(rsav~+5A7MOX@XfK^ZQ)9brN$(EtrJ{d3 zO&-d{U!-gphdF06=DCT1ZBu$%X`aVC;oSrDocDRQ&GU0hzpQTEy8ZL5_qOlegT&U( zFp62wg~T8ZkGYs|60D;+U0QwevB!<<88o=jF_{6ImA^20CRyt9YNzytVH+JAdOMWST}%V&;J*o-d63(#T1!<@SD= z+J()mCygcJ)GSI%#>mMXa4UragO<)pA6tbHxTDIHYfrh=kvry+6l338GFW-~wl=Ef zb#NPanj{Usb9BsSkRP|e}cKpIkiUKxL2j#WK7-2 z{#kfmnqq0Zii0(1KUeTvRZj`4d1qD3xRyKur@?9m?w4k^oL^`4Q}cJE5v>9z!YZ@_ z>NQobhTa0kewg1F)!E#rk+}JjXthR-{Hgt?3#X_R4cafpO;=>|YH!aJb-e$MHbo8k zENFc&i*{cvT-yC~2QxdG7`u1|>A|z(v9+SAEL9^HKKa|sCH5>WM6saNBOGf zk3aA9{476?xgTZzbA#hN72Un(R{VUAgD)b+U&s$rUwJAGA^zskrceH>xhqpqQO}QZ zC8rzqdi=jg=|v*T{q>{u3$kWvX(xUIK8yQQ8Kha>i;hGE{qqvYo9&XU9dMEGILM+C zE*-6`w0EK~lMB5tj(5WDL69cF_)WDxxxTn3bC4lgAdnq07A3pVWf3lSlqS;I73px0 ziFv%=K6zSQ)oyRs?+>By4)?dygezm-xxFo}kdS$%!bG?e^FU8NUjYC7NszWZTTLv1 zA)X*^<#AjkOnmJz4I>X(|N8 zTou^kBe1 z8JEk2!zka=9SW2I>n74wJZhB`+2y%Zf;Ps2ltHADwjrChvWyElhr2>xClo^k9l5j; z9L;Sgu)!wt(_SxwG&Oaz-IR95`7$$4o zV-{v>8T@vw-)B7MUD}auufbN3u}1#616Rt{1kVzFl&=ns1$hib>+JU+M)`{6nyz)t zvdkq@KTn};<7?q-o1WPOb?|MPEj+}dH+l@U>(&zZP1B`u7R@BCGagaL@8$89lBwl* zc;}fXi?c|wI&nL=CS@2wS*2YjcO>V6J9@#`7tpQ;!KcG8)^F6-WMgc$E|23N5U-I8 zB5hOrl-jQop|n-UlhNV3mdUxxPvsOAnJQ><+{aXgt80A%0A+OS|2jcKvbkW-`RvH2fVRmuHKgfrps(9KCuGM0)r5r$e6PQ5dUu zqC=7s4hB47vMj=6m$&C-S*51gs?t%6Oa*7PoBwrn@fI!Xs7#efGn)C9aw^&EhX*{s zRns8rHyE9L1Y$I8^NP9@bP-&29USvYX>!RX7v%DxXtQ43$&qVvu82)kEKqT%_*4Yu^M2WqhMYI?$DBzaT*UzT$OR#pTv_@ zPi5p=h;uimxtZ}nzUMDTN#4Hc-%*{k=c~YzulPNY_WjS+UtRGxUs~~BesRTLe{mh; z<&DuSB`3o^0(Q8|vxyw6obVhtftV{k1ykhRWB;YuB%KW>6W*f4>Q5F%#GC3ji_w#@ zTZr1e3e%6BptCwu(SFwDzsBA^!RGEM)RNI80xKG3XaEEj54(F>;n~`g#Qzv!cgC22 zq&;qGI?R!WS=1$?d|Q+EhWTKaYvUg7MU#cI*~%JuEn_cG{J}W3J@A3k9w-R)VAUsl z1{>$;_oK6^5ed@$C#2@5u!uN}VA$`$cb7!X%=djL4SsXN31MCIihH^Tje(W)2!9!V z;SWL)_HiIboV8c|7uNkg2l%+OWRjA-I6na*^TXs=&q4ka7pyjcHU2UTPyS3lNUxzd zCOaW%!=S@%4grjMy=YhYrnb$@`brLe7{>ncPMXFLbbpVNg-8k+od)Ph0KTcOhEno| zt0URP$1~_lmREXnUksCkw1-i0I0fxdDe^EB*!6ar^PB!%z+>o_CjM5K!!Ne6&?P&N z<`Jf2QWPd=q)B`{(|jDlr+R;Y6YcJy_#C9T1>`?4q%$7r_MXiSY-Vh62yf`2?lE?N!6dx%z3@v11Nt-@Xe zLK%7a9QafMmw_!bPSB;F4IUMSstm^O65$V^mgcC2k(jgp#1d&&W;V)Bo0YGkY#&jX zeH93$Mg=0;&i=JRBaK(?Q~&y@ff9R+H3)jCQmgI#|7rN7q9V{|BW33H4RM2{-6KNS z_Chu%m@YjWwNA*Hta79nE>Jkgx(X?Qb^=)$2YXqahHBZ=BPO+l#xNI|fHMk}s5~sT 
zY3;|M7eXx3p+9-TTrAV5cZt&s^mQJ`wsu3jMDM7e5QL`0D@1+?g31Yr@jIx6FqK}P zDiK+o?o)coKngEUP_Kf)3i^_b{*EUjmSHXswzml1F9C3Ppi3rU_KVwYKmH&Dg|98Ik zd`8acpR|i&!YciQ`L@*n{O=uxJvRc316g&-V1p zR`=NJWk!OGAv|RH1s3n&fma?7{{iuWD3B21A=E6PupC`)f@Pr2U)#}gHB zD$?#PPHlfl@O9Ry9^x#l+i|r0gIl+BZ7+NnOBKXvoa;t3NV7al^P*ed3FCg8ZWX;+ zI#B&EF`W+A3&|i9L9{KSo#7x(^Fs^Ef?v7z_B-!i`(F3PdPEx?{hG5zb`1^5==#1T zPKndPd1^rg=RRheTr@=UNyRUT^P(kAJmJ10+G6nugIbovNqoy6vl8Y=g+F%`)h@2wwRgll{;&=J2$SYcmz7@PQIXeUC zoaKDXrdH1yTVs1%8asj?lsFr6(cE#fOS%0KA7e&P8nd3$bMtcUQESSlJg@YsWFI{4 zfjzY#_21^yge}UL=aHQboG+YL7gbR^;9odi!zyL$RsSD*J-ddBQ#V8zw25;q;BAfh zSvIBHoXFdB*0C!ZXPI!$u(PbUI4(~uj4cTU#V>R_wr)O(^X(uBvpg6M28pgjGERd2 zKuBF1j0ip=j%0R7b})uf3sCGKdJNLF9jQ{81X;WK4JRYPQz*sp>he;U84libybuD$8W}hL+liJBfL#b5N$OZ{NEj%yC$ZOSkoDVgaiPC}O%B~sfsp0*R2YAa23%RLX$_wvdZ@sYFLD>H5t++JQA+rs$joL>k3O5Ok;zI*^L-o0|4rC#0??v#JT z_g*IN>A7Z)3(xKGG;cwwEIcFi^ghGttapOmUYq>bcv7W%vtt$bk8cNO?*jWU~v)F|7NUE=4b)X4b5#}meGQVAHTgHI8WJU|*XX+codYK62FN54J$5&C`AqMZO*J-XHSBO)m zWE4D-@z!>p5hq&)VZ6#cV8-JuAgYR&p0?##)Nfh)i!&@eD?Nu3U4dQ$r{AKn1pw`> z0RpUtkPVnz`oNmllM+Hb`V)?z&wAF5t6F2gjd%b+>pt6i)5w&J%&Ub=js#H12@WV) zBrWVyNYXo&y8ev(_4~A68Gv<6z{U3shQwh_xtaT$Vq~2Gq~NU=KykYWdg{wKw4yv^ zAfimntLlnOM}4WnTpH^c@7HcP9LiJ}Luh_emy}FK#u)1BSU@}ZXo#rb6c*uox`b$x zX&!6G1g94bQUwp7X;Mb^b&;b;#Y1Wf zewz4BZuclBTaclw$dQAciI-k1=D#q`k%P@RjO2_xGIBAG_QPKMbv7yOUNmMyeTBXL zOf!%6pv;XOst14rXW+uLzosl%?-o{~v}a7E>eM0ybS!nA+LYEzjiPGQZP5@nT+E;- zU~(oRzN5kvk%26yzDeZRG2f%^OCY+sE_X*V&EqiXRE%By3XF<0N%_Yua*khrCi5cC z0Xnm6VS@8}YW!G|v(L9Jl$L%bPf8A3tvw1ET{5xww9V8ik-LR|9GDnDoK zpFN87|B(qh)j|VCLk%pes8}o%W>jOk&$l{_|3}8Gcr5sQ_fyL#*eD+D6SOA^c-C=l zZm1tp&nL1-EZ*%@v=;zPtm7%DeOQv>q08USR3zO$X@MnMc<6r*zkMZBW8yzi1UwihN0DN2xPjMW02n+1I z1V3IRnR6d6Q?v>Y+iCUIv9oTm@7$v#FhD*hNElZzPTqSh!%t9h+4 z4^4F4d2ZN1XUuzy3Jkb9%8MyS7_vy42d41Ag-fT|gz|%fzhroRVG-Rdp;(3-asguZ z*jhr-ah{?*R)E%>?s4=sMeL50Lnwkgd=r1Ba%~R>kC5+2eH3}SDmT|t66Zmb-y4Rh z2tt&CIr1zTN80FO+%NA1#S$f|5|7bUz=}}#bG#WPY6jQ5h%h}<#w<)DPOW6fCxEz0ceBeA5(Xpu8(wYz)P6g!<&&hJ*!zZChwe7S4oCIt36}kl{7UW%wIg} zu56A{Q*nlgt{%75`$nkWj~=c>5Teqss4Hj-Qws>-3Re*sTe2*< zB;C{=eN(qUF9Ymu4=`Yluy@!K@OD=P_5{7!A?3zt(RC7d#6upQ!}G(By1umJfBC8x ze$qtfZ*=A7axWtx$)$&+vgmtJI@q=(!Z%LoNR$ ztVH`QtZUzK$~dF9;z;gAjv0o>gXTi}2AcQ{h(rlic!sBrB+8Mc8R3_dGj+3d%R9i| zq*yt9kKa@d#cK%W8q%dC@yxmzeu`yTxk+V)W|)@NP*SDN4D*z_)O&_t#-E{EFgJB& zO(9wajW1+fp=3$9im1}e!3Z?M$bwf6_)^)|zL2409SS~7TKjphV1uK4l+hs0f~d!$ z{xFX-863qa3x1h$43168gPe|1){YEk9ur;T|2=5pGLRT4^kDU8GR0E@J24T)&&eA4 zl{_-`$;&bb?f-#VB~98_HSTNtn49QX@@j9hmk-z$(5yQih1q5_rlHsr!(40*29!w_ z$vDr7{wC~(rDF3ijQWRp#>D1G#;Irz$FKYy<6o2yhPxNc)(8*}O+HKWkaiZ{wRh4y z3RAJ$UdU^&aI)3VuFQGV(~eoC{aC~q1S^XeH|n%wOz8UeqfEvFcAN8@YwsxMgHXbf zSOD#_v9JN>QXI#!r`=exfzah%C_*W@ZrTkkQow7Vx#elsx~3~8`dK(&+7pr+b6%Qv zyNqjp;RH8HSj3Q5&E^Wl3n7yvaR~_WeBxsduV9}LTqmn|1^5-x!2DyFJ+10xVCv9p zRlfj-yJ+e_g`pW#tHKF}uXlob>0e z^1GB2b=`-x{6v+ZvZ2CfWLkwvtwN>yrqZYWoU5q{b(Q~|K;^q{(j#XLP5?&lmr$On z?x49H*w3^Enro+a>T?e4C?DSX1@xws!tTP>|4D(p+NnJ_ms4{hm%v_O0rbD^$2%;% zR|+gF?Vv}20)*(7A&S2%C#kEliN&K}yaM*hg*`KJAI2Ocm-;?P{)3TuWrJ3drde+# z0ZSl9KxZX`JU_vTAEDhxa``m|LC-7D=pvdnpc0zOIU#&qHIq86CQa%m=M?14ypWLw z%M$)a_ScOsP_`VnzYUD)M~Mqb^{` z(&W!Oa3IeDm&}7Om}8S4!1Fclif!97TybWCE8F>Dvi;C-an^0$u_x)_MvxB;ri5v0 zJ1E8i&<9e1?6H8EbA77GXd5sg!Y*qE-yQ{7E(0-SQG66Lx)BVyh0IZAQ7Sl&(=<3_ z!ALOLwmP&M<=Nw{$;C7rGk&9ewQczh-D)4RxZ9JuG9S2R5qJNT-#YDGpN#MK3@S(Y za14&vZ4N^YFd-Qa3|vq^?2X{KmkSn90H;iVgTf%Yamiv!5ORa-K?sQdUxEwxm`k@O zS4J6o0uD>C0qAB_ihRT)b|xL$px2*}jbQS{AQpg{T_bAEzgt#gzx9EIG3^+q_=f;I zykUreAWgDNSMR>|5P&z7AwO?s%A5_;Fk;#f@r0S^4#yqsg~K7ssIK0A5;2=xZs3fc zGfPT>0Wv3WxoPJ(_LJdkZi%K*>Kc(#?gx9gF7G{5b1az}Dv$xZQ!6#z$lA 
zSg>|&HZ-BvV6WFY5%jj8w`F_Rb$uR{LSY#ytTPbZw-B70dneX 0 or pad_w > 0: + x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value) + return x + +def conv2d_same( + x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): + x = pad_same(x, weight.shape[-2:], stride, dilation) + return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) + +tup_pair = _ntuple(2) + +def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + +def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): + return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 + +def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: + dynamic = False + if isinstance(padding, str): + # for any string padding, the padding will be calculated for you, one of three ways + padding = padding.lower() + if padding == 'same': + # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact + if is_static_pad(kernel_size, **kwargs): + # static case, no extra overhead + padding = get_padding(kernel_size, **kwargs) + else: + # dynamic 'SAME' padding, has runtime/GPU memory overhead + padding = 0 + dynamic = True + elif padding == 'valid': + # 'VALID' padding, same as padding=0 + padding = 0 + else: + # Default to PyTorch style 'same'-ish symmetric padding + padding = get_padding(kernel_size, **kwargs) + return padding, dynamic + +class CondConv2d(nn.Module): + """ Conditionally Parameterized Convolution + Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py + Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: + https://github.com/pytorch/pytorch/issues/17983 + """ + __constants__ = ['bias', 'in_channels', 'out_channels', 'dynamic_padding'] + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): + super(CondConv2d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = tup_pair(kernel_size) + self.stride = tup_pair(stride) + padding_val, is_padding_dynamic = get_padding_value( + padding, kernel_size, stride=stride, dilation=dilation) + self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript + self.padding = tup_pair(padding_val) + self.dilation = tup_pair(dilation) + self.groups = groups + self.num_experts = num_experts + + self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight_num_param = 1 + for wd in self.weight_shape: + weight_num_param *= wd + self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) + + if bias: + self.bias_shape = (self.out_channels,) + self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + init_weight = get_condconv_initializer( + partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) + init_weight(self.weight) + if self.bias is not None: + fan_in = np.prod(self.weight_shape[1:]) + bound = 1 / math.sqrt(fan_in) + init_bias = get_condconv_initializer( + 
+
+class CondConv2d(nn.Module):
+    """ Conditionally Parameterized Convolution
+    Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py
+    Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion:
+    https://github.com/pytorch/pytorch/issues/17983
+    """
+    __constants__ = ['bias', 'in_channels', 'out_channels', 'dynamic_padding']
+
+    def __init__(self, in_channels, out_channels, kernel_size=3,
+                 stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4):
+        super(CondConv2d, self).__init__()
+
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.kernel_size = tup_pair(kernel_size)
+        self.stride = tup_pair(stride)
+        padding_val, is_padding_dynamic = get_padding_value(
+            padding, kernel_size, stride=stride, dilation=dilation)
+        self.dynamic_padding = is_padding_dynamic  # used in forward; stored as an attribute to work with torchscript
+        self.padding = tup_pair(padding_val)
+        self.dilation = tup_pair(dilation)
+        self.groups = groups
+        self.num_experts = num_experts
+
+        self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size
+        weight_num_param = 1
+        for wd in self.weight_shape:
+            weight_num_param *= wd
+        self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param))
+
+        if bias:
+            self.bias_shape = (self.out_channels,)
+            self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels))
+        else:
+            self.register_parameter('bias', None)
+
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        init_weight = get_condconv_initializer(
+            partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape)
+        init_weight(self.weight)
+        if self.bias is not None:
+            fan_in = np.prod(self.weight_shape[1:])
+            bound = 1 / math.sqrt(fan_in)
+            init_bias = get_condconv_initializer(
+                partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape)
+            init_bias(self.bias)
+
+    def forward(self, x, routing_weights):
+        B, C, H, W = x.shape
+        weight = torch.matmul(routing_weights, self.weight)
+        new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size
+        weight = weight.view(new_weight_shape)
+        bias = None
+        if self.bias is not None:
+            bias = torch.matmul(routing_weights, self.bias)
+            bias = bias.view(B * self.out_channels)
+        # move batch elements with channels so each batch element can be efficiently convolved with separate kernel
+        x = x.view(1, B * C, H, W)
+        if self.dynamic_padding:
+            out = conv2d_same(
+                x, weight, bias, stride=self.stride, padding=self.padding,
+                dilation=self.dilation, groups=self.groups * B)
+        else:
+            out = F.conv2d(
+                x, weight, bias, stride=self.stride, padding=self.padding,
+                dilation=self.dilation, groups=self.groups * B)
+        out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1])
+
+        # Literal port (from TF definition)
+        # x = torch.split(x, 1, 0)
+        # weight = torch.split(weight, 1, 0)
+        # if self.bias is not None:
+        #     bias = torch.matmul(routing_weights, self.bias)
+        #     bias = torch.split(bias, 1, 0)
+        # else:
+        #     bias = [None] * B
+        # out = []
+        # for xi, wi, bi in zip(x, weight, bias):
+        #     wi = wi.view(*self.weight_shape)
+        #     if bi is not None:
+        #         bi = bi.view(*self.bias_shape)
+        #     out.append(self.conv_fn(
+        #         xi, wi, bi, stride=self.stride, padding=self.padding,
+        #         dilation=self.dilation, groups=self.groups))
+        # out = torch.cat(out, 0)
+        return out
+
+def get_condconv_initializer(initializer, num_experts, expert_shape):
+    def condconv_initializer(weight):
+        """CondConv initializer function."""
+        num_params = np.prod(expert_shape)
+        if (len(weight.shape) != 2 or weight.shape[0] != num_experts or
+                weight.shape[1] != num_params):
+            raise (ValueError(
+                'CondConv variables must have shape [num_experts, num_params]'))
+        for i in range(num_experts):
+            initializer(weight[i].view(expert_shape))
+    return condconv_initializer
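
A usage sketch for CondConv2d (not from the patch; it assumes torch, numpy and the definitions above are importable from this module). Each sample mixes the experts' flattened kernels with its own routing weights, and the per-sample convolutions then run as a single grouped conv:

    import torch

    cc = CondConv2d(in_channels=16, out_channels=32, kernel_size=3, num_experts=4)
    x = torch.randn(8, 16, 24, 24)
    # Routing weights normally come from a small gating head; random softmax here.
    routing = torch.softmax(torch.randn(8, 4), dim=-1)  # (batch, num_experts)
    y = cc(x, routing)
    assert y.shape == (8, 32, 24, 24)  # stride 1 with symmetric padding keeps H and W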
+ """ + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, + pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.): + super(DepthwiseSeparableConv, self).__init__() + norm_kwargs = norm_kwargs or {} + has_se = se_ratio is not None and se_ratio > 0. + self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip + self.has_pw_act = pw_act # activation after point-wise conv + self.drop_path_rate = drop_path_rate + + self.conv_dw = create_conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True) + self.bn1 = norm_layer(in_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + if has_se: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = None + + self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_layer(out_chs, **norm_kwargs) + self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': + # no expansion in this block, use depthwise, before SE + info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels) + elif location == 'depthwise': # after SE + info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) + else: # location == 'bottleneck' + info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels) + return info + + def forward(self, x): + residual = x + + x = self.conv_dw(x) + x = self.bn1(x) + x = self.act1(x) + + if self.se is not None: + x = self.se(x) + + x = self.conv_pw(x) + x = self.bn2(x) + x = self.act2(x) + + if self.has_residual: + x += residual + return x + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE and CondConv routing""" + + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, + exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, + se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, + conv_kwargs=None, drop_path_rate=0.): + super(InvertedResidual, self).__init__() + norm_kwargs = norm_kwargs or {} + conv_kwargs = conv_kwargs or {} + mid_chs = make_divisible(in_chs * exp_ratio) + has_se = se_ratio is not None and se_ratio > 0. 
+
+class DepthwiseSeparableConv(nn.Module):
+    """ DepthwiseSeparable block
+    Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
+    (factor of 1.0). This is an alternative to having an IR block with an optional first pw conv.
+    """
+    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
+                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
+                 pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None,
+                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.):
+        super(DepthwiseSeparableConv, self).__init__()
+        norm_kwargs = norm_kwargs or {}
+        has_se = se_ratio is not None and se_ratio > 0.
+        self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
+        self.has_pw_act = pw_act  # activation after point-wise conv
+        self.drop_path_rate = drop_path_rate
+
+        self.conv_dw = create_conv2d(
+            in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True)
+        self.bn1 = norm_layer(in_chs, **norm_kwargs)
+        self.act1 = act_layer(inplace=True)
+
+        # Squeeze-and-excitation
+        if has_se:
+            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
+            self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs)
+        else:
+            self.se = None
+
+        self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
+        self.bn2 = norm_layer(out_chs, **norm_kwargs)
+        self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity()
+
+    def feature_info(self, location):
+        if location == 'expansion':
+            # no expansion in this block, use depthwise, before SE
+            info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels)
+        elif location == 'depthwise':  # after SE
+            info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
+        else:  # location == 'bottleneck'
+            info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels)
+        return info
+
+    def forward(self, x):
+        residual = x
+
+        x = self.conv_dw(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+
+        if self.se is not None:
+            x = self.se(x)
+
+        x = self.conv_pw(x)
+        x = self.bn2(x)
+        x = self.act2(x)
+
+        if self.has_residual:
+            x += residual
+        return x
+
+class InvertedResidual(nn.Module):
+    """ Inverted residual block w/ optional SE and CondConv routing"""
+
+    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
+                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
+                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
+                 se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
+                 conv_kwargs=None, drop_path_rate=0.):
+        super(InvertedResidual, self).__init__()
+        norm_kwargs = norm_kwargs or {}
+        conv_kwargs = conv_kwargs or {}
+        mid_chs = make_divisible(in_chs * exp_ratio)
+        has_se = se_ratio is not None and se_ratio > 0.
+        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
+        self.drop_path_rate = drop_path_rate
+
+        # Point-wise expansion
+        self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
+        self.bn1 = norm_layer(mid_chs, **norm_kwargs)
+        self.act1 = act_layer(inplace=True)
+
+        # Depth-wise convolution
+        self.conv_dw = create_conv2d(
+            mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation,
+            padding=pad_type, depthwise=True, **conv_kwargs)
+        self.bn2 = norm_layer(mid_chs, **norm_kwargs)
+        self.act2 = act_layer(inplace=True)
+
+        # Squeeze-and-excitation
+        if has_se:
+            se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
+            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
+        else:
+            self.se = None
+
+        # Point-wise linear projection
+        self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
+        self.bn3 = norm_layer(out_chs, **norm_kwargs)
+
+    def feature_info(self, location):
+        if location == 'expansion':
+            info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels)
+        elif location == 'depthwise':  # after SE
+            info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
+        else:  # location == 'bottleneck'
+            info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
+        return info
+
+    def forward(self, x):
+        residual = x
+
+        # Point-wise expansion
+        x = self.conv_pw(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+
+        # Depth-wise convolution
+        x = self.conv_dw(x)
+        x = self.bn2(x)
+        x = self.act2(x)
+
+        # Squeeze-and-excitation
+        if self.se is not None:
+            x = self.se(x)
+
+        # Point-wise linear projection
+        x = self.conv_pwl(x)
+        x = self.bn3(x)
+
+        if self.has_residual:
+            x += residual
+
+        return x
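
A shape check for the two blocks (not part of the patch; assumes torch and the rest of this module, including create_conv2d defined just below, are in scope). The skip connection is only wired when stride is 1 and the channel count is unchanged:

    import torch

    ir = InvertedResidual(in_chs=24, out_chs=24, dw_kernel_size=3, stride=1, exp_ratio=6.0)
    assert ir.has_residual
    assert ir(torch.randn(2, 24, 32, 32)).shape == (2, 24, 32, 32)

    ds = DepthwiseSeparableConv(in_chs=24, out_chs=48, dw_kernel_size=3, stride=2)
    assert not ds.has_residual  # stride 2 and widened output: no skip
    assert ds(torch.randn(2, 24, 32, 32)).shape == (2, 48, 16, 16)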
+
+def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
+    padding = kwargs.pop('padding', '')
+    kwargs.setdefault('bias', False)
+    padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
+    if is_dynamic:
+        return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
+    else:
+        return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
+
+def create_conv2d(in_chs, out_chs, kernel_size, **kwargs):
+    """ Select a 2d convolution implementation based on arguments
+    Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
+    Used extensively by EfficientNet, MobileNetV3 and related networks.
+    """
+    assert 'groups' not in kwargs  # only use the 'depthwise' bool arg
+    if isinstance(kernel_size, list):
+        assert 'num_experts' not in kwargs  # MixNet + CondConv combo not supported currently
+        # We use only lists for defining the MixedConv2d kernel groups;
+        # ints, tuples and other iterables pass through to a normal conv and specify h, w.
+        m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs)
+    else:
+        depthwise = kwargs.pop('depthwise', False)
+        groups = out_chs if depthwise else 1
+        if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
+            m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
+        else:
+            m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
+    return m
+
+def resolve_se_args(kwargs, in_chs, act_layer=None):
+    se_kwargs = kwargs.copy() if kwargs is not None else {}
+    # fill in args that aren't specified with the defaults
+    for k, v in _SE_ARGS_DEFAULT.items():
+        se_kwargs.setdefault(k, v)
+    # some models, like MobileNetV3, calculate SE reduction chs from the containing block's mid_chs instead of in_chs
+    if not se_kwargs.pop('reduce_mid'):
+        se_kwargs['reduced_base_chs'] = in_chs
+    # act_layer override; if it remains None, the containing block's act_layer will be used
+    if se_kwargs['act_layer'] is None:
+        assert act_layer is not None
+        se_kwargs['act_layer'] = act_layer
+    return se_kwargs
+
+def sigmoid(x, inplace: bool = False):
+    return x.sigmoid_() if inplace else x.sigmoid()
+
+_SE_ARGS_DEFAULT = dict(
+    gate_fn=sigmoid,
+    act_layer=None,
+    reduce_mid=False,
+    divisor=1)
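
How the dispatch above resolves in practice (sketch, not part of the patch; assumes this module is importable; passing a list kernel_size would additionally require the MixedConv2d class, which is not in this hunk):

    import torch.nn as nn

    c1 = create_conv2d(32, 64, 3)                  # plain nn.Conv2d, symmetric padding
    c2 = create_conv2d(32, 32, 3, depthwise=True)  # groups == out_chs
    c3 = create_conv2d(32, 64, 3, num_experts=4)   # CondConv2d with 4 experts
    assert isinstance(c1, nn.Conv2d) and c1.groups == 1
    assert c2.groups == 32
    assert isinstance(c3, CondConv2d)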
+
+def _decode_block_str(block_str):
+    """ Decode block definition string
+    Gets a block arg dict through a string notation of arguments.
+    E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
+    All args can exist in any order, with the exception of the leading string which
+    is assumed to indicate the block type.
+    leading string - block type (
+      ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct)
+    r - number of repeat blocks,
+    k - kernel size,
+    s - strides (1-9),
+    e - expansion ratio,
+    c - output channels,
+    se - squeeze/excitation ratio
+    n - activation fn ('re', 'r6', 'hs', or 'sw')
+    Args:
+        block_str: a string representation of block arguments.
+    Returns:
+        A block args dict and the number of repeats for the block.
+    Raises:
+        ValueError: if the string def is not properly specified (TODO)
+    """
+    assert isinstance(block_str, str)
+    ops = block_str.split('_')
+    block_type = ops[0]  # take the block type off the front
+    ops = ops[1:]
+    options = {}
+    noskip = False
+    for op in ops:
+        # string options being checked on individual basis, combine if they grow
+        if op == 'noskip':
+            noskip = True
+        elif op.startswith('n'):
+            # activation fn
+            key = op[0]
+            v = op[1:]
+            if v == 're':
+                value = nn.ReLU
+            elif v == 'r6':
+                value = nn.ReLU6
+            elif v == 'hs':
+                value = HardSwish
+            elif v == 'sw':
+                value = Swish
+            else:
+                continue
+            options[key] = value
+        else:
+            # all numeric options
+            splits = re.split(r'(\d.*)', op)
+            if len(splits) >= 2:
+                key, value = splits[:2]
+                options[key] = value
+
+    # if act_layer is None, the model default (passed to model init) will be used
+    act_layer = options['n'] if 'n' in options else None
+    exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
+    pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
+    fake_in_chs = int(options['fc']) if 'fc' in options else 0  # FIXME hack to deal with in_chs issue in TPU def
+
+    num_repeat = int(options['r'])
+    # each type of block has different valid arguments, fill accordingly
+    if block_type == 'ir':
+        block_args = dict(
+            block_type=block_type,
+            dw_kernel_size=_parse_ksize(options['k']),
+            exp_kernel_size=exp_kernel_size,
+            pw_kernel_size=pw_kernel_size,
+            out_chs=int(options['c']),
+            exp_ratio=float(options['e']),
+            se_ratio=float(options['se']) if 'se' in options else None,
+            stride=int(options['s']),
+            act_layer=act_layer,
+            noskip=noskip,
+        )
+        if 'cc' in options:
+            block_args['num_experts'] = int(options['cc'])
+    elif block_type == 'ds' or block_type == 'dsa':
+        block_args = dict(
+            block_type=block_type,
+            dw_kernel_size=_parse_ksize(options['k']),
+            pw_kernel_size=pw_kernel_size,
+            out_chs=int(options['c']),
+            se_ratio=float(options['se']) if 'se' in options else None,
+            stride=int(options['s']),
+            act_layer=act_layer,
+            pw_act=block_type == 'dsa',
+            noskip=block_type == 'dsa' or noskip,
+        )
+    elif block_type == 'er':
+        block_args = dict(
+            block_type=block_type,
+            exp_kernel_size=_parse_ksize(options['k']),
+            pw_kernel_size=pw_kernel_size,
+            out_chs=int(options['c']),
+            exp_ratio=float(options['e']),
+            fake_in_chs=fake_in_chs,
+            se_ratio=float(options['se']) if 'se' in options else None,
+            stride=int(options['s']),
+            act_layer=act_layer,
+            noskip=noskip,
+        )
+    elif block_type == 'cn':
+        block_args = dict(
+            block_type=block_type,
+            kernel_size=int(options['k']),
+            out_chs=int(options['c']),
+            stride=int(options['s']),
+            act_layer=act_layer,
+        )
+    else:
+        assert False, 'Unknown block type (%s)' % block_type
+
+    return block_args, num_repeat
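
Decoding one string in the notation documented above (sketch; assumes _decode_block_str and its module are importable):

    args, num_repeat = _decode_block_str('ir_r2_k3_s2_e6_c40_se0.25')
    assert num_repeat == 2
    assert args == dict(
        block_type='ir', dw_kernel_size=3, exp_kernel_size=1, pw_kernel_size=1,
        out_chs=40, exp_ratio=6.0, se_ratio=0.25, stride=2,
        act_layer=None, noskip=False)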
__init__(self, inplace: bool = False): + super(Sigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.sigmoid_() if self.inplace else x.sigmoid() + +class ConvBnAct(nn.Module): + def __init__(self, in_chs, out_chs, kernel_size, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, norm_kwargs=None): + super(ConvBnAct, self).__init__() + norm_kwargs = norm_kwargs or {} + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type) + self.bn1 = norm_layer(out_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + def feature_info(self, location): + if location == 'expansion' or location == 'depthwise': + # no expansion or depthwise this block, use act after conv + info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels) + else: # location == 'bottleneck' + info = dict(module='', hook_type='', num_chs=self.conv.out_channels) + return info + + def forward(self, x): + x = self.conv(x) + x = self.bn1(x) + x = self.act1(x) + return x + +def adaptive_pool_feat_mult(pool_type='avg'): + if pool_type == 'catavgmax': + return 2 + else: + return 1 + +def modify_block_args(block_args, kernel_size, exp_ratio): + # kernel_size: 3,5,7 + # exp_ratio: 4,6 + block_type = block_args['block_type'] + # each type of block has different valid arguments, fill accordingly + if block_type == 'cn': + block_args['kernel_size'] = kernel_size + elif block_type == 'er': + block_args['exp_kernel_size'] = kernel_size + else: + block_args['dw_kernel_size'] = kernel_size + + if block_type == 'ir' or block_type == 'er': + block_args['exp_ratio'] = exp_ratio + return block_args + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. + """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch definitions. 
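+    # Worked example (illustrative): repeats=[1, 2] with depth_multiplier=1.2 gives
+    # num_repeat=3 and num_repeat_scaled=ceil(3.6)=4; allocating in reverse yields
+    # [1, 3], so the single-repeat first block stays unscaled.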
+ repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + + +def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1): + arch_args = [] + for stack_idx, block_strings in enumerate(arch_def): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = _decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + stack_args.append(ba) + repeats.append(rep) + arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc)) + return arch_args + + +class ChildNetBuilder: + """ Build Trunk Blocks + """ + def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None, + output_stride=32, pad_type='', act_layer=None, se_kwargs=None, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0., feature_location='', + verbose=False): + self.channel_multiplier = channel_multiplier + self.channel_divisor = channel_divisor + self.channel_min = channel_min + self.output_stride = output_stride + self.pad_type = pad_type + self.act_layer = act_layer + self.se_kwargs = se_kwargs + self.norm_layer = norm_layer + self.norm_kwargs = norm_kwargs + self.drop_path_rate = drop_path_rate + self.feature_location = feature_location + assert feature_location in ('pre_pwl', 'post_exp', '') + self.verbose = verbose + + # state updated during build, consumed by model + self.in_chs = None + self.features = OrderedDict() + + def _round_channels(self, chs): + return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min) + + def _make_block(self, ba, block_idx, block_count): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self._round_channels(ba['out_chs']) + if 'fake_in_chs' in ba and ba['fake_in_chs']: + # FIXME this is a hack to work around mismatch in origin impl input filters + ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) + ba['norm_layer'] = self.norm_layer + ba['norm_kwargs'] = self.norm_kwargs + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + if bt == 'ir': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + logging.info(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba))) + if ba.get('num_experts', 0) > 0: + block = CondConvResidual(**ba) + else: + block = InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + logging.info(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba))) + block = DepthwiseSeparableConv(**ba) + elif bt == 'er': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + logging.info(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba))) + block = EdgeResidual(**ba) + elif bt == 'cn': + if self.verbose: + 
logging.info(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)))
+            block = ConvBnAct(**ba)
+        else:
+            assert False, 'Unknown block type (%s) while building model.' % bt
+        self.in_chs = ba['out_chs']  # update in_chs for arg of next block
+
+        return block
+
+    def __call__(self, in_chs, model_block_args):
+        """ Build the blocks
+        Args:
+            in_chs: Number of input-channels passed to first block
+            model_block_args: A list of lists, outer list defines stages, inner
+                list contains strings defining block configuration(s)
+        Returns:
+            List of block stacks (each stack wrapped in nn.Sequential)
+        """
+        if self.verbose:
+            logging.info('Building model trunk with %d stages...' % len(model_block_args))
+        self.in_chs = in_chs
+        total_block_count = sum([len(x) for x in model_block_args])
+        total_block_idx = 0
+        current_stride = 2
+        current_dilation = 1
+        feature_idx = 0
+        stages = []
+        # outer list of block_args defines the stacks ('stages' by some conventions)
+        for stage_idx, stage_block_args in enumerate(model_block_args):
+            last_stack = stage_idx == (len(model_block_args) - 1)
+            if self.verbose:
+                logging.info('Stack: {}'.format(stage_idx))
+            assert isinstance(stage_block_args, list)
+
+            blocks = []
+            # each stack (stage) contains a list of block arguments
+            for block_idx, block_args in enumerate(stage_block_args):
+                last_block = block_idx == (len(stage_block_args) - 1)
+                extract_features = ''  # No features extracted
+                if self.verbose:
+                    logging.info(' Block: {}'.format(block_idx))
+
+                # Sort out stride, dilation, and feature extraction details
+                assert block_args['stride'] in (1, 2)
+                if block_idx >= 1:
+                    # only the first block in any stack can have a stride > 1
+                    block_args['stride'] = 1
+
+                do_extract = False
+                if self.feature_location == 'pre_pwl':
+                    if last_block:
+                        next_stage_idx = stage_idx + 1
+                        if next_stage_idx >= len(model_block_args):
+                            do_extract = True
+                        else:
+                            do_extract = model_block_args[next_stage_idx][0]['stride'] > 1
+                elif self.feature_location == 'post_exp':
+                    if block_args['stride'] > 1 or (last_stack and last_block):
+                        do_extract = True
+                if do_extract:
+                    extract_features = self.feature_location
+
+                next_dilation = current_dilation
+                if block_args['stride'] > 1:
+                    next_output_stride = current_stride * block_args['stride']
+                    if next_output_stride > self.output_stride:
+                        next_dilation = current_dilation * block_args['stride']
+                        block_args['stride'] = 1
+                        if self.verbose:
+                            logging.info(' Converting stride to dilation to maintain output_stride=={}'.format(
+                                self.output_stride))
+                    else:
+                        current_stride = next_output_stride
+                block_args['dilation'] = current_dilation
+                if next_dilation != current_dilation:
+                    current_dilation = next_dilation
+
+                # create the block
+                block = self._make_block(block_args, total_block_idx, total_block_count)
+                blocks.append(block)
+
+                # stash feature module name and channel info for model feature extraction
+                if extract_features:
+                    feature_module = block.feature_module(extract_features)
+                    if feature_module:
+                        feature_module = 'blocks.{}.{}.'.format(stage_idx, block_idx) + feature_module
+                    feature_channels = block.feature_channels(extract_features)
+                    self.features[feature_idx] = dict(
+                        name=feature_module,
+                        num_chs=feature_channels
+                    )
+                    feature_idx += 1
+
+                total_block_idx += 1  # incr global block idx (across all stacks)
+            stages.append(nn.Sequential(*blocks))
+        return stages
+
+def _init_weight_goog(m, n='', fix_group_fanout=True, last_bn=None):
+    """ Weight initialization as per Tensorflow official implementations.
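+    For a plain Conv2d this amounts to fan_out = kernel_h * kernel_w * out_channels
+    (divided by m.groups when fix_group_fanout is set), with weights drawn from
+    N(0, sqrt(2.0 / fan_out)); e.g. a 3x3 conv with 32 output channels and groups=1
+    gives std = sqrt(2.0 / 288) (illustrative numbers, matching the code below).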
+    Args:
+        m (nn.Module): module to init
+        n (str): module name
+        fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs
+    Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc:
+    * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py
+    * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
+    """
+    if isinstance(m, CondConv2d):
+        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+        if fix_group_fanout:
+            fan_out //= m.groups
+        init_weight_fn = get_condconv_initializer(
+            lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape)
+        init_weight_fn(m.weight)
+        if m.bias is not None:
+            m.bias.data.zero_()
+    elif isinstance(m, nn.Conv2d):
+        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+        if fix_group_fanout:
+            fan_out //= m.groups
+        m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
+        if m.bias is not None:
+            m.bias.data.zero_()
+    elif isinstance(m, nn.BatchNorm2d):
+        # zero-init gamma for the last BN of a block when requested via last_bn
+        # (zero_gamma); otherwise use the standard BN init
+        if last_bn and n in last_bn:
+            m.weight.data.zero_()
+            m.bias.data.zero_()
+        else:
+            m.weight.data.fill_(1.0)
+            m.bias.data.zero_()
+    elif isinstance(m, nn.Linear):
+        fan_out = m.weight.size(0)  # fan-out
+        fan_in = 0
+        if 'routing_fn' in n:
+            fan_in = m.weight.size(1)
+        init_range = 1.0 / math.sqrt(fan_in + fan_out)
+        m.weight.data.uniform_(-init_range, init_range)
+        m.bias.data.zero_()
+
+
+def efficientnet_init_weights(model: nn.Module, init_fn=None, zero_gamma=False):
+    last_bn = []
+    if zero_gamma:
+        prev_n = ''
+        for n, m in model.named_modules():
+            if isinstance(m, nn.BatchNorm2d):
+                if ''.join(prev_n.split('.')[:-1]) != ''.join(n.split('.')[:-1]):
+                    last_bn.append(prev_n)
+                prev_n = n
+        last_bn.append(prev_n)
+
+    init_fn = init_fn or _init_weight_goog
+    for n, m in model.named_modules():
+        init_fn(m, n, last_bn=last_bn)
diff --git a/CDARTS_detection/mmdet/models/backbones/detnas.py b/CDARTS_detection/mmdet/models/backbones/detnas.py
new file mode 100644
index 0000000..d2eab8e
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/backbones/detnas.py
@@ -0,0 +1,342 @@
+import logging
+
+import torch
+import torch.nn as nn
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from mmcv.cnn import constant_init, kaiming_init
+from .utils import load_checkpoint
+
+from ..registry import BACKBONES
+
+norm_cfg = {
+    'BN': nn.BatchNorm2d,
+    'SyncBN': nn.SyncBatchNorm,
+    'GN': nn.GroupNorm,
+}
+_norm = 'SyncBN'
+norm_layer = norm_cfg[_norm]
+
+
+blocks_key = [
+    'shufflenet_3x3',
+    'shufflenet_5x5',
+    'shufflenet_7x7',
+    'xception_3x3',
+]
+
+
+Blocks = {
+    'shufflenet_3x3': lambda prefix, in_channels, output_channels, base_mid_channels, stride, bn_training: conv1x1_dwconv_conv1x1(prefix, in_channels, output_channels, base_mid_channels, 3, stride, bn_training),
+    'shufflenet_5x5': lambda prefix, in_channels, output_channels, base_mid_channels, stride, bn_training: conv1x1_dwconv_conv1x1(prefix, in_channels, output_channels, base_mid_channels, 5, stride, bn_training),
+    'shufflenet_7x7': lambda prefix, in_channels, output_channels, base_mid_channels, stride, bn_training: conv1x1_dwconv_conv1x1(prefix, in_channels, output_channels, base_mid_channels, 7, stride, bn_training),
+    'xception_3x3': lambda prefix, in_channels, output_channels, base_mid_channels, stride, bn_training: xception(prefix, in_channels, output_channels, base_mid_channels, stride, bn_training),
+}
+
+
+def
create_spatial_conv2d_group_bn_relu(prefix, in_channels, out_channels, kernel_size, stride, padding=0, dilation=1, groups=1, + bias=False, has_bn=True, has_relu=True, channel_shuffle=False, has_spatial_conv=True, has_spatial_conv_bn=True, + conv_name_fun=None, bn_name_fun=None, bn_training=True, fix_weights=False): + conv_name = prefix + if conv_name_fun: + conv_name = conv_name_fun(prefix) + + layer = nn.Sequential() + + if has_spatial_conv: + spatial_conv_name = conv_name + '_s' + layer.add_module(spatial_conv_name, nn.Conv2d(in_channels=in_channels, out_channels=in_channels, + kernel_size=kernel_size, stride=stride, padding=padding, + dilation=dilation, groups=in_channels, bias=bias)) + if fix_weights: + pass + + if has_spatial_conv_bn: + layer.add_module(spatial_conv_name + '_bn', norm_layer(in_channels)) + + if channel_shuffle: + pass + + assert in_channels % groups == 0 + assert out_channels % groups == 0 + + layer.add_module(conv_name, nn.Conv2d(in_channels=in_channels, out_channels=out_channels, + kernel_size=1, stride=1, padding=0, + groups=groups, bias=bias)) + if fix_weights: + pass + + if has_bn: + bn_name = 'bn_' + prefix + if bn_name_fun: + bn_name = bn_name_fun(prefix) + layer.add_module(bn_name, norm_layer(out_channels)) + if bn_training: + pass + + if has_relu: + layer.add_module('relu' + prefix, nn.ReLU(inplace=True)) + + return layer + + +def conv1x1_dwconv_conv1x1(prefix, in_channels, out_channels, mid_channels, kernel_size, stride, bn_training=True): + mid_channels = int(mid_channels) + layer = list() + + layer.append(create_spatial_conv2d_group_bn_relu(prefix=prefix + '_branch2a', in_channels=in_channels, out_channels=mid_channels, + kernel_size=-1, stride=1, padding=0, groups=1, has_bn=True, has_relu=True, + channel_shuffle=False, has_spatial_conv=False, has_spatial_conv_bn=False, + conv_name_fun=lambda p: 'interstellar' + p, + bn_name_fun=lambda p: 'bn' + p, + bn_training=bn_training)) + layer.append(create_spatial_conv2d_group_bn_relu(prefix=prefix + '_branch2b', in_channels=mid_channels, out_channels=out_channels, + kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=1, + has_bn=True, has_relu=False, channel_shuffle=False, has_spatial_conv=True, + has_spatial_conv_bn=True, + conv_name_fun=lambda p: 'interstellar' + p, + bn_name_fun=lambda p: 'bn' + p, + bn_training=bn_training)) + return nn.Sequential(*layer) + + +def xception(prefix, in_channels, out_channels, mid_channels, stride, bn_training=True): + mid_channels = int(mid_channels) + layer = list() + + layer.append(create_spatial_conv2d_group_bn_relu(prefix=prefix + '_branch2a', in_channels=in_channels, out_channels=mid_channels, + kernel_size=3, stride=stride, padding=1, groups=1, has_bn=True, has_relu=True, + channel_shuffle=False, has_spatial_conv=True, has_spatial_conv_bn=True, + conv_name_fun=lambda p: 'interstellar' + p, + bn_name_fun=lambda p: 'bn' + p, + bn_training=bn_training)) + + layer.append(create_spatial_conv2d_group_bn_relu(prefix=prefix + '_branch2b', in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=3, stride=1, padding=1, groups=1, has_bn=True, + has_relu=True, + channel_shuffle=False, has_spatial_conv=True, + has_spatial_conv_bn=True, + conv_name_fun=lambda p: 'interstellar' + p, + bn_name_fun=lambda p: 'bn' + p, + bn_training=bn_training)) + + layer.append(create_spatial_conv2d_group_bn_relu(prefix=prefix + '_branch2c', in_channels=mid_channels, + out_channels=out_channels, + kernel_size=3, stride=1, padding=1, groups=1, has_bn=True, + 
has_relu=False, + channel_shuffle=False, has_spatial_conv=True, + has_spatial_conv_bn=True, + conv_name_fun=lambda p: 'interstellar' + p, + bn_name_fun=lambda p: 'bn' + p, + bn_training=bn_training)) + return nn.Sequential(*layer) + + +class ConvBNReLU(nn.Module): + + def __init__(self, in_channel, out_channel, k_size, stride=1, padding=0, groups=1, + has_bn=True, has_relu=True, gaussian_init=False): + super(ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_channel, out_channel, kernel_size=k_size, + stride=stride, padding=padding, + groups=groups, bias=True) + if gaussian_init: + nn.init.normal_(self.conv.weight.data, 0, 0.01) + + if has_bn: + self.bn = norm_layer(out_channel) + + self.has_bn = has_bn + self.has_relu = has_relu + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv(x) + if self.has_bn: + x = self.bn(x) + if self.has_relu: + x = self.relu(x) + return x + + +def channel_shuffle2(x): + channels = x.shape[1] + assert channels % 4 == 0 + + height = x.shape[2] + width = x.shape[3] + + x = x.reshape(x.shape[0] * channels // 2, 2, height * width) + x = x.permute(1, 0, 2) + x = x.reshape(2, -1, channels // 2, height, width) + return x[0], x[1] + + +class ShuffleNetV2BlockSearched(nn.Module): + def __init__(self, prefix, in_channels, out_channels, stride, base_mid_channels, i_th, architecture): + super(ShuffleNetV2BlockSearched, self).__init__() + op = blocks_key[architecture[i_th]] + self.ksize = int(op.split('_')[1][0]) + self.stride = stride + if self.stride == 2: + self.conv = Blocks[op](prefix + '_' + op, in_channels, out_channels - in_channels, base_mid_channels, stride, True) + else: + self.conv = Blocks[op](prefix + '_' + op, in_channels // 2, out_channels // 2, base_mid_channels, stride, True) + if stride > 1: + self.proj_conv = create_spatial_conv2d_group_bn_relu(prefix + '_proj', in_channels, in_channels, self.ksize, + stride, self.ksize // 2, + has_bn=True, has_relu=True, channel_shuffle=False, + has_spatial_conv=True, has_spatial_conv_bn=True, + conv_name_fun=lambda p: 'interstellar' + p, + bn_name_fun=lambda p: 'bn' + p) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x_in): + if self.stride == 1: + x_proj, x = channel_shuffle2(x_in) + else: + x_proj = x_in + x = x_in + x_proj = self.proj_conv(x_proj) + x = self.relu(self.conv(x)) + + return torch.cat((x_proj, x), dim=1) + + +@BACKBONES.register_module +class DetNas(nn.Module): + def __init__(self, model_size='VOC_FPN_300M', out_indices=(3, 7, 15, 19), frozen_stages=-1): + super(DetNas, self).__init__() + print('Model size is {}.'.format(model_size)) + self.out_indices = out_indices + self.frozen_stages=frozen_stages + + if model_size == 'COCO_FPN_3.8G': + architecture = [0, 0, 3, 1, 2, 1, 0, 2, 0, 3, 1, 2, 3, 3, 2, 0, 2, 1, 1, 3, + 2, 0, 2, 2, 2, 1, 3, 1, 0, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3] + stage_repeats = [8, 8, 16, 8] + stage_out_channels = [-1, 72, 172, 432, 864, 1728, 1728] + elif model_size == 'COCO_FPN_1.3G': + architecture = [0, 0, 3, 1, 2, 1, 0, 2, 0, 3, 1, 2, 3, 3, 2, 0, 2, 1, 1, 3, + 2, 0, 2, 2, 2, 1, 3, 1, 0, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3, 3] + stage_repeats = [8, 8, 16, 8] + stage_out_channels = [-1, 48, 96, 240, 480, 960, 1024] + elif model_size == 'COCO_FPN_300M': + architecture = [2, 1, 2, 0, 2, 1, 1, 2, 3, 3, 1, 3, 0, 0, 3, 1, 3, 1, 3, 2] + stage_repeats = [4, 4, 8, 4] + stage_out_channels = [-1, 16, 64, 160, 320, 640, 1024] + elif model_size == 'COCO_RetinaNet_300M': + architecture = [2, 3, 1, 1, 3, 2, 1, 3, 3, 1, 1, 1, 3, 3, 2, 0, 3, 3, 3, 3] + stage_repeats = 
[4, 4, 8, 4] + stage_out_channels = [-1, 16, 64, 160, 320, 640, 1024] + elif model_size == 'VOC_FPN_300M': + architecture = [2, 1, 0, 3, 1, 3, 0, 3, 2, 0, 1, 1, 3, 3, 3, 3, 3, 3, 3, 1] + stage_repeats = [4, 4, 8, 4] + stage_out_channels = [-1, 16, 64, 160, 320, 640, 1024] + elif model_size == 'VOC_RetinaNet_300M': + architecture = [1, 3, 0, 0, 2, 3, 3, 3, 2, 3, 3, 3, 3, 2, 2, 0, 2, 3, 1, 1] + stage_repeats = [4, 4, 8, 4] + stage_out_channels = [-1, 16, 64, 160, 320, 640, 1024] + else: + raise NotImplementedError + + self.first_conv = ConvBNReLU(in_channel=3, out_channel=stage_out_channels[1], k_size=3, stride=2, padding=1, gaussian_init=True) + + self.features = list() + + in_channels = stage_out_channels[1] + i_th = 0 + for id_stage in range(1, len(stage_repeats) + 1): + out_channels = stage_out_channels[id_stage + 1] + repeats = stage_repeats[id_stage - 1] + for id_repeat in range(repeats): + prefix = str(id_stage) + chr(ord('a') + id_repeat) + stride = 1 if id_repeat > 0 else 2 + self.features.append(ShuffleNetV2BlockSearched(prefix, in_channels=in_channels, out_channels=out_channels, + stride=stride, base_mid_channels=out_channels // 2, i_th=i_th, + architecture=architecture)) + in_channels = out_channels + i_th += 1 + + self.features = nn.Sequential(*self.features) + + if self.out_indices[-1] == len(self.features): + self.last_conv = ConvBNReLU(in_channel=in_channels, out_channel=stage_out_channels[-1], k_size=1, stride=1, padding=0) + + # self.drop_out = nn.Dropout2d(p=0.2) + # self.global_pool = nn.AvgPool2d(7) + self._initialize_weights() + + for m in self.modules(): + if isinstance(m, nn.SyncBatchNorm): + m._specify_ddp_gpu_num(1) + + self._freeze_stages() + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.first_conv.bn.eval() + for m in [self.first_conv]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages): + self.features[i].eval() + for param in self.features[i].parameters(): + param.requires_grad = False + + def _initialize_weights(self): + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + if 'first' in name: + nn.init.normal_(m.weight, 0, 0.01) + else: + nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1]) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, norm_layer): + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0.0001) + nn.init.constant_(m.running_mean, 0) + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0.0001) + nn.init.constant_(m.running_mean, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + outs = [] + x = self.first_conv(x) + + for i in range(len(self.features)): + x = self.features[i](x) + if i in self.out_indices: + outs.append(x) + + if self.out_indices[-1] == len(self.features): + x = self.last_conv(x) + outs.append(x) + + # x = self.last_conv(x) + # x = self.drop_out(x) + # x = self.global_pool(x).view(x.size(0), -1) + return 
tuple(outs)
\ No newline at end of file
diff --git a/CDARTS_detection/mmdet/models/backbones/dropblock.py b/CDARTS_detection/mmdet/models/backbones/dropblock.py
new file mode 100644
index 0000000..dcbd8b9
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/backbones/dropblock.py
@@ -0,0 +1,150 @@
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+
+class DropBlock2D(nn.Module):
+    r"""Randomly zeroes 2D spatial blocks of the input tensor.
+    As described in the paper
+    `DropBlock: A regularization method for convolutional networks`_ ,
+    dropping whole blocks of a feature map allows us to remove semantic
+    information, as compared to regular dropout.
+    Args:
+        drop_prob (float): probability of an element to be dropped.
+        block_size (int): size of the block to drop
+    Shape:
+        - Input: `(N, C, H, W)`
+        - Output: `(N, C, H, W)`
+    .. _DropBlock: A regularization method for convolutional networks:
+       https://arxiv.org/abs/1810.12890
+    """
+
+    def __init__(self, drop_prob, block_size):
+        super(DropBlock2D, self).__init__()
+
+        self.drop_prob = drop_prob
+        self.block_size = block_size
+
+    def forward(self, x):
+        # shape: (bsize, channels, height, width)
+
+        assert x.dim() == 4, \
+            "Expected input with 4 dimensions (bsize, channels, height, width)"
+
+        if not self.training or self.drop_prob == 0.:
+            return x
+        else:
+            # get gamma value
+            gamma = self._compute_gamma(x)
+
+            # sample mask and place on input device
+            mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).to(x)
+
+            # compute block mask
+            block_mask = self._compute_block_mask(mask)
+
+            # apply block mask
+            out = x * block_mask[:, None, :, :]
+
+            # scale output to keep the expected activation magnitude
+            out = out * block_mask.numel() / block_mask.sum()
+
+            return out
+
+    def _compute_block_mask(self, mask):
+        # max-pooling the sampled point mask expands each dropped point into a block
+        block_mask = F.max_pool2d(input=mask[:, None, :, :],
+                                  kernel_size=(self.block_size, self.block_size),
+                                  stride=(1, 1),
+                                  padding=self.block_size // 2)
+
+        if self.block_size % 2 == 0:
+            block_mask = block_mask[:, :, :-1, :-1]
+
+        block_mask = 1 - block_mask.squeeze(1)
+
+        return block_mask
+
+    def _compute_gamma(self, x):
+        # simplified gamma from the paper; the border/valid-region correction is omitted
+        return self.drop_prob / (self.block_size ** 2)
+
+
+class DropBlock3D(DropBlock2D):
+    r"""Randomly zeroes 3D spatial blocks of the input tensor.
+    An extension to the concept described in the paper
+    `DropBlock: A regularization method for convolutional networks`_ ,
+    dropping whole blocks of a feature map allows us to remove semantic
+    information, as compared to regular dropout.
+    Args:
+        drop_prob (float): probability of an element to be dropped.
+        block_size (int): size of the block to drop
+    Shape:
+        - Input: `(N, C, D, H, W)`
+        - Output: `(N, C, D, H, W)`
+    ..
_DropBlock: A regularization method for convolutional networks:
+       https://arxiv.org/abs/1810.12890
+    """
+
+    def __init__(self, drop_prob, block_size):
+        super(DropBlock3D, self).__init__(drop_prob, block_size)
+
+    def forward(self, x):
+        # shape: (bsize, channels, depth, height, width)
+
+        assert x.dim() == 5, \
+            "Expected input with 5 dimensions (bsize, channels, depth, height, width)"
+
+        if not self.training or self.drop_prob == 0.:
+            return x
+        else:
+            # get gamma value
+            gamma = self._compute_gamma(x)
+
+            # sample mask and place on input device
+            mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).to(x)
+
+            # compute block mask
+            block_mask = self._compute_block_mask(mask)
+
+            # apply block mask
+            out = x * block_mask[:, None, :, :, :]
+
+            # scale output
+            out = out * block_mask.numel() / block_mask.sum()
+
+            return out
+
+    def _compute_block_mask(self, mask):
+        block_mask = F.max_pool3d(input=mask[:, None, :, :, :],
+                                  kernel_size=(self.block_size, self.block_size, self.block_size),
+                                  stride=(1, 1, 1),
+                                  padding=self.block_size // 2)
+
+        if self.block_size % 2 == 0:
+            block_mask = block_mask[:, :, :-1, :-1, :-1]
+
+        block_mask = 1 - block_mask.squeeze(1)
+
+        return block_mask
+
+    def _compute_gamma(self, x):
+        return self.drop_prob / (self.block_size ** 3)
+
+
+class DropBlockScheduled(nn.Module):
+    def __init__(self, dropblock, start_value, stop_value, nr_steps):
+        super(DropBlockScheduled, self).__init__()
+        self.dropblock = dropblock
+        self.i = 0
+        # linearly ramp drop_prob from start_value to stop_value over nr_steps training steps
+        self.drop_values = np.linspace(start=start_value, stop=stop_value, num=nr_steps)
+
+    def forward(self, x):
+        if self.training:
+            self.step()
+        return self.dropblock(x)
+
+    def step(self):
+        if self.i < len(self.drop_values):
+            self.dropblock.drop_prob = self.drop_values[self.i]
+
+        self.i += 1
\ No newline at end of file
diff --git a/CDARTS_detection/mmdet/models/backbones/efficientnet.py b/CDARTS_detection/mmdet/models/backbones/efficientnet.py
new file mode 100644
index 0000000..72483aa
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/backbones/efficientnet.py
@@ -0,0 +1,1934 @@
+""" PyTorch EfficientNet Family
+
+An implementation of EfficientNet that covers a variety of related models with efficient architectures:
+
+* EfficientNet (B0-B8, L2 + Tensorflow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports)
+  - EfficientNet: Rethinking Model Scaling for CNNs - https://arxiv.org/abs/1905.11946
+  - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971
+  - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665
+  - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252
+
+* MixNet (Small, Medium, and Large)
+  - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595
+
+* MNasNet B1, A1 (SE), Small
+  - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626
+
+* FBNet-C
+  - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443
+
+* Single-Path NAS Pixel1
+  - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877
+
+* And likely more...
+ +Hacked together by Ross Wightman +""" +import torch +import torch.nn as nn +from torch.nn import functional as F +import torch.utils.model_zoo as model_zoo + +from .efficientnet_builder import * +from .feature_hooks import FeatureHooks +from ..registry import BACKBONES + +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) +IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5) +IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5) + + +def hard_sigmoid(x, inplace: bool = False): + if inplace: + return x.add_(3.).clamp_(0., 6.).div_(6.) + else: + return F.relu6(x + 3.) / 6. + +class HardSigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_sigmoid(x, self.inplace) + +def adaptive_pool_feat_mult(pool_type='avg'): + if pool_type == 'catavgmax': + return 2 + else: + return 1 + + +def adaptive_avgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return 0.5 * (x_avg + x_max) + + +def adaptive_catavgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return torch.cat((x_avg, x_max), 1) + + +def select_adaptive_pool2d(x, pool_type='avg', output_size=1): + """Selectable global pooling function with dynamic input kernel size + """ + if pool_type == 'avg': + x = F.adaptive_avg_pool2d(x, output_size) + elif pool_type == 'avgmax': + x = adaptive_avgmax_pool2d(x, output_size) + elif pool_type == 'catavgmax': + x = adaptive_catavgmax_pool2d(x, output_size) + elif pool_type == 'max': + x = F.adaptive_max_pool2d(x, output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + return x + + +class AdaptiveAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_avgmax_pool2d(x, self.output_size) + + +class AdaptiveCatAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveCatAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_catavgmax_pool2d(x, self.output_size) + +class SelectAdaptivePool2d(nn.Module): + """Selectable global pooling layer with dynamic input kernel size + """ + def __init__(self, output_size=1, pool_type='avg', flatten=False): + super(SelectAdaptivePool2d, self).__init__() + self.output_size = output_size + self.pool_type = pool_type + self.flatten = flatten + if pool_type == 'avgmax': + self.pool = AdaptiveAvgMaxPool2d(output_size) + elif pool_type == 'catavgmax': + self.pool = AdaptiveCatAvgMaxPool2d(output_size) + elif pool_type == 'max': + self.pool = nn.AdaptiveMaxPool2d(output_size) + else: + if pool_type != 'avg': + assert False, 'Invalid pool type: %s' % pool_type + self.pool = nn.AdaptiveAvgPool2d(output_size) + + def forward(self, x): + x = self.pool(x) + if self.flatten: + x = x.flatten(1) + return x + + def feat_mult(self): + return adaptive_pool_feat_mult(self.pool_type) + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + 'output_size=' + str(self.output_size) \ + + ', pool_type=' + self.pool_type + ')' + +def create_conv2d(in_chs, out_chs, kernel_size, **kwargs): + """ Select a 2d convolution implementation based on arguments + Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. 
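+    Dispatch summary (illustrative examples, following the logic below):
+        create_conv2d(32, 64, 3)                  -> padded conv, groups=1
+        create_conv2d(32, 32, 3, depthwise=True)  -> depthwise conv, groups=out_chs
+        create_conv2d(32, 64, [3, 5, 7])          -> MixedConv2d over three kernel sizes
+        create_conv2d(32, 64, 3, num_experts=4)   -> CondConv2d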
+ Used extensively by EfficientNet, MobileNetv3 and related networks. + """ + assert 'groups' not in kwargs # only use 'depthwise' bool arg + if isinstance(kernel_size, list): + assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. + m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs) + else: + depthwise = kwargs.pop('depthwise', False) + groups = out_chs if depthwise else 1 + if 'num_experts' in kwargs and kwargs['num_experts'] > 0: + m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs) + else: + m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs) + return m + + +def conv_bn(inp, oup, stride, groups=1, act_fn=nn.ReLU): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False, groups=groups), + nn.BatchNorm2d(oup), + act_fn(inplace=True) + ) + + +def conv_1x1_bn(inp, oup, groups=1, act_fn=nn.ReLU): + return nn.Sequential( + nn.Conv2d(inp, oup, 1, 1, 0, bias=False, groups=groups), + nn.BatchNorm2d(oup), + act_fn(inplace=True) + ) + + +__all__ = ['EfficientNet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +default_cfgs = { + 'mnasnet_050': _cfg(url=''), + 'mnasnet_075': _cfg(url=''), + 'mnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth'), + 'mnasnet_140': _cfg(url=''), + + 'semnasnet_050': _cfg(url=''), + 'semnasnet_075': _cfg(url=''), + 'semnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth'), + 'semnasnet_140': _cfg(url=''), + 'mnasnet_small': _cfg(url=''), + + 'mobilenetv2_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth'), + 'mobilenetv2_110d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth'), + 'mobilenetv2_120d': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth'), + 'mobilenetv2_140': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth'), + + 'fbnetc_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', + interpolation='bilinear'), + 'spnasnet_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', + interpolation='bilinear'), + + 'efficientnet_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth'), + 'efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', + input_size=(3, 240, 240), pool_size=(8, 8)), + 'efficientnet_b2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', + input_size=(3, 260, 260), pool_size=(9, 9)), 
+ 'efficientnet_b2a': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', + input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0), + 'efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra-a5e2fbc7.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'efficientnet_b3a': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra-a5e2fbc7.pth', + input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), + 'efficientnet_b4': _cfg( + url='', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'efficientnet_b5': _cfg( + url='', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'efficientnet_b6': _cfg( + url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'efficientnet_b7': _cfg( + url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'efficientnet_b8': _cfg( + url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + 'efficientnet_l2': _cfg( + url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961), + + 'efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth'), + 'efficientnet_em': _cfg( + url='', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_el': _cfg( + url='', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'efficientnet_cc_b0_4e': _cfg(url=''), + 'efficientnet_cc_b0_8e': _cfg(url=''), + 'efficientnet_cc_b1_8e': _cfg(url='', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'efficientnet_lite0': _cfg( + url=''), + 'efficientnet_lite1': _cfg( + url='', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'efficientnet_lite2': _cfg( + url='', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'efficientnet_lite3': _cfg( + url='', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'efficientnet_lite4': _cfg( + url='', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + + 'efficientnet_b1_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb1_pruned_9ebb3fe6.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b2_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb2_pruned_203f55bc.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'efficientnet_b3_pruned': _cfg( + url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb3_pruned_5abcc29f.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + + 'tf_efficientnet_b0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ap': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_b8_ap': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), + + 'tf_efficientnet_b0_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', + input_size=(3, 224, 224)), + 'tf_efficientnet_b1_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_b2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), + 'tf_efficientnet_b3_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + 'tf_efficientnet_b4_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), + 'tf_efficientnet_b5_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', + input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), + 'tf_efficientnet_b6_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', + input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), + 'tf_efficientnet_b7_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', + input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), + 'tf_efficientnet_l2_ns_475': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', + input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936), + 'tf_efficientnet_l2_ns': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', + input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96), + + 'tf_efficientnet_es': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 224, 224), ), + 'tf_efficientnet_em': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + 'tf_efficientnet_el': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), + + 'tf_efficientnet_cc_b0_4e': _cfg( + 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b0_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_efficientnet_cc_b1_8e': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), + + 'tf_efficientnet_lite0': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite1': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite2': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, + interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res + ), + 'tf_efficientnet_lite3': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'), + 'tf_efficientnet_lite4': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', + mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), + input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'), + + 'mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth'), + 'mixnet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth'), + 'mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth'), + 'mixnet_xl': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth'), + 'mixnet_xxl': _cfg(), + + 'tf_mixnet_s': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth'), + 'tf_mixnet_m': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth'), + 'tf_mixnet_l': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'), +} + +_DEBUG = False + + +class EfficientNet(nn.Module): + """ (Generic) EfficientNet + + A flexible and performant PyTorch implementation of efficient network architectures, including: + * EfficientNet B0-B8, L2 + * 
EfficientNet-EdgeTPU + * EfficientNet-CondConv + * MixNet S, M, L, XL + * MnasNet A1, B1, and small + * FBNet C + * Single-Path NAS Pixel1 + + """ + + def __init__(self, block_args, num_classes=1000, num_features=1280, in_chans=3, stem_size=32, + channel_multiplier=1.0, channel_divisor=8, channel_min=None, + output_stride=32, pad_type='', fix_stem=False, act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0., + se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, global_pool='avg'): + super(EfficientNet, self).__init__() + norm_kwargs = norm_kwargs or {} + + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + self._in_chs = in_chans + + # Stem + if not fix_stem: + stem_size = round_channels(stem_size, channel_multiplier, channel_divisor, channel_min) + self.conv_stem = create_conv2d(self._in_chs, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size, **norm_kwargs) + self.act1 = act_layer(inplace=True) + self._in_chs = stem_size + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + channel_multiplier, channel_divisor, channel_min, output_stride, pad_type, act_layer, se_kwargs, + norm_layer, norm_kwargs, drop_path_rate, verbose=_DEBUG) + self.blocks = nn.Sequential(*builder(self._in_chs, block_args)) + self.feature_info = builder.features + self._in_chs = builder.in_chs + + # Head + Pooling + self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type) + self.bn2 = norm_layer(self.num_features, **norm_kwargs) + self.act2 = act_layer(inplace=True) + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + + # Classifier + self.classifier = nn.Linear(self.num_features * self.global_pool.feat_mult(), self.num_classes) + + efficientnet_init_weights(self) + + def as_sequential(self): + layers = [self.conv_stem, self.bn1, self.act1] + layers.extend(self.blocks) + layers.extend([self.conv_head, self.bn2, self.act2, self.global_pool]) + layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) + return nn.Sequential(*layers) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.num_classes = num_classes + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.classifier = nn.Linear( + self.num_features * self.global_pool.feat_mult(), num_classes) if num_classes else None + + def forward_features(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + x = self.blocks(x) + x = self.conv_head(x) + x = self.bn2(x) + x = self.act2(x) + return x + + def forward(self, x): + x = self.forward_features(x) + x = self.global_pool(x) + x = x.flatten(1) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + + +class EfficientNetFeatures(nn.Module): + """ EfficientNet Feature Extractor + + A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation + and object detection models. 
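+
+    A minimal usage sketch (illustrative only; `arch_def` is an assumed block-string
+    definition as used elsewhere in this file):
+        net = EfficientNetFeatures(decode_arch_def(arch_def), out_indices=(0, 1, 2, 3, 4))
+        feats = net(torch.randn(1, 3, 224, 224))  # one feature map per out index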
+ """ + + def __init__(self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', + in_chans=3, stem_size=32, channel_multiplier=1.0, channel_divisor=8, channel_min=None, + output_stride=32, pad_type='', fix_stem=False, act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0., + se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None): + super(EfficientNetFeatures, self).__init__() + norm_kwargs = norm_kwargs or {} + + # TODO only create stages needed, currently all stages are created regardless of out_indices + num_stages = max(out_indices) + 1 + + self.out_indices = out_indices + self.feature_location = feature_location + self.drop_rate = drop_rate + self._in_chs = in_chans + + # Stem + if not fix_stem: + stem_size = round_channels(stem_size, channel_multiplier, channel_divisor, channel_min) + self.conv_stem = create_conv2d(self._in_chs, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size, **norm_kwargs) + self.act1 = act_layer(inplace=True) + self._in_chs = stem_size + + # Middle stages (IR/ER/DS Blocks) + builder = EfficientNetBuilder( + channel_multiplier, channel_divisor, channel_min, output_stride, pad_type, act_layer, se_kwargs, + norm_layer, norm_kwargs, drop_path_rate, feature_location=feature_location, verbose=_DEBUG) + self.blocks = nn.Sequential(*builder(self._in_chs, block_args)) + self._feature_info = builder.features # builder provides info about feature channels for each block + self._stage_to_feature_idx = { + v['stage_idx']: fi for fi, v in self._feature_info.items() if fi in self.out_indices} + self._in_chs = builder.in_chs + + efficientnet_init_weights(self) + if _DEBUG: + for k, v in self._feature_info.items(): + print('Feature idx: {}: Name: {}, Channels: {}'.format(k, v['name'], v['num_chs'])) + + # Register feature extraction hooks with FeatureHooks helper + self.feature_hooks = None + if feature_location != 'bottleneck': + hooks = [dict( + name=self._feature_info[idx]['module'], + type=self._feature_info[idx]['hook_type']) for idx in out_indices] + self.feature_hooks = FeatureHooks(hooks, self.named_modules()) + + def feature_channels(self, idx=None): + """ Feature Channel Shortcut + Returns feature channel count for each output index if idx == None. If idx is an integer, will + return feature channel count for that feature block index (independent of out_indices setting). + """ + if isinstance(idx, int): + return self._feature_info[idx]['num_chs'] + return [self._feature_info[i]['num_chs'] for i in self.out_indices] + + def feature_info(self, idx=None): + """ Feature Channel Shortcut + Returns feature channel count for each output index if idx == None. If idx is an integer, will + return feature channel count for that feature block index (independent of out_indices setting). 
+ """ + if isinstance(idx, int): + return self._feature_info[idx] + return [self._feature_info[i] for i in self.out_indices] + + def forward(self, x): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + if self.feature_hooks is None: + features = [] + for i, b in enumerate(self.blocks): + x = b(x) + if i in self._stage_to_feature_idx: + features.append(x) + return features + else: + self.blocks(x) + return self.feature_hooks.get_output(x.device) + +def load_pretrained(model, cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True): + if cfg is None: + cfg = getattr(model, 'default_cfg') + if cfg is None or 'url' not in cfg or not cfg['url']: + logging.warning("Pretrained model URL is invalid, using random initialization.") + return + + state_dict = model_zoo.load_url(cfg['url'], progress=False, map_location='cpu') + + if in_chans == 1: + conv1_name = cfg['first_conv'] + logging.info('Converting first conv (%s) from 3 to 1 channel' % conv1_name) + conv1_weight = state_dict[conv1_name + '.weight'] + state_dict[conv1_name + '.weight'] = conv1_weight.sum(dim=1, keepdim=True) + elif in_chans != 3: + assert False, "Invalid in_chans for pretrained weights" + + classifier_name = cfg['classifier'] + if num_classes == 1000 and cfg['num_classes'] == 1001: + # special case for imagenet trained models with extra background class in pretrained weights + classifier_weight = state_dict[classifier_name + '.weight'] + state_dict[classifier_name + '.weight'] = classifier_weight[1:] + classifier_bias = state_dict[classifier_name + '.bias'] + state_dict[classifier_name + '.bias'] = classifier_bias[1:] + elif num_classes != cfg['num_classes']: + # completely discard fully connected for all other differences between pretrained and created model + del state_dict[classifier_name + '.weight'] + del state_dict[classifier_name + '.bias'] + strict = False + + if filter_fn is not None: + state_dict = filter_fn(state_dict) + + model.load_state_dict(state_dict, strict=strict) + +def _create_model(model_kwargs, default_cfg, pretrained=False): + if model_kwargs.pop('features_only', False): + load_strict = False + model_kwargs.pop('num_classes', 0) + model_kwargs.pop('num_features', 0) + model_kwargs.pop('head_conv', None) + model_class = EfficientNetFeatures + else: + load_strict = True + model_class = EfficientNet + variant = model_kwargs.pop('variant', '') + model = model_class(**model_kwargs) + model.default_cfg = default_cfg + if pretrained: + load_pretrained( + model, + default_cfg, + num_classes=model_kwargs.get('num_classes', 0), + in_chans=model_kwargs.get('in_chans', 3), + strict=load_strict) + return model + + +def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-a1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r2_k3_s2_e6_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r4_k3_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c112_se0.25'], + # stage 5, 14x14in + ['ir_r3_k5_s2_e6_c160_se0.25'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r3_k5_s2_e3_c40'], + # stage 3, 28x28 in + ['ir_r3_k5_s2_e6_c80'], + # stage 4, 14x14in + ['ir_r2_k3_s1_e6_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a mnasnet-b1 model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet + Paper: https://arxiv.org/pdf/1807.11626.pdf. + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + ['ds_r1_k3_s1_c8'], + ['ir_r1_k3_s2_e3_c16'], + ['ir_r2_k3_s2_e6_c16'], + ['ir_r4_k5_s2_e6_c32_se0.25'], + ['ir_r3_k3_s1_e6_c32_se0.25'], + ['ir_r3_k5_s2_e6_c88_se0.25'], + ['ir_r1_k3_s1_e6_c144'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=8, + channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def _gen_mobilenet_v2( + variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs): + """ Generate MobileNet-V2 network + Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py + Paper: https://arxiv.org/abs/1801.04381 + """ + arch_def = [ + ['ds_r1_k3_s1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r3_k3_s2_e6_c32'], + ['ir_r4_k3_s2_e6_c64'], + ['ir_r3_k3_s1_e6_c96'], + ['ir_r3_k3_s2_e6_c160'], + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head), + num_features=1280 if fix_stem_head else round_channels(1280, channel_multiplier, 8, None), + stem_size=32, + fix_stem=fix_stem_head, + channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + act_layer=nn.ReLU6, + **kwargs + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """ FBNet-C + + Paper: https://arxiv.org/abs/1812.03443 + Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py + + NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper, + it was used to confirm some building block details + """ + arch_def = [ + ['ir_r1_k3_s1_e1_c16'], + ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], + ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], + ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], + ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], + ['ir_r4_k5_s2_e6_c184'], + ['ir_r1_k3_s1_e6_c352'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=16, + num_features=1984, # paper suggests this, but is not 100% clear + channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates the Single-Path NAS model from search targeted for Pixel1 phone. + + Paper: https://arxiv.org/abs/1904.02877 + + Args: + channel_multiplier: multiplier to number of channels per layer. 
+ """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_c16_noskip'], + # stage 1, 112x112 in + ['ir_r3_k3_s2_e3_c24'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], + # stage 3, 28x28 in + ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], + # stage 4, 14x14in + ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], + # stage 5, 14x14in + ['ir_r4_k5_s2_e6_c192'], + # stage 6, 7x7 in + ['ir_r1_k3_s1_e6_c320_noskip'] + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + stem_size=32, + channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet model. + + Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-b0': (1.0, 1.0, 224, 0.2), + 'efficientnet-b1': (1.0, 1.1, 240, 0.2), + 'efficientnet-b2': (1.1, 1.2, 260, 0.3), + 'efficientnet-b3': (1.2, 1.4, 300, 0.3), + 'efficientnet-b4': (1.4, 1.8, 380, 0.4), + 'efficientnet-b5': (1.6, 2.2, 456, 0.4), + 'efficientnet-b6': (1.8, 2.6, 528, 0.5), + 'efficientnet-b7': (2.0, 3.1, 600, 0.5), + 'efficientnet-b8': (2.2, 3.6, 672, 0.5), + 'efficientnet-l2': (4.3, 5.3, 800, 0.5), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25'], + ['ir_r4_k5_s2_e6_c192_se0.25'], + ['ir_r1_k3_s1_e6_c320_se0.25'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_channels(1280, channel_multiplier, 8, None), + stem_size=32, + channel_multiplier=channel_multiplier, + act_layer=Swish, + norm_kwargs=resolve_bn_args(kwargs), + variant=variant, + **kwargs, + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def _gen_efficientnet_edge(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """ Creates an EfficientNet-EdgeTPU model + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu + """ + + arch_def = [ + # NOTE `fc` is present to override a mismatch between stem channels and in chs not + # present in other models + ['er_r1_k3_s1_e4_c24_fc24_noskip'], + ['er_r2_k3_s2_e8_c32'], + ['er_r4_k3_s2_e8_c48'], + ['ir_r5_k5_s2_e8_c96'], + ['ir_r4_k5_s1_e8_c144'], + ['ir_r2_k5_s2_e8_c192'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier), + num_features=round_channels(1280, channel_multiplier, 8, None), + stem_size=32, + channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + act_layer=nn.ReLU, + **kwargs, + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def _gen_efficientnet_condconv( + variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs): + """Creates an EfficientNet-CondConv model. 
+ + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16_se0.25'], + ['ir_r2_k3_s2_e6_c24_se0.25'], + ['ir_r2_k5_s2_e6_c40_se0.25'], + ['ir_r3_k3_s2_e6_c80_se0.25'], + ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], + ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], + ['ir_r1_k3_s1_e6_c320_se0.25_cc4'], + ] + # NOTE unlike official impl, this one uses `cc` option where x is the base number of experts for each stage and + # the expert_multiplier increases that on a per-model basis as with depth/channel multipliers + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), + num_features=round_channels(1280, channel_multiplier, 8, None), + stem_size=32, + channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + act_layer=Swish, + **kwargs, + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates an EfficientNet-Lite model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite + Paper: https://arxiv.org/abs/1905.11946 + + EfficientNet params + name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) + 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), + 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), + 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), + 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), + 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), + + Args: + channel_multiplier: multiplier to number of channels per layer + depth_multiplier: multiplier to number of repeats per stage + """ + arch_def = [ + ['ds_r1_k3_s1_e1_c16'], + ['ir_r2_k3_s2_e6_c24'], + ['ir_r2_k5_s2_e6_c40'], + ['ir_r3_k3_s2_e6_c80'], + ['ir_r3_k5_s1_e6_c112'], + ['ir_r4_k5_s2_e6_c192'], + ['ir_r1_k3_s1_e6_c320'], + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), + num_features=1280, + stem_size=32, + fix_stem=True, + channel_multiplier=channel_multiplier, + act_layer=nn.ReLU6, + norm_kwargs=resolve_bn_args(kwargs), + **kwargs, + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Small model. 
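+
+    MixNet layers mix several depthwise kernel sizes in one block: 'k3.5.7' in the arch strings
+    below splits the channels across parallel 3x3, 5x5 and 7x7 depthwise convs (MixedConv2d in
+    efficientnet_builder.py) and concatenates the results.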
+ + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16'], # relu + # stage 1, 112x112 in + ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def), + num_features=1536, + stem_size=16, + channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): + """Creates a MixNet Medium-Large model. + + Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet + Paper: https://arxiv.org/abs/1907.09595 + """ + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c24'], # relu + # stage 1, 112x112 in + ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu + # stage 2, 56x56 in + ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish + # stage 3, 28x28 in + ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish + # stage 5, 14x14in + ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish + # 7x7 + ] + model_kwargs = dict( + block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), + num_features=1536, + stem_size=24, + channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + **kwargs + ) + model = _create_model(model_kwargs, default_cfgs[variant], pretrained) + return model + + +def mnasnet_050(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.5. """ + model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +def mnasnet_075(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 0.75. """ + model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +def mnasnet_100(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. """ + model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mnasnet_b1(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.0. 
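+    Alias for mnasnet_100.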
""" + return mnasnet_100(pretrained, **kwargs) + + +def mnasnet_140(pretrained=False, **kwargs): + """ MNASNet B1, depth multiplier of 1.4 """ + model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +def semnasnet_050(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """ + model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) + return model + + +def semnasnet_075(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 0.75. """ + model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) + return model + + +def semnasnet_100(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mnasnet_a1(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ + return semnasnet_100(pretrained, **kwargs) + + +def semnasnet_140(pretrained=False, **kwargs): + """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """ + model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +def mnasnet_small(pretrained=False, **kwargs): + """ MNASNet Small, depth multiplier of 1.0. """ + model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv2_100(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.0 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv2_140(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.4 channel multiplier """ + model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv2_110d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers""" + model = _gen_mobilenet_v2( + 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +def mobilenetv2_120d(pretrained=False, **kwargs): + """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """ + model = _gen_mobilenet_v2( + 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) + return model + + +def fbnetc_100(pretrained=False, **kwargs): + """ FBNet-C """ + if pretrained: + # pretrained model trained with non-default BN epsilon + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def spnasnet_100(pretrained=False, **kwargs): + """ Single-Path NAS Pixel1""" + model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 
'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b2a(pretrained=False, **kwargs): + """ EfficientNet-B2 @ 288x288 w/ 1.0 test crop""" + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b2a', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b3a(pretrained=False, **kwargs): + """ EfficientNet-B3 @ 320x320 w/ 1.0 test crop-pct """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b3a', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8 """ + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_l2(pretrained=False, **kwargs): + """ EfficientNet-L2.""" + # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. """ + model = _gen_efficientnet_edge( + 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. """ + model = _gen_efficientnet_edge( + 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. 
""" + model = _gen_efficientnet_edge( + 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + +def efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_condconv( + 'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +def efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + model = _gen_efficientnet_lite( + 'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + + + +def efficientnet_b1_pruned(pretrained=False, **kwargs): + """ EfficientNet-B1 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + variant = 'efficientnet_b1_pruned' + model = _gen_efficientnet( + variant, channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b2_pruned(pretrained=False, **kwargs): + """ EfficientNet-B2 Pruned. 
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b3_pruned(pretrained=False, **kwargs): + """ EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + + + +def tf_efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b1(pretrained=False, **kwargs): + """ EfficientNet-B1. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b2(pretrained=False, **kwargs): + """ EfficientNet-B2. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b3(pretrained=False, **kwargs): + """ EfficientNet-B3. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b4(pretrained=False, **kwargs): + """ EfficientNet-B4. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b5(pretrained=False, **kwargs): + """ EfficientNet-B5. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b6(pretrained=False, **kwargs): + """ EfficientNet-B6. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b7(pretrained=False, **kwargs): + """ EfficientNet-B7. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b8(pretrained=False, **kwargs): + """ EfficientNet-B8. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b0_ap(pretrained=False, **kwargs): + """ EfficientNet-B0 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b1_ap(pretrained=False, **kwargs): + """ EfficientNet-B1 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b2_ap(pretrained=False, **kwargs): + """ EfficientNet-B2 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b3_ap(pretrained=False, **kwargs): + """ EfficientNet-B3 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b4_ap(pretrained=False, **kwargs): + """ EfficientNet-B4 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b5_ap(pretrained=False, **kwargs): + """ EfficientNet-B5 AdvProp. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b6_ap(pretrained=False, **kwargs): + """ EfficientNet-B6 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b7_ap(pretrained=False, **kwargs): + """ EfficientNet-B7 AdvProp. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b8_ap(pretrained=False, **kwargs): + """ EfficientNet-B8 AdvProp. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b0_ns(pretrained=False, **kwargs): + """ EfficientNet-B0 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b1_ns(pretrained=False, **kwargs): + """ EfficientNet-B1 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b2_ns(pretrained=False, **kwargs): + """ EfficientNet-B2 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b3_ns(pretrained=False, **kwargs): + """ EfficientNet-B3 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b4_ns(pretrained=False, **kwargs): + """ EfficientNet-B4 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b5_ns(pretrained=False, **kwargs): + """ EfficientNet-B5 NoisyStudent. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b6_ns(pretrained=False, **kwargs): + """ EfficientNet-B6 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_b7_ns(pretrained=False, **kwargs): + """ EfficientNet-B7 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_l2_ns_475(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent @ 475x475. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_l2_ns(pretrained=False, **kwargs): + """ EfficientNet-L2 NoisyStudent. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.5 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet( + 'tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_es(pretrained=False, **kwargs): + """ EfficientNet-Edge Small. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_em(pretrained=False, **kwargs): + """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_el(pretrained=False, **kwargs): + """ EfficientNet-Edge-Large. Tensorflow compatible variant """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_edge( + 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + +def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs): + """ EfficientNet-CondConv-B1 w/ 8 Experts. 
Tensorflow compatible variant """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_condconv( + 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, + pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_lite0(pretrained=False, **kwargs): + """ EfficientNet-Lite0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_lite1(pretrained=False, **kwargs): + """ EfficientNet-Lite1 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_lite2(pretrained=False, **kwargs): + """ EfficientNet-Lite2 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_lite3(pretrained=False, **kwargs): + """ EfficientNet-Lite3 """ + # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) + return model + + +def tf_efficientnet_lite4(pretrained=False, **kwargs): + """ EfficientNet-Lite4 """ + # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_efficientnet_lite( + 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) + return model + + +def mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. + """ + model = _gen_mixnet_s( + 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. + """ + model = _gen_mixnet_m( + 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. + """ + model = _gen_mixnet_m( + 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +def mixnet_xl(pretrained=False, **kwargs): + """Creates a MixNet Extra-Large model. + Not a paper spec, experimental def by RW w/ depth scaling. + """ + model = _gen_mixnet_m( + 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) + return model + + +def mixnet_xxl(pretrained=False, **kwargs): + """Creates a MixNet Double Extra Large model. + Not a paper spec, experimental def by RW w/ depth scaling. 
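+    Scaled up from mixnet_m with channel_multiplier=2.4 and depth_multiplier=1.3 (repeats rounded).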
+ """ + model = _gen_mixnet_m( + 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +def tf_mixnet_s(pretrained=False, **kwargs): + """Creates a MixNet Small model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_s( + 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_mixnet_m(pretrained=False, **kwargs): + """Creates a MixNet Medium model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + + +def tf_mixnet_l(pretrained=False, **kwargs): + """Creates a MixNet Large model. Tensorflow compatible variant + """ + kwargs['bn_eps'] = BN_EPS_TF_DEFAULT + kwargs['pad_type'] = 'same' + model = _gen_mixnet_m( + 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) + return model + + +def efficientnet_b0(pretrained=False, **kwargs): + """ EfficientNet-B0 """ + # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 + model = _gen_efficientnet( + 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) + return model + +@BACKBONES.register_module +class SSDEFFB0(nn.Module): + def __init__(self, input_size, width_mult=1.0, + activation_type='relu', + single_scale=False): + super(SSDEFFB0, self).__init__() + self.input_size = input_size + self.single_scale = single_scale + self.width_mult = width_mult + self.backbone = _gen_efficientnet('efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=True, features_only=True) + + # del self.backbone.blocks[3][2] + + for m in self.backbone.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() + m.weight.requires_grad = False + m.bias.requires_grad = False + + # self.last_channel = self.backbone.blocks[-1][-1].conv.out_channels # self.backbone.blocks[-1][-1] + + # building last several layers + self.extra_convs = [] + if not self.single_scale: + self.extra_convs.append(conv_1x1_bn(self.last_channel, 1280, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(1280, 256, + act_fn=Swish)) + self.extra_convs.append(conv_bn(256, 256, 2, groups=256, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(256, 512, groups=1, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(512, 128, + act_fn=Swish)) + self.extra_convs.append(conv_bn(128, 128, 2, groups=128, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(128, 256, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(256, 128, + act_fn=Swish)) + self.extra_convs.append(conv_bn(128, 128, 2, groups=128, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(128, 256, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(256, 64, + act_fn=Swish)) + self.extra_convs.append(conv_bn(64, 64, 2, groups=64, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(64, 128, + act_fn=Swish)) + self.extra_convs = nn.Sequential(*self.extra_convs) + + def init_weights(self, pretrained=None): + if pretrained: + state_dict = torch.load(pretrained) + state_dict = state_dict['state_dict'] + self.backbone.load_state_dict(state_dict, strict=True) + else: + print("No pretrained model!") + return + + def forward(self, x): + outputs = self.backbone(x) + x = outputs[-1] + outs = [] + for i, conv in enumerate(self.extra_convs): + x = conv(x) + if i % 3 == 0: + outs.append(x) + + if 
+            # outs.append(x)
+            return outputs[1:]
+
+        return tuple(outs)
diff --git a/CDARTS_detection/mmdet/models/backbones/efficientnet_builder.py b/CDARTS_detection/mmdet/models/backbones/efficientnet_builder.py
new file mode 100644
index 0000000..03beedd
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/backbones/efficientnet_builder.py
@@ -0,0 +1,1026 @@
+import logging
+import math
+import re
+from collections import OrderedDict
+from copy import deepcopy
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
+import numpy as np
+from functools import partial
+from itertools import repeat
+from torch._six import container_abcs
+from typing import Tuple, Optional, List
+
+def swish(x, inplace: bool = False):
+    """Swish - Described in: https://arxiv.org/abs/1710.05941
+    """
+    return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
+
+class Swish(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(Swish, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        return swish(x, self.inplace)
+
+def hard_swish(x, inplace: bool = False):
+    """Hard-Swish - as used in MobileNetV3: https://arxiv.org/abs/1905.02244
+    """
+    inner = F.relu6(x + 3.).div_(6.)
+    return x.mul_(inner) if inplace else x.mul(inner)
+
+class HardSwish(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(HardSwish, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        return hard_swish(x, self.inplace)
+
+def _ntuple(n):
+    def parse(x):
+        if isinstance(x, container_abcs.Iterable):
+            return x
+        return tuple(repeat(x, n))
+    return parse
+
+def get_same_padding(x: int, k: int, s: int, d: int):
+    return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
+
+def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0):
+    ih, iw = x.size()[-2:]
+    pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
+    if pad_h > 0 or pad_w > 0:
+        x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
+    return x
+
+def conv2d_same(
+        x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1),
+        padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1):
+    x = pad_same(x, weight.shape[-2:], stride, dilation)
+    return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
+
+tup_pair = _ntuple(2)
+
+def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
+    padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
+    return padding
+
+def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
+    return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
+
+def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
+    dynamic = False
+    if isinstance(padding, str):
+        # for any string padding, the padding will be calculated for you, one of three ways
+        padding = padding.lower()
+        if padding == 'same':
+            # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
+            if is_static_pad(kernel_size, **kwargs):
+                # static case, no extra overhead
+                padding = get_padding(kernel_size, **kwargs)
+            else:
+                # dynamic 'SAME' padding, has runtime/GPU memory overhead
+                padding = 0
+                dynamic = True
+        elif padding == 'valid':
+            # 'VALID' padding, same as padding=0
+            padding = 0
+        else:
+            # Default to PyTorch style 'same'-ish symmetric padding
+            padding = get_padding(kernel_size, **kwargs)
+    return padding, dynamic
+
+class CondConv2d(nn.Module):
+    """ Conditionally Parameterized Convolution
+    Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py
Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: + https://github.com/pytorch/pytorch/issues/17983 + """ + __constants__ = ['bias', 'in_channels', 'out_channels', 'dynamic_padding'] + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): + super(CondConv2d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = tup_pair(kernel_size) + self.stride = tup_pair(stride) + padding_val, is_padding_dynamic = get_padding_value( + padding, kernel_size, stride=stride, dilation=dilation) + self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript + self.padding = tup_pair(padding_val) + self.dilation = tup_pair(dilation) + self.groups = groups + self.num_experts = num_experts + + self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight_num_param = 1 + for wd in self.weight_shape: + weight_num_param *= wd + self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) + + if bias: + self.bias_shape = (self.out_channels,) + self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + init_weight = get_condconv_initializer( + partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) + init_weight(self.weight) + if self.bias is not None: + fan_in = np.prod(self.weight_shape[1:]) + bound = 1 / math.sqrt(fan_in) + init_bias = get_condconv_initializer( + partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) + init_bias(self.bias) + + def forward(self, x, routing_weights): + B, C, H, W = x.shape + weight = torch.matmul(routing_weights, self.weight) + new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight = weight.view(new_weight_shape) + bias = None + if self.bias is not None: + bias = torch.matmul(routing_weights, self.bias) + bias = bias.view(B * self.out_channels) + # move batch elements with channels so each batch element can be efficiently convolved with separate kernel + x = x.view(1, B * C, H, W) + if self.dynamic_padding: + out = conv2d_same( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + else: + out = F.conv2d( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) + + # Literal port (from TF definition) + # x = torch.split(x, 1, 0) + # weight = torch.split(weight, 1, 0) + # if self.bias is not None: + # bias = torch.matmul(routing_weights, self.bias) + # bias = torch.split(bias, 1, 0) + # else: + # bias = [None] * B + # out = [] + # for xi, wi, bi in zip(x, weight, bias): + # wi = wi.view(*self.weight_shape) + # if bi is not None: + # bi = bi.view(*self.bias_shape) + # out.append(self.conv_fn( + # xi, wi, bi, stride=self.stride, padding=self.padding, + # dilation=self.dilation, groups=self.groups)) + # out = torch.cat(out, 0) + return out + +def get_condconv_initializer(initializer, num_experts, expert_shape): + def condconv_initializer(weight): + """CondConv initializer function.""" + num_params = np.prod(expert_shape) + if (len(weight.shape) != 2 or weight.shape[0] != 
num_experts or + weight.shape[1] != num_params): + raise (ValueError( + 'CondConv variables must have shape [num_experts, num_params]')) + for i in range(num_experts): + initializer(weight[i].view(expert_shape)) + return condconv_initializer + +def resolve_bn_args(kwargs): + bn_args = get_bn_args_tf() if kwargs.pop('bn_tf', False) else {} + bn_momentum = kwargs.pop('bn_momentum', None) + if bn_momentum is not None: + bn_args['momentum'] = bn_momentum + bn_eps = kwargs.pop('bn_eps', None) + if bn_eps is not None: + bn_args['eps'] = bn_eps + return bn_args + +def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None): + """Round number of filters based on depth multiplier.""" + if not multiplier: + return channels + channels *= multiplier + return make_divisible(channels, divisor, channel_min) + +def _parse_ksize(ss): + if ss.isdigit(): + return int(ss) + else: + return [int(k) for k in ss.split('.')] + +def make_divisible(v, divisor=8, min_value=None): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + +def sigmoid(x, inplace: bool = False): + return x.sigmoid_() if inplace else x.sigmoid() + +class SqueezeExcite(nn.Module): + def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None, + act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1, **_): + super(SqueezeExcite, self).__init__() + self.gate_fn = gate_fn + reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor) + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True) + self.act1 = act_layer(inplace=True) + self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True) + + def forward(self, x): + x_se = self.avg_pool(x) + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + x = x * self.gate_fn(x_se) + return x + +class DepthwiseSeparableConv(nn.Module): + """ DepthwiseSeparable block + Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion + (factor of 1.0). This is an alternative to having a IR with an optional first pw conv. + """ + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, + pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.): + super(DepthwiseSeparableConv, self).__init__() + norm_kwargs = norm_kwargs or {} + has_se = se_ratio is not None and se_ratio > 0. 
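+        # Depthwise 3x3 -> optional SE -> pointwise 1x1. The residual shortcut is only valid when
+        # the block preserves shape (stride 1, in_chs == out_chs) and skip hasn't been disabled.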
+ self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip + self.has_pw_act = pw_act # activation after point-wise conv + self.drop_path_rate = drop_path_rate + + self.conv_dw = create_conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True) + self.bn1 = norm_layer(in_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + if has_se: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = None + + self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_layer(out_chs, **norm_kwargs) + self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': + # no expansion in this block, use depthwise, before SE + info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels) + elif location == 'depthwise': # after SE + info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) + else: # location == 'bottleneck' + info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels) + return info + + def forward(self, x): + residual = x + + x = self.conv_dw(x) + x = self.bn1(x) + x = self.act1(x) + + if self.se is not None: + x = self.se(x) + + x = self.conv_pw(x) + x = self.bn2(x) + x = self.act2(x) + + if self.has_residual: + x += residual + return x + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE and CondConv routing""" + + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, + exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, + se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, + conv_kwargs=None, drop_path_rate=0.): + super(InvertedResidual, self).__init__() + norm_kwargs = norm_kwargs or {} + conv_kwargs = conv_kwargs or {} + mid_chs = make_divisible(in_chs * exp_ratio) + has_se = se_ratio is not None and se_ratio > 0. 
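+        # MobileNetV2-style sequence: 1x1 expand (in_chs -> mid_chs) -> depthwise -> optional SE ->
+        # 1x1 linear projection (no activation); residual added only for stride-1, equal-width blocks.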
+ self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Point-wise expansion + self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) + self.bn1 = norm_layer(mid_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Depth-wise convolution + self.conv_dw = create_conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, + padding=pad_type, depthwise=True, **conv_kwargs) + self.bn2 = norm_layer(mid_chs, **norm_kwargs) + self.act2 = act_layer(inplace=True) + + # Squeeze-and-excitation + if has_se: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = None + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) + self.bn3 = norm_layer(out_chs, **norm_kwargs) + + def feature_info(self, location): + if location == 'expansion': + info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels) + elif location == 'depthwise': # after SE + info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck' + info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + residual = x + + # Point-wise expansion + x = self.conv_pw(x) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn3(x) + + if self.has_residual: + x += residual + + return x + +class CondConvResidual(InvertedResidual): + """ Inverted residual block w/ CondConv routing""" + + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, + exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, + se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, + num_experts=0, drop_path_rate=0.): + + self.num_experts = num_experts + conv_kwargs = dict(num_experts=self.num_experts) + + super(CondConvResidual, self).__init__( + in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, pad_type=pad_type, + act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, se_ratio=se_ratio, se_kwargs=se_kwargs, + norm_layer=norm_layer, norm_kwargs=norm_kwargs, conv_kwargs=conv_kwargs, + drop_path_rate=drop_path_rate) + + self.routing_fn = nn.Linear(in_chs, self.num_experts) + + def forward(self, x): + residual = x + + # CondConv routing + pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) + routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs)) + + # Point-wise expansion + x = self.conv_pw(x, routing_weights) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x, routing_weights) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x, routing_weights) + x = self.bn3(x) + + if self.has_residual: + x += residual + return x + +class EdgeResidual(nn.Module): + """ Residual block with expansion convolution followed by pointwise-linear w/ stride""" + + def __init__(self, in_chs, out_chs, exp_kernel_size=3, 
exp_ratio=1.0, fake_in_chs=0, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1, + se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, + drop_path_rate=0.): + super(EdgeResidual, self).__init__() + norm_kwargs = norm_kwargs or {} + if fake_in_chs > 0: + mid_chs = make_divisible(fake_in_chs * exp_ratio) + else: + mid_chs = make_divisible(in_chs * exp_ratio) + has_se = se_ratio is not None and se_ratio > 0. + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Expansion convolution + self.conv_exp = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type) + self.bn1 = norm_layer(mid_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + if has_se: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = None + + # Point-wise linear projection + self.conv_pwl = create_conv2d( + mid_chs, out_chs, pw_kernel_size, stride=stride, dilation=dilation, padding=pad_type) + self.bn2 = norm_layer(out_chs, **norm_kwargs) + + def feature_info(self, location): + if location == 'expansion': + info = dict(module='act1', hook_type='forward', num_chs=self.conv_exp.out_channels) + elif location == 'depthwise': + # there is no depthwise, take after SE, before PWL + info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck' + info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + residual = x + + # Expansion convolution + x = self.conv_exp(x) + x = self.bn1(x) + x = self.act1(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn2(x) + + if self.has_residual: + x += residual + + return x + +def conv2d_same( + x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1): + x = pad_same(x, weight.shape[-2:], stride, dilation) + return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) + + +class Conv2dSame(nn.Conv2d): + """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions + """ + + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2dSame, self).__init__( + in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) + + def forward(self, x): + return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + +def _split_channels(num_chan, num_groups): + split = [num_chan // num_groups for _ in range(num_groups)] + split[0] += num_chan - sum(split) + return split + + +class MixedConv2d(nn.ModuleDict): + """ Mixed Grouped Convolution + Based on MDConv and GroupedConv in MixNet impl: + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py + """ + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, depthwise=False, **kwargs): + super(MixedConv2d, self).__init__() + + kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] + num_groups = len(kernel_size) + in_splits = _split_channels(in_channels, num_groups) + out_splits = _split_channels(out_channels, num_groups) + 
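+        # e.g. (illustrative) _split_channels(26, 3) -> [10, 8, 8]: an even
+        # 26 // 3 = 8 per group, with the remainder folded into the first split,
+        # so sum(in_splits) == in_channels below.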
+        self.in_channels = sum(in_splits)
+        self.out_channels = sum(out_splits)
+        for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)):
+            conv_groups = out_ch if depthwise else 1
+            # use add_module to keep key space clean
+            self.add_module(
+                str(idx),
+                create_conv2d_pad(
+                    in_ch, out_ch, k, stride=stride,
+                    padding=padding, dilation=dilation, groups=conv_groups, **kwargs)
+            )
+        self.splits = in_splits
+
+    def forward(self, x):
+        x_split = torch.split(x, self.splits, 1)
+        x_out = [c(x_split[i]) for i, c in enumerate(self.values())]
+        x = torch.cat(x_out, 1)
+        return x
+
+def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
+    padding = kwargs.pop('padding', '')
+    kwargs.setdefault('bias', False)
+    padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
+    if is_dynamic:
+        return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
+    else:
+        return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
+
+def create_conv2d(in_chs, out_chs, kernel_size, **kwargs):
+    """ Select a 2d convolution implementation based on arguments
+    Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
+    Used extensively by EfficientNet, MobileNetV3 and related networks.
+    """
+    assert 'groups' not in kwargs  # only use 'depthwise' bool arg
+    if isinstance(kernel_size, list):
+        assert 'num_experts' not in kwargs  # MixNet + CondConv combo not supported currently
+        # Only lists define MixedConv2d kernel groups; ints, tuples and other
+        # iterables pass through to a normal conv and specify (h, w).
+        m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs)
+    else:
+        depthwise = kwargs.pop('depthwise', False)
+        groups = out_chs if depthwise else 1
+        if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
+            m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
+        else:
+            m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
+    return m
+
+class ConvBnAct(nn.Module):
+    def __init__(self, in_chs, out_chs, kernel_size,
+                 stride=1, dilation=1, pad_type='', act_layer=nn.ReLU,
+                 norm_layer=nn.BatchNorm2d, norm_kwargs=None):
+        super(ConvBnAct, self).__init__()
+        norm_kwargs = norm_kwargs or {}
+        self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type)
+        self.bn1 = norm_layer(out_chs, **norm_kwargs)
+        self.act1 = act_layer(inplace=True)
+
+    def feature_info(self, location):
+        if location == 'expansion' or location == 'depthwise':
+            # no expansion or depthwise in this block, use act after conv
+            info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels)
+        else:  # location == 'bottleneck'
+            info = dict(module='', hook_type='', num_chs=self.conv.out_channels)
+        return info
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+        return x
+
+def resolve_se_args(kwargs, in_chs, act_layer=None):
+    se_kwargs = kwargs.copy() if kwargs is not None else {}
+    # fill in args that aren't specified with the defaults
+    for k, v in _SE_ARGS_DEFAULT.items():
+        se_kwargs.setdefault(k, v)
+    # some models, like MobileNetV3, calculate SE reduction chs from the containing block's mid_ch instead of in_ch
+    if not se_kwargs.pop('reduce_mid'):
+        se_kwargs['reduced_base_chs'] = in_chs
+    # act_layer override; if it remains None, the containing block's act_layer will be used
+    if se_kwargs['act_layer'] is None:
+        assert act_layer is not None
+        se_kwargs['act_layer'] = act_layer
+    return se_kwargs
+
+def sigmoid(x, inplace: bool = False):
+    return x.sigmoid_() if inplace else x.sigmoid()
+
+_SE_ARGS_DEFAULT = dict(
+    gate_fn=sigmoid,
+    act_layer=None,
+    reduce_mid=False,
+    divisor=1)
+
+
+def _parse_ksize(ss):
+    if ss.isdigit():
+        return int(ss)
+    else:
+        return [int(k) for k in ss.split('.')]
+
+
+def _decode_block_str(block_str):
+    """ Decode block definition string
+
+    Gets a single block arg dict through a string notation of arguments.
+    E.g. ir_r2_k3_s2_e1_c16_se0.25_noskip
+
+    All args can exist in any order with the exception of the leading string which
+    is assumed to indicate the block type.
+
+    leading string - block type (
+      ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct)
+    r - number of repeat blocks,
+    k - kernel size,
+    s - strides (1-9),
+    e - expansion ratio,
+    c - output channels,
+    se - squeeze/excitation ratio,
+    n - activation fn ('re', 'r6', 'hs', or 'sw'),
+    a - expansion kernel size,
+    p - pointwise kernel size,
+    cc - number of CondConv experts,
+    fc - fake input channels (TPU-def workaround)
+    Args:
+        block_str: a string representation of block arguments.
+    Returns:
+        A tuple of (block args dict, number of repeats).
+    Raises:
+        ValueError: if the string definition is not properly specified (TODO)
+    """
+    assert isinstance(block_str, str)
+    ops = block_str.split('_')
+    block_type = ops[0]  # take the block type off the front
+    ops = ops[1:]
+    options = {}
+    noskip = False
+    for op in ops:
+        # string options are checked on an individual basis, combine if they grow
+        if op == 'noskip':
+            noskip = True
+        elif op.startswith('n'):
+            # activation fn
+            key = op[0]
+            v = op[1:]
+            if v == 're':
+                value = nn.ReLU
+            elif v == 'r6':
+                value = nn.ReLU6
+            elif v == 'hs':
+                value = HardSwish
+            elif v == 'sw':
+                value = Swish
+            else:
+                continue
+            options[key] = value
+        else:
+            # all numeric options
+            splits = re.split(r'(\d.*)', op)
+            if len(splits) >= 2:
+                key, value = splits[:2]
+                options[key] = value
+
+    # if act_layer is None, the model default (passed to model init) will be used
+    act_layer = options['n'] if 'n' in options else None
+    exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
+    pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
+    fake_in_chs = int(options['fc']) if 'fc' in options else 0  # FIXME hack to deal with in_chs issue in TPU def
+
+    num_repeat = int(options['r'])
+    # each type of block has different valid arguments, fill accordingly
+    if block_type == 'ir':
+        block_args = dict(
+            block_type=block_type,
+            dw_kernel_size=_parse_ksize(options['k']),
+            exp_kernel_size=exp_kernel_size,
+            pw_kernel_size=pw_kernel_size,
+            out_chs=int(options['c']),
+            exp_ratio=float(options['e']),
+            se_ratio=float(options['se']) if 'se' in options else None,
+            stride=int(options['s']),
+            act_layer=act_layer,
+            noskip=noskip,
+        )
+        if 'cc' in options:
+            block_args['num_experts'] = int(options['cc'])
+    elif block_type == 'ds' or block_type == 'dsa':
+        block_args = dict(
+            block_type=block_type,
+            dw_kernel_size=_parse_ksize(options['k']),
+            pw_kernel_size=pw_kernel_size,
+            out_chs=int(options['c']),
+            se_ratio=float(options['se']) if 'se' in options else None,
+            stride=int(options['s']),
+            act_layer=act_layer,
+            pw_act=block_type == 'dsa',
+            noskip=block_type == 'dsa' or noskip,
+        )
+    elif block_type == 'er':
+        block_args = dict(
+            block_type=block_type,
+            exp_kernel_size=_parse_ksize(options['k']),
+            pw_kernel_size=pw_kernel_size,
+            out_chs=int(options['c']),
+            exp_ratio=float(options['e']),
+            fake_in_chs=fake_in_chs,
+            se_ratio=float(options['se']) if 'se' in options else None,
+            stride=int(options['s']),
+            act_layer=act_layer,
+            noskip=noskip,
+        )
+    elif block_type ==
'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + return block_args, num_repeat + + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. + """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch definitions. + repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + + +def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False): + arch_args = [] + for stack_idx, block_strings in enumerate(arch_def): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = _decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + stack_args.append(ba) + repeats.append(rep) + if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1): + arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc)) + else: + arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc)) + return arch_args + + +class EfficientNetBuilder: + """ Build Trunk Blocks + + This ended up being somewhat of a cross between + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py + and + https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py + + """ + def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None, + output_stride=32, pad_type='', act_layer=None, se_kwargs=None, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0., feature_location='', + verbose=False): + self.channel_multiplier = channel_multiplier + self.channel_divisor = channel_divisor + self.channel_min = 
channel_min
+        self.output_stride = output_stride
+        self.pad_type = pad_type
+        self.act_layer = act_layer
+        self.se_kwargs = se_kwargs
+        self.norm_layer = norm_layer
+        self.norm_kwargs = norm_kwargs
+        self.drop_path_rate = drop_path_rate
+        self.feature_location = feature_location
+        assert feature_location in ('bottleneck', 'depthwise', 'expansion', '')
+        self.verbose = verbose
+
+        # state updated during build, consumed by model
+        self.in_chs = None
+        self.features = OrderedDict()
+
+    def _round_channels(self, chs):
+        return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min)
+
+    def _make_block(self, ba, block_idx, block_count):
+        drop_path_rate = self.drop_path_rate * block_idx / block_count
+        bt = ba.pop('block_type')
+        ba['in_chs'] = self.in_chs
+        ba['out_chs'] = self._round_channels(ba['out_chs'])
+        if 'fake_in_chs' in ba and ba['fake_in_chs']:
+            # FIXME this is a hack to work around a mismatch in the original impl's input filters
+            ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs'])
+        ba['norm_layer'] = self.norm_layer
+        ba['norm_kwargs'] = self.norm_kwargs
+        ba['pad_type'] = self.pad_type
+        # block act fn overrides the model default
+        ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer
+        assert ba['act_layer'] is not None
+        if bt == 'ir':
+            ba['drop_path_rate'] = drop_path_rate
+            ba['se_kwargs'] = self.se_kwargs
+            if self.verbose:
+                logging.info(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)))
+            if ba.get('num_experts', 0) > 0:
+                block = CondConvResidual(**ba)
+            else:
+                block = InvertedResidual(**ba)
+        elif bt == 'ds' or bt == 'dsa':
+            ba['drop_path_rate'] = drop_path_rate
+            ba['se_kwargs'] = self.se_kwargs
+            if self.verbose:
+                logging.info(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)))
+            block = DepthwiseSeparableConv(**ba)
+        elif bt == 'er':
+            ba['drop_path_rate'] = drop_path_rate
+            ba['se_kwargs'] = self.se_kwargs
+            if self.verbose:
+                logging.info(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)))
+            block = EdgeResidual(**ba)
+        elif bt == 'cn':
+            if self.verbose:
+                logging.info(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)))
+            block = ConvBnAct(**ba)
+        else:
+            assert False, 'Unknown block type (%s) while building model.' % bt
+        self.in_chs = ba['out_chs']  # update in_chs for arg of next block
+
+        return block
+
+    def __call__(self, in_chs, model_block_args):
+        """ Build the blocks
+        Args:
+            in_chs: Number of input-channels passed to first block
+            model_block_args: A list of lists, outer list defines stages, inner
+                list contains decoded block args (dicts)
+        Returns:
+            List of block stacks (each stack wrapped in nn.Sequential)
+        """
+        if self.verbose:
+            logging.info('Building model trunk with %d stages...'
% len(model_block_args)) + self.in_chs = in_chs + total_block_count = sum([len(x) for x in model_block_args]) + total_block_idx = 0 + current_stride = 2 + current_dilation = 1 + feature_idx = 0 + stages = [] + # outer list of block_args defines the stacks ('stages' by some conventions) + for stage_idx, stage_block_args in enumerate(model_block_args): + last_stack = stage_idx == (len(model_block_args) - 1) + if self.verbose: + logging.info('Stack: {}'.format(stage_idx)) + assert isinstance(stage_block_args, list) + + blocks = [] + # each stack (stage) contains a list of block arguments + for block_idx, block_args in enumerate(stage_block_args): + last_block = block_idx == (len(stage_block_args) - 1) + extract_features = '' # No features extracted + if self.verbose: + logging.info(' Block: {}'.format(block_idx)) + + # Sort out stride, dilation, and feature extraction details + assert block_args['stride'] in (1, 2) + if block_idx >= 1: + # only the first block in any stack can have a stride > 1 + block_args['stride'] = 1 + + do_extract = False + if self.feature_location == 'bottleneck' or self.feature_location == 'depthwise': + if last_block: + next_stage_idx = stage_idx + 1 + if next_stage_idx >= len(model_block_args): + do_extract = True + else: + do_extract = model_block_args[next_stage_idx][0]['stride'] > 1 + elif self.feature_location == 'expansion': + if block_args['stride'] > 1 or (last_stack and last_block): + do_extract = True + if do_extract: + extract_features = self.feature_location + + next_dilation = current_dilation + next_output_stride = current_stride + if block_args['stride'] > 1: + next_output_stride = current_stride * block_args['stride'] + if next_output_stride > self.output_stride: + next_dilation = current_dilation * block_args['stride'] + block_args['stride'] = 1 + if self.verbose: + logging.info(' Converting stride to dilation to maintain output_stride=={}'.format( + self.output_stride)) + else: + current_stride = next_output_stride + block_args['dilation'] = current_dilation + if next_dilation != current_dilation: + current_dilation = next_dilation + + # create the block + block = self._make_block(block_args, total_block_idx, total_block_count) + blocks.append(block) + + # stash feature module name and channel info for model feature extraction + if extract_features: + feature_info = block.feature_info(extract_features) + if feature_info['module']: + feature_info['module'] = 'blocks.{}.{}.'.format(stage_idx, block_idx) + feature_info['module'] + feature_info['stage_idx'] = stage_idx + feature_info['block_idx'] = block_idx + feature_info['reduction'] = current_stride + self.features[feature_idx] = feature_info + feature_idx += 1 + + total_block_idx += 1 # incr global block idx (across all stacks) + stages.append(nn.Sequential(*blocks)) + return stages + + +def _init_weight_goog(m, n='', fix_group_fanout=True): + """ Weight initialization as per Tensorflow official implementations. 
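+    Conv weights are drawn from a zero-mean normal with std sqrt(2 / fan_out)
+    (He-style fan-out init, matching the TF reference); when fix_group_fanout is
+    set, fan_out is divided by the conv's group count, as shown in the code below.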
+ + Args: + m (nn.Module): module to init + n (str): module name + fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs + + Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: + * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py + """ + if isinstance(m, CondConv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + init_weight_fn = get_condconv_initializer( + lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) + init_weight_fn(m.weight) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + if fix_group_fanout: + fan_out //= m.groups + m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + fan_out = m.weight.size(0) # fan-out + fan_in = 0 + if 'routing_fn' in n: + fan_in = m.weight.size(1) + init_range = 1.0 / math.sqrt(fan_in + fan_out) + m.weight.data.uniform_(-init_range, init_range) + m.bias.data.zero_() + + +def efficientnet_init_weights(model: nn.Module, init_fn=None): + init_fn = init_fn or _init_weight_goog + for n, m in model.named_modules(): + init_fn(m, n) + diff --git a/CDARTS_detection/mmdet/models/backbones/fbnet.py b/CDARTS_detection/mmdet/models/backbones/fbnet.py new file mode 100644 index 0000000..a0cc120 --- /dev/null +++ b/CDARTS_detection/mmdet/models/backbones/fbnet.py @@ -0,0 +1,77 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import math +import time +import numpy as np + +from .fbnet_blocks import * +from .fbnet_arch import predefine_archs + +import logging +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcv.cnn import constant_init, kaiming_init +from .utils import load_checkpoint + +from ..registry import BACKBONES + + +@BACKBONES.register_module +class FBNet(nn.Module): + def __init__(self, arch='fbnet_c', out_indices=(5, 9, 17, 22), frozen_stages=-1): + super(FBNet, self).__init__() + print('Model is {}.'.format(arch)) + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.arch = arch + self.input_size = 800 + + self.build_backbone(self.arch, self.input_size) + + def build_backbone(self, arch, input_size): + genotypes = predefine_archs[arch]['genotypes'] + strides = predefine_archs[arch]['strides'] + out_channels = predefine_archs[arch]['out_channels'] + + self.layers = nn.ModuleList() + self.layers.append(ConvBNReLU(input_size, in_channels=3, out_channels=out_channels[0], kernel_size=3, stride=strides[0], padding=1, + bias=True, relu_type='relu', bn_type='bn')) + input_size = input_size // strides[0] + + _in_channels = out_channels[0] + for genotype, stride, _out_channels in zip(genotypes[1:], strides[1:], out_channels[1:]): + if genotype.endswith('sb'): + self.layers.append(SUPER_PRIMITIVES[genotype](input_size, _in_channels, _out_channels, stride)) + else: + self.layers.append(PRIMITIVES[genotype](input_size, _in_channels, _out_channels, stride)) + input_size = input_size // stride + _in_channels = _out_channels + + for m in self.modules(): + if isinstance(m, nn.SyncBatchNorm): + m._specify_ddp_gpu_num(1) + + def 
init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x, alphas=None): + outs = [] + cnt = 0 + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return outs \ No newline at end of file diff --git a/CDARTS_detection/mmdet/models/backbones/fbnet_arch.py b/CDARTS_detection/mmdet/models/backbones/fbnet_arch.py new file mode 100644 index 0000000..67db7fb --- /dev/null +++ b/CDARTS_detection/mmdet/models/backbones/fbnet_arch.py @@ -0,0 +1,63 @@ +predefine_archs = { + + 'fbnet_b': { + 'genotypes' : [ + 'conv3', 'ir_k3_e1', + 'ir_k3_e6', 'ir_k5_e6', 'ir_k3_e1', 'ir_k3_e1', + 'ir_k5_e6', 'ir_k5_e3', 'ir_k3_e6', 'ir_k5_e6', + 'ir_k5_e6', 'ir_k5_e1', 'skip' , 'ir_k5_e3', + 'ir_k5_e6', 'ir_k3_e1', 'ir_k5_e1', 'ir_k5_e3', + 'ir_k5_e6', 'ir_k5_e1', 'ir_k5_e6', 'ir_k5_e6', + 'ir_k3_e6', 'conv1', 'avgpool'], + 'strides' : [ + 2, 1, + 2, 1, 1, 1, + 2, 1, 1, 1, + 2, 1, 1, 1, + 1, 1, 1, 1, + 2, 1, 1, 1, + 1, 1, 7], + 'out_channels' : [ + 16, 16, + 24, 24, 24, 24, + 32, 32, 32, 32, + 64, 64, 64, 64, + 112, 112, 112, 112, + 184, 184, 184, 184, + 352, 1984, 1984, + ], + 'dropout_ratio' : 0.2, + 'search_space': 'fbsb', + }, + + 'fbnet_hit': { + 'genotypes' : [ + 'conv3', + 'ir_k3_e3', 'ir_k3_e3', 'ir_k3_e3_r2', 'ir_k3_e3', + 'ir_k5_e6', 'ir_k5_e6', 'ir_k3_e3', 'ir_k3_e3', + 'ir_k7_e6', 'ir_k5_e6', 'ir_k5_e6_r2', 'ir_k5_e3', + 'ir_k5_e6', 'ir_k5_e6_r2', 'ir_k5_e6', 'ir_k5_e6_r2', + 'ir_k7_e6', 'ir_k5_e6', 'ir_k5_e6_r2', 'ir_k5_e6', + 'ir_k3_e3', 'conv1'], + 'strides' : [ + 2, + 2, 1, 1, 1, + 2, 1, 1, 1, + 2, 1, 1, 1, + 1, 1, 1, 1, + 2, 1, 1, 1, + 1, 1], + 'out_channels' : [ + 16, + 48, 48, 48, 48, + 96, 96, 96, 96, + 184, 184, 184, 184, + 256, 256, 256, 256, + 352, 352, 352, 352, + 1024, 2048 + ], + 'dropout_ratio' : 0.2, + 'search_space': 'fbsb', + }, + +} diff --git a/CDARTS_detection/mmdet/models/backbones/fbnet_blocks.py b/CDARTS_detection/mmdet/models/backbones/fbnet_blocks.py new file mode 100644 index 0000000..325c22f --- /dev/null +++ b/CDARTS_detection/mmdet/models/backbones/fbnet_blocks.py @@ -0,0 +1,156 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import time +import numpy as np + +norm_cfg = { + 'BN': nn.BatchNorm2d, + 'SyncBN': nn.SyncBatchNorm, + 'GN': nn.GroupNorm, +} +# _norm = 'SyncBN' +_norm = 'BN' +norm_layer = norm_cfg[_norm] + +PRIMITIVES = { + 'skip': lambda input_size, in_channels, out_channels, stride: Identity('skip', input_size, in_channels, out_channels, stride), + 'ir_k3_e1': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 1, stride, 3), + 'ir_k3_e1_r2': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 1, stride, 3, dilation=2), + 'ir_k3_e1_r3': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 1, stride, 3, dilation=3), + 'ir_k3_e3': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 3, stride, 3), + 'ir_k3_e3_r2': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 3, stride, 3, 
dilation=2), + 'ir_k3_e3_r3': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 3, stride, 3, dilation=3), + 'ir_k3_e6': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 6, stride, 3), + 'ir_k3_e6_r2': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 6, stride, 3, dilation=2), + 'ir_k3_e6_r3': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 6, stride, 3, dilation=3), + 'ir_k3_s2': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 1, stride, 3, 2), + 'ir_k5_e1': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 1, stride, 5), + 'ir_k5_e1_r2': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 1, stride, 5, dilation=2), + 'ir_k5_e1_r3': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 1, stride, 5, dilation=3), + 'ir_k5_e3': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 3, stride, 5), + 'ir_k5_e6': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 6, stride, 5), + 'ir_k5_e6_r2': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 6, stride, 5, dilation=2), + 'ir_k5_e6_r3': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 6, stride, 5, dilation=3), + 'ir_k5_s2': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 1, stride, 5, 2), + 'ir_k7_e6': lambda input_size, in_channels, out_channels, stride: MBBlock(input_size, in_channels, out_channels, 6, stride, 7), + 'sep_k3' : lambda input_size, in_channels, out_channels, stride: SepBlock('sep_k3', input_size, in_channels, out_channels, 1, stride, 3), + 'sep_k5' : lambda input_size, in_channels, out_channels, stride: SepBlock('sep_k5', input_size, in_channels, out_channels, 1, stride, 5), + 'conv1' : lambda input_size, in_channels, out_channels, stride: ConvBNReLU(input_size, in_channels, out_channels, 1, stride, 0, False, 'relu', 'bn'), + 'conv3' : lambda input_size, in_channels, out_channels, stride: ConvBNReLU(input_size, in_channels, out_channels, 3, stride, 1, False, 'relu', 'bn'), +} + + +class AvgPool(nn.Module): + def __init__(self, args, input_size, in_channels, stride): + super(AvgPool, self).__init__() + self.args, self.stride = args, stride + + def forward(self, x): + return F.avg_pool2d(x, self.stride) + + +class ChannelShuffle(nn.Module): + def __init__(self, input_size, in_channels, groups=1): + super(ChannelShuffle, self).__init__() + self.groups = groups + + def forward(self, x): + if self.groups == 1: + return x + N, C, H, W = x.size() + cpg = C // self.groups # channels per group + out = x.view(N, self.groups, cpg, H, W) + out = out.permute(0, 2, 1, 3, 4).contiguous() + out = out.view(N, C, H, W) + return out + +class ConvBNReLU(nn.Module): + def __init__(self, input_size, in_channels, out_channels, kernel_size, stride, padding, bias, relu_type, bn_type, groups=1, dilation=1): + super(ConvBNReLU, self).__init__() + assert(relu_type in ['relu', 'none']) + self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding, self.bias, self.relu_type, self.bn_type, 
self.groups, self.dilation = \
+            in_channels, out_channels, kernel_size, stride, padding, bias, relu_type, bn_type, groups, dilation
+        self.input_size = input_size
+
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups)
+        nn.init.kaiming_normal_(self.conv.weight, mode="fan_out", nonlinearity="relu")
+        if self.conv.bias is not None:
+            nn.init.constant_(self.conv.bias, 0.0)
+        if bn_type == 'bn':
+            self.bn = norm_layer(out_channels)
+        elif bn_type == 'gn':
+            # NOTE: the original line referenced an undefined `gn_group`;
+            # 32 groups is a common GroupNorm default and is assumed here.
+            self.bn = nn.GroupNorm(32, num_channels=out_channels)
+        if relu_type == 'relu':
+            self.relu = nn.ReLU(inplace=True)
+        else:
+            self.relu = nn.Sequential()
+
+    def forward(self, x):
+        out = self.conv(x)
+        out = self.relu(self.bn(out))
+        return out
+
+
+class Identity(nn.Module):
+    def __init__(self, genotype, input_size, in_channels, out_channels, stride):
+        super(Identity, self).__init__()
+        if in_channels != out_channels or stride != 1:
+            self.conv = ConvBNReLU(input_size, in_channels, out_channels, kernel_size=1, stride=stride, padding=0,
+                                   bias=False, relu_type='relu', bn_type='bn')
+        else:
+            self.conv = nn.Sequential()
+
+    def forward(self, x):
+        if isinstance(self.conv, ConvBNReLU):
+            return self.conv(x)
+        else:
+            return x
+
+
+class SepBlock(nn.Module):
+    def __init__(self, genotype, input_size, in_channels, out_channels, expansion, stride, kernel_size, groups=1, bn_type='bn'):
+        super(SepBlock, self).__init__()
+        padding = (kernel_size - 1) // 2
+        self.input_size, self.in_channels, self.out_channels, self.kernel_size, self.stride, self.expansion, self.groups, self.bn_type, self.padding = \
+            input_size, in_channels, out_channels, kernel_size, stride, expansion, groups, bn_type, padding
+
+        self.conv1 = ConvBNReLU(input_size, in_channels, in_channels, kernel_size=kernel_size, stride=self.stride, padding=padding, bias=False, relu_type='relu', bn_type=bn_type, groups=in_channels)
+        self.conv2 = ConvBNReLU(input_size // stride, in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False, relu_type='none', bn_type=bn_type, groups=groups)
+
+    def forward(self, x):
+        out = self.conv1(x)
+        out = self.conv2(out)
+        return out
+
+
+class MBBlock(nn.Module):
+    def __init__(self, input_size, in_channels, out_channels, expansion, stride, kernel_size, dilation=1, groups=1, has_se=False, bn_type='bn'):
+        super(MBBlock, self).__init__()
+        padding = (kernel_size - 1) * dilation // 2
+        self.in_channels, self.out_channels, self.kernel_size, self.has_se, self.stride, self.expansion, self.groups, self.bn_type, self.padding, self.dilation = \
+            in_channels, out_channels, kernel_size, has_se, stride, expansion, groups, bn_type, padding, dilation
+        mid_channels = self.in_channels * self.expansion
+
+        self.conv1 = ConvBNReLU(input_size, in_channels, mid_channels, kernel_size=1, stride=1, padding=0, dilation=1,
+                                bias=False, relu_type='relu', bn_type=bn_type, groups=groups)
+        self.conv2 = ConvBNReLU(input_size, mid_channels, mid_channels, kernel_size=kernel_size, stride=self.stride, padding=padding, dilation=dilation,
+                                bias=False, relu_type='relu', bn_type=bn_type, groups=mid_channels)
+        self.conv3 = ConvBNReLU(input_size // self.stride, mid_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1,
+                                bias=False, relu_type='none', bn_type=bn_type, groups=groups)
+
+        self.shuffle = ChannelShuffle(input_size, self.in_channels, self.groups)
+
+    def forward(self, x):
+        out = self.conv1(x)
+        if not self.groups == 1:
+            out = self.shuffle(out)
+        out = self.conv2(out)
+        if self.has_se:
+            # NOTE: no SE module is constructed in this block's __init__, so
+            # has_se=True would fail here; callers in this repo always leave it False.
+            se_out = self.se(out)
+            out = out * se_out
+        out = self.conv3(out)
+        if self.in_channels == self.out_channels and self.stride == 1:
+            out = out + x
+        return out
diff --git a/CDARTS_detection/mmdet/models/backbones/feature_hooks.py b/CDARTS_detection/mmdet/models/backbones/feature_hooks.py
new file mode 100644
index 0000000..8ffcda8
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/backbones/feature_hooks.py
@@ -0,0 +1,31 @@
+from collections import defaultdict, OrderedDict
+from functools import partial
+
+
+class FeatureHooks:
+
+    def __init__(self, hooks, named_modules):
+        # setup feature hooks
+        modules = {k: v for k, v in named_modules}
+        for h in hooks:
+            hook_name = h['name']
+            m = modules[hook_name]
+            hook_fn = partial(self._collect_output_hook, hook_name)
+            if h['type'] == 'forward_pre':
+                m.register_forward_pre_hook(hook_fn)
+            elif h['type'] == 'forward':
+                m.register_forward_hook(hook_fn)
+            else:
+                assert False, "Unsupported hook type"
+        self._feature_outputs = defaultdict(OrderedDict)
+
+    def _collect_output_hook(self, name, *args):
+        x = args[-1]  # tensor we want is last argument, output for fwd, input for fwd_pre
+        if isinstance(x, tuple):
+            x = x[0]  # unwrap input tuple
+        self._feature_outputs[x.device][name] = x
+
+    def get_output(self, device):
+        output = tuple(self._feature_outputs[device].values())[::-1]
+        self._feature_outputs[device] = OrderedDict()  # clear after reading
+        return output
diff --git a/CDARTS_detection/mmdet/models/backbones/hrnet.py b/CDARTS_detection/mmdet/models/backbones/hrnet.py
new file mode 100644
index 0000000..178d102
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/backbones/hrnet.py
@@ -0,0 +1,484 @@
+import logging
+
+import torch.nn as nn
+from mmcv.cnn import constant_init, kaiming_init
+from mmcv.runner import load_checkpoint
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from ..registry import BACKBONES
+from ..utils import build_norm_layer, build_conv_layer
+from .resnet import BasicBlock, Bottleneck
+
+
+class HRModule(nn.Module):
+    """ High-Resolution Module for HRNet. In this module, every branch
+    has 4 BasicBlocks/Bottlenecks. Fusion/Exchange is in this module.
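+    Each branch runs at its own resolution; the fuse layers exchange information
+    by upsampling (1x1 conv + nearest upsample) or downsampling (stride-2 3x3
+    convs) every branch to every output resolution and summing the results.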
+ """ + + def __init__(self, + num_branches, + blocks, + num_blocks, + in_channels, + num_channels, + multiscale_output=True, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN')): + super(HRModule, self).__init__() + self._check_branches(num_branches, num_blocks, in_channels, + num_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.multiscale_output = multiscale_output + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.with_cp = with_cp + self.branches = self._make_branches(num_branches, blocks, num_blocks, + num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=False) + + def _check_branches(self, num_branches, num_blocks, in_channels, + num_channels): + if num_branches != len(num_blocks): + error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format( + num_branches, len(num_blocks)) + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format( + num_branches, len(num_channels)) + raise ValueError(error_msg) + + if num_branches != len(in_channels): + error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format( + num_branches, len(in_channels)) + raise ValueError(error_msg) + + def _make_one_branch(self, + branch_index, + block, + num_blocks, + num_channels, + stride=1): + downsample = None + if stride != 1 or \ + self.in_channels[branch_index] != \ + num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + self.in_channels[branch_index], + num_channels[branch_index] * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, num_channels[branch_index] * + block.expansion)[1]) + + layers = [] + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + self.in_channels[branch_index] = \ + num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append( + block( + self.in_channels[branch_index], + num_channels[branch_index], + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + nn.Upsample( + scale_factor=2**(j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(None) + else: + conv_downsamples = [] + for k in range(i - j): + if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + 
nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=False))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = 0 + for j in range(self.num_branches): + if i == j: + y += x[j] + else: + y += self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + return x_fuse + + +@BACKBONES.register_module +class HRNet(nn.Module): + """HRNet backbone. + + High-Resolution Representations for Labeling Pixels and Regions + arXiv: https://arxiv.org/abs/1904.04514 + + Args: + extra (dict): detailed configuration for each stage of HRNet. + conv_cfg (dict): dictionary to construct and config conv layer. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + """ + + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + + def __init__(self, + extra, + conv_cfg=None, + norm_cfg=dict(type='BN'), + norm_eval=True, + with_cp=False, + zero_init_residual=False): + super(HRNet, self).__init__() + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.zero_init_residual = zero_init_residual + + # stem net + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) + + self.conv1 = build_conv_layer( + self.conv_cfg, + 3, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + 64, + 64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.relu = nn.ReLU(inplace=True) + + # stage 1 + self.stage1_cfg = self.extra['stage1'] + num_channels = self.stage1_cfg['num_channels'][0] + block_type = self.stage1_cfg['block'] + num_blocks = self.stage1_cfg['num_blocks'][0] + + block = self.blocks_dict[block_type] + stage1_out_channels = num_channels * block.expansion + self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) + + # stage 2 + self.stage2_cfg = self.extra['stage2'] + num_channels = self.stage2_cfg['num_channels'] + block_type = self.stage2_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [channel * block.expansion for channel in num_channels] + self.transition1 = self._make_transition_layer([stage1_out_channels], + num_channels) + self.stage2, pre_stage_channels = self._make_stage( + self.stage2_cfg, num_channels) + + # stage 3 + self.stage3_cfg = self.extra['stage3'] + num_channels = self.stage3_cfg['num_channels'] + block_type = self.stage3_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [channel * block.expansion for channel in num_channels] + 
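+        # BasicBlock.expansion == 1 and Bottleneck.expansion == 4 in the resnet
+        # blocks imported above, so 'BASIC' stages keep the configured widths
+        # while 'BOTTLENECK' stages widen them 4x.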
self.transition2 = self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage3, pre_stage_channels = self._make_stage( + self.stage3_cfg, num_channels) + + # stage 4 + self.stage4_cfg = self.extra['stage4'] + num_channels = self.stage4_cfg['num_channels'] + block_type = self.stage4_cfg['block'] + + block = self.blocks_dict[block_type] + num_channels = [channel * block.expansion for channel in num_channels] + self.transition3 = self._make_transition_layer(pre_stage_channels, + num_channels) + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, num_channels) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def _make_transition_layer(self, num_channels_pre_layer, + num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU(inplace=True))) + else: + transition_layers.append(None) + else: + conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + self.conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(self.norm_cfg, planes * block.expansion)[1]) + + layers = [] + layers.append( + block( + inplanes, + planes, + stride, + downsample=downsample, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes, + planes, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, in_channels, multiscale_output=True): + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + + hr_modules = [] + for i in range(num_modules): + # multi_scale_output is only used for the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + hr_modules.append( + HRModule( + num_branches, + block, + num_blocks, + in_channels, + num_channels, + reset_multiscale_output, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg)) + + return nn.Sequential(*hr_modules), in_channels + + 
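+    # Illustrative `extra` config (HRNetV2-W32-style values, assumed rather than
+    # taken from this patch):
+    #   extra = dict(
+    #       stage1=dict(num_modules=1, num_branches=1, block='BOTTLENECK',
+    #                   num_blocks=(4,), num_channels=(64,)),
+    #       stage2=dict(num_modules=1, num_branches=2, block='BASIC',
+    #                   num_blocks=(4, 4), num_channels=(32, 64)),
+    #       stage3=dict(num_modules=4, num_branches=3, block='BASIC',
+    #                   num_blocks=(4, 4, 4), num_channels=(32, 64, 128)),
+    #       stage4=dict(num_modules=3, num_branches=4, block='BASIC',
+    #                   num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256)))
+    #   backbone = HRNet(extra)  # forward() then returns four multi-scale maps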
def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['num_branches']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['num_branches']): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['num_branches']): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + return y_list + + def train(self, mode=True): + super(HRNet, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/CDARTS_detection/mmdet/models/backbones/mnasnet.py b/CDARTS_detection/mmdet/models/backbones/mnasnet.py new file mode 100644 index 0000000..006162b --- /dev/null +++ b/CDARTS_detection/mmdet/models/backbones/mnasnet.py @@ -0,0 +1,192 @@ +import math + +import torch +import torch.nn as nn +from torch.autograd import Variable + +from .dropblock import DropBlockScheduled, DropBlock2D + +import logging +from torch.nn.modules.batchnorm import _BatchNorm + +import torch.nn.functional as F +import time +import numpy as np + +from ..registry import BACKBONES + + +def Conv_3x3(inp, oup, stride, activation=nn.ReLU6, act_params={"inplace": True}): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + activation(**act_params) + ) + + +def Conv_1x1(inp, oup, activation=nn.ReLU6, act_params={"inplace": True}): + return nn.Sequential( + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + activation(**act_params) + ) + + +def SepConv_3x3(inp, oup, activation=nn.ReLU6, act_params={"inplace": True}): # input=32, output=16 + return nn.Sequential( + # dw + nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False), + nn.BatchNorm2d(inp), + activation(**act_params), + # pw-linear + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ) + + +class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, expand_ratio, kernel, drop_prob=0.0, num_steps=3e5, activation=nn.ReLU6, + act_params={"inplace": True}): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2] + + self.use_res_connect = self.stride == 1 and inp == oup + + self.conv = nn.Sequential( + # pw + nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False), + nn.BatchNorm2d(inp * expand_ratio), + DropBlockScheduled( + DropBlock2D(drop_prob=drop_prob, block_size=7), + start_value=0., + stop_value=drop_prob, + nr_steps=num_steps), + 
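+            # DropBlockScheduled ramps the drop probability from start_value (0)
+            # to stop_value (drop_prob) over nr_steps training steps (a linear
+            # ramp in the common DropBlock scheduler), so regularization stays
+            # weak early in training.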
+            activation(**act_params),
+            # dw
+            nn.Conv2d(inp * expand_ratio, inp * expand_ratio, kernel, stride, kernel // 2, groups=inp * expand_ratio,
+                      bias=False),
+            nn.BatchNorm2d(inp * expand_ratio),
+            DropBlockScheduled(
+                DropBlock2D(drop_prob=drop_prob, block_size=7),
+                start_value=0.,
+                stop_value=drop_prob,
+                nr_steps=num_steps),
+            activation(**act_params),
+            # pw-linear
+            nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),
+            nn.BatchNorm2d(oup),
+            DropBlockScheduled(
+                DropBlock2D(drop_prob=drop_prob, block_size=7),
+                start_value=0.,
+                stop_value=drop_prob,
+                nr_steps=num_steps),
+        )
+        if self.use_res_connect:
+            self.skip_drop = DropBlockScheduled(
+                DropBlock2D(drop_prob=drop_prob, block_size=7),
+                start_value=0.,
+                stop_value=drop_prob,
+                nr_steps=num_steps)
+
+    def init_weights(self, pretrained=None):
+        # NOTE: these helpers are not imported at the top of this file in the
+        # original patch; import them locally so the method is self-contained.
+        from mmcv.cnn import constant_init, kaiming_init
+        from .utils import load_checkpoint
+        if isinstance(pretrained, str):
+            logger = logging.getLogger()
+            load_checkpoint(self, pretrained, strict=False, logger=logger)
+        elif pretrained is None:
+            for m in self.modules():
+                if isinstance(m, nn.Conv2d):
+                    kaiming_init(m)
+                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+                    constant_init(m, 1)
+        else:
+            raise TypeError('pretrained must be a str or None')
+
+    def forward(self, x):
+        if self.use_res_connect:
+            return self.skip_drop(x + self.conv(x))
+        else:
+            return self.conv(x)
+
+
+@BACKBONES.register_module
+class MnasNet(nn.Module):
+    def __init__(self, out_indices=(1, 2, 3, 4), width_mult=1., drop_prob=0.0, num_steps=3e5, activation=nn.ReLU6,
+                 act_params={"inplace": True}):
+        super(MnasNet, self).__init__()
+        self.out_indices = out_indices
+
+        self.activation = activation
+        self.act_params = act_params
+
+        # setting of inverted residual blocks
+        self.interverted_residual_setting = [
+            # t, c, n, s, k, dp
+            [3, 24, 3, 2, 3, 0],  # -> 56x56
+            [3, 40, 3, 2, 5, 0],  # -> 28x28
+            [6, 80, 3, 2, 5, 0],  # -> 14x14
+            [6, 96, 2, 1, 3, drop_prob],  # -> 14x14
+            [6, 192, 4, 2, 5, drop_prob],  # -> 7x7
+            [6, 320, 1, 1, 3, drop_prob],  # -> 7x7
+        ]
+        self.num_steps = num_steps
+
+        input_channel = int(32 * width_mult)
+        self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280
+
+        # building the first two layers
+        self.features = [Conv_3x3(3, input_channel, 2, self.activation, self.act_params),
+                         SepConv_3x3(input_channel, 16, self.activation, self.act_params)]
+        input_channel = 16
+
+        # building inverted residual blocks (MBConv)
+        for t, c, n, s, k, dp in self.interverted_residual_setting:
+            output_channel = int(c * width_mult)
+            for i in range(n):
+                if i == 0:
+                    self.features.append(InvertedResidual(input_channel, output_channel, s, t, k, dp, self.num_steps,
+                                                          self.activation, self.act_params))
+                else:
+                    self.features.append(InvertedResidual(input_channel, output_channel, 1, t, k, dp, self.num_steps,
+                                                          self.activation, self.act_params))
+                input_channel = output_channel
+
+        # building last several layers
+        self.features.append(Conv_1x1(input_channel, self.last_channel, self.activation, self.act_params))
+
+        # make it nn.Sequential
+        self.features = nn.Sequential(*self.features)
+
+    def forward(self, x):
+        outs = []
+        for i, layer in enumerate(self.features):
+            x = layer(x)
+            if i in self.out_indices:
+                outs.append(x)
+
+        return outs
+
+    def _initialize_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+                m.weight.data.normal_(0, math.sqrt(2.
/ n)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + n = m.weight.size(1) + m.weight.data.normal_(0, 0.01) + m.bias.data.zero_() + + +if __name__ == '__main__': + net = MnasNet() + print(net) + x_image = Variable(torch.randn(1, 3, 224, 224)) + y = net(x_image) + # print(y) \ No newline at end of file diff --git a/CDARTS_detection/mmdet/models/backbones/mobilenetv2.py b/CDARTS_detection/mmdet/models/backbones/mobilenetv2.py new file mode 100644 index 0000000..01865bb --- /dev/null +++ b/CDARTS_detection/mmdet/models/backbones/mobilenetv2.py @@ -0,0 +1,201 @@ +import logging + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcv.cnn import constant_init, kaiming_init +from .utils import load_checkpoint + +from ..registry import BACKBONES + +norm_cfg = { + 'BN': nn.BatchNorm2d, + 'SyncBN': nn.SyncBatchNorm, + 'GN': nn.GroupNorm, +} +_norm = 'BN' +norm_layer = norm_cfg[_norm] + + +class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, expand_ratio, kernel_size=3, rf_series=1, rf_sd=1, rf_bn=True, rf_relu=True): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2] + + hidden_dim = round(inp * expand_ratio) + self.use_res_connect = self.stride == 1 and inp == oup + + if expand_ratio == 1: + self.conv = nn.Sequential( + # dw + nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, 1, groups=hidden_dim, bias=False), + norm_layer(hidden_dim), + nn.ReLU6(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + norm_layer(oup), + ) + else: + self.conv = [] + # pw + self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)) + self.conv.append(norm_layer(hidden_dim)) + self.conv.append(nn.ReLU6(inplace=True)) + # dw + + for idx in range(rf_series): + self.conv.append(nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, + padding=int((kernel_size-1)*(idx+1)/2), + dilation=idx+1, groups=hidden_dim, bias=False)) + if rf_bn: + self.conv.append(norm_layer(hidden_dim)) + if rf_relu: + self.conv.append(nn.ReLU6(inplace=True)) + if not rf_bn: + self.conv.append(norm_layer(hidden_dim)) + if not rf_relu: + self.conv.append(nn.ReLU6(inplace=True)) + + # pw-linear + self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)) + self.conv.append(norm_layer(oup)) + self.conv = nn.Sequential(*self.conv) + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +@BACKBONES.register_module +class MobileNetV2(nn.Module): + + def __init__(self, + width_mult=1., + input_channel=32, + last_channel = 1280, + kernel_size=3, + + out_indices=(2, 5, 12, 17), + style='pytorch', + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True): + + super(MobileNetV2, self).__init__() + block = InvertedResidual + input_channel = int(input_channel * width_mult) + last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel + interverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], # 112x112 0 + [6, 24, 2, 2], # 56x56 2 + [6, 32, 3, 2], # 28x28 5 + [6, 64, 4, 2], # 14x14 9 + [6, 96, 3, 1], # 14x14 12 + [6, 160, 3, 2], # 7x7 15 + [6, 320, 1, 1], # 7x7 16 + ] + self.kernel_size=kernel_size + self.out_indices = out_indices + self.style = style + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + 
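        # out_indices select entries of self.features: the stem conv sits at
+        # index 0, the 17 inverted-residual blocks at 1-17 and the 1x1 head at
+        # 18, so the default (2, 5, 12, 17) taps the 24-, 32-, 96- and
+        # 320-channel stages.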
+        self.norm_cfg = norm_cfg
+        self.norm_eval = norm_eval
+
+        self.mv2_layer = []
+        features = []
+        features.append(
+            nn.Sequential(
+                nn.Conv2d(3, input_channel, 3, stride=2, padding=1, bias=False),
+                norm_layer(input_channel),
+                nn.ReLU6(inplace=True)
+            )
+        )
+
+        # building inverted residual blocks
+        for t, c, n, s in interverted_residual_setting:
+            output_channel = int(c * width_mult)
+            for i in range(n):
+                if i == 0:
+                    features.append(block(input_channel, output_channel, s, expand_ratio=t,
+                                          kernel_size=3))
+                else:
+                    features.append(block(input_channel, output_channel, 1, expand_ratio=t,
+                                          kernel_size=kernel_size))
+                input_channel = output_channel
+
+        features.append(
+            nn.Sequential(
+                nn.Conv2d(input_channel, last_channel, 1, 1, 0, bias=False),
+                norm_layer(last_channel),
+                nn.ReLU6(inplace=True)
+            )
+        )
+        for i, module in enumerate(features):
+            layer_name = 'features{}'.format(i)
+            self.add_module(layer_name, module)
+            self.mv2_layer.append(layer_name)
+
+        for m in self.modules():
+            if isinstance(m, nn.SyncBatchNorm):
+                m._specify_ddp_gpu_num(1)
+
+        self._freeze_stages()
+
+    def _freeze_stages(self):
+        # freeze the stem conv and the first `frozen_stages` feature blocks;
+        # with the default frozen_stages=-1 nothing is frozen
+        for i in range(self.frozen_stages + 1):
+            m = getattr(self, 'features{}'.format(i))
+            m.eval()
+            for param in m.parameters():
+                param.requires_grad = False
+
+    def init_weights(self, pretrained=None):
+        if isinstance(pretrained, str):
+            logger = logging.getLogger()
+            load_checkpoint(self, pretrained, strict=False, logger=logger)
+        elif pretrained is None:
+            for m in self.modules():
+                if isinstance(m, nn.Conv2d):
+                    kaiming_init(m)
+                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+                    constant_init(m, 1)
+        else:
+            raise TypeError('pretrained must be a str or None')
+
+    def forward(self, x):
+        outs = []
+        for i, layer_name in enumerate(self.mv2_layer):
+            layer = getattr(self, layer_name)
+            x = layer(x)
+            if i in self.out_indices:
+                outs.append(x)
+        return tuple(outs)
+
+    def train(self, mode=True):
+        super(MobileNetV2, self).train(mode)
+        self._freeze_stages()
+        if mode and self.norm_eval:
+            for m in self.modules():
+                # trick: eval() has effect on BatchNorm only
+                if isinstance(m, _BatchNorm):
+                    m.eval()
diff --git a/CDARTS_detection/mmdet/models/backbones/mobilenetv3.py b/CDARTS_detection/mmdet/models/backbones/mobilenetv3.py
new file mode 100644
index 0000000..c17141a
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/backbones/mobilenetv3.py
@@ -0,0 +1,389 @@
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
+from timm.models import resume_checkpoint
+from .builder import *
+from ..registry import BACKBONES
+
+IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
+IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
+IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
+IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
+
+def hard_sigmoid(x, inplace: bool = False):
+    if inplace:
+        return x.add_(3.).clamp_(0., 6.).div_(6.)
+    else:
+        return F.relu6(x + 3.) / 6.
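+
+# hard_sigmoid is the piecewise-linear sigmoid approximation used by the
+# MobileNetV3 squeeze-excite gates: h(x) = ReLU6(x + 3) / 6, so h(-3) = 0,
+# h(0) = 0.5 and h(3) = 1. The in-place variant mutates x, so it should only
+# be used on tensors that are not needed elsewhere in the graph.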
+ +class HardSigmoid(nn.Module): + def __init__(self, inplace: bool = False): + super(HardSigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return hard_sigmoid(x, self.inplace) + +class SelectAdaptivePool2d(nn.Module): + """Selectable global pooling layer with dynamic input kernel size + """ + def __init__(self, output_size=1, pool_type='avg', flatten=False): + super(SelectAdaptivePool2d, self).__init__() + self.output_size = output_size + self.pool_type = pool_type + self.flatten = flatten + if pool_type == 'avgmax': + self.pool = AdaptiveAvgMaxPool2d(output_size) + elif pool_type == 'catavgmax': + self.pool = AdaptiveCatAvgMaxPool2d(output_size) + elif pool_type == 'max': + self.pool = nn.AdaptiveMaxPool2d(output_size) + else: + if pool_type != 'avg': + assert False, 'Invalid pool type: %s' % pool_type + self.pool = nn.AdaptiveAvgPool2d(output_size) + + def forward(self, x): + x = self.pool(x) + if self.flatten: + x = x.flatten(1) + return x + + def feat_mult(self): + return adaptive_pool_feat_mult(self.pool_type) + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + 'output_size=' + str(self.output_size) \ + + ', pool_type=' + self.pool_type + ')' + +def create_conv2d(in_chs, out_chs, kernel_size, **kwargs): + """ Select a 2d convolution implementation based on arguments + Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. + Used extensively by EfficientNet, MobileNetv3 and related networks. + """ + assert 'groups' not in kwargs # only use 'depthwise' bool arg + if isinstance(kernel_size, list): + assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. 
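+        # e.g. kernel_size=[3, 5, 7] splits the channels into three groups,
+        # one depthwise kernel size per group (MixNet-style mixed convolution).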
+ m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs) + else: + depthwise = kwargs.pop('depthwise', False) + groups = out_chs if depthwise else 1 + if 'num_experts' in kwargs and kwargs['num_experts'] > 0: + m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs) + else: + m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs) + return m + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + +def conv_bn(inp, oup, stride, groups=1, act_fn=nn.ReLU): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False, groups=groups), + nn.BatchNorm2d(oup), + act_fn(inplace=True) + ) + + +def conv_1x1_bn(inp, oup, groups=1, act_fn=nn.ReLU): + return nn.Sequential( + nn.Conv2d(inp, oup, 1, 1, 0, bias=False, groups=groups), + nn.BatchNorm2d(oup), + act_fn(inplace=True) + ) + + + +default_cfgs = { + 'mobilenetv3_large_075': _cfg(url=''), + 'mobilenetv3_large_100': _cfg( + interpolation='bicubic', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth'), + 'mobilenetv3_small_075': _cfg(url=''), + 'mobilenetv3_small_100': _cfg(url=''), + 'mobilenetv3_rw': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', + interpolation='bicubic'), + 'tf_mobilenetv3_large_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_100': _cfg( + url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), +} + +_DEBUG = False + + +class ChildNet(nn.Module): + + def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True, + channel_multiplier=1.0, pad_type='', act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0., + se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, global_pool='avg', pool_bn=False, zero_gamma=False): + super(ChildNet, self).__init__() + + norm_layer = nn.SyncBatchNorm + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + self._in_chs = in_chans + self.pool_bn = 
pool_bn + + # Stem + stem_size = round_channels(stem_size, channel_multiplier) + self.conv_stem = create_conv2d(self._in_chs, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size, **norm_kwargs) + self.act1 = act_layer(inplace=True) + self._in_chs = stem_size + + # Middle stages (IR/ER/DS Blocks) + builder = ChildNetBuilder( + channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs, + norm_layer, norm_kwargs, drop_path_rate, verbose=_DEBUG) + self.blocks = nn.Sequential(*builder(self._in_chs, block_args)) + # self.blocks = builder(self._in_chs, block_args) + self._in_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type, bias=head_bias) + self.act2 = act_layer(inplace=True) + + # Classifier + self.classifier = nn.Linear(self.num_features * self.global_pool.feat_mult(), self.num_classes) + + if pool_bn: + self.pool_bn = nn.BatchNorm1d(1) + + efficientnet_init_weights(self, zero_gamma=zero_gamma) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.num_classes = num_classes + self.classifier = nn.Linear( + self.num_features * self.global_pool.feat_mult(), num_classes) if self.num_classes else None + + def forward_features(self, x): + # architecture = [[0], [], [], [], [], [0]] + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + outputs = [] + # 24, 40, 96, 320 + block_idxs = [1, 2, 4, 6] + for i, block in enumerate(self.blocks): + x = block(x) + if i in block_idxs: + outputs.append(x) + + # x = self.blocks(x) + return tuple(outputs) + + def forward(self, x): + x = self.forward_features(x) + return x + + +def modify_block_args(block_args, kernel_size, exp_ratio): + # kernel_size: 3,5,7 + # exp_ratio: 4,6 + block_type = block_args['block_type'] + # each type of block has different valid arguments, fill accordingly + if block_type == 'cn': + block_args['kernel_size'] = kernel_size + elif block_type == 'er': + block_args['exp_kernel_size'] = kernel_size + else: + block_args['dw_kernel_size'] = kernel_size + + if block_type == 'ir' or block_type == 'er': + block_args['exp_ratio'] = exp_ratio + return block_args + + +def _gen_childnet(**kwargs): + # 390M + arch_list = [[0], [3, 4, 2, 0], [5, 2, 4, 0], [4, 3, 2, 2], [1, 3, 0, 1], [2, 4, 4, 2], [0]] + # 290M + # arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]] + + choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} + choices_list = [[x,y] for x in choices['kernel_size'] for y in choices['exp_ratio']] + + num_features = 1280 + + # act_layer = HardSwish + act_layer = Swish + + + + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r2_k3_s1_e4_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 
'ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c960_se0.25'], + ] + + # arch_def = [ + # # stage 0, 112x112 in + # ['ds_r1_k3_s1_e1_c16_se0.25'], + # # stage 1, 112x112 in + # ['ir_r1_k3_s2_e4_c24_se0.25'], + # # stage 2, 56x56 in + # ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'], + # # stage 3, 28x28 in + # ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25'], + # # stage 4, 14x14in + # ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', + # 'ir_r1_k3_s1_e6_c96_se0.25'], + # # stage 5, 14x14in + # ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'], + # # stage 6, 7x7 in + # ['cn_r1_k1_s1_c960_se0.25'], + # ] + + new_arch = [] + # change to child arch_def + for i, (layer_choice, layer_arch) in enumerate(zip(arch_list, arch_def)): + if len(layer_arch) == 1: + new_arch.append(layer_arch) + continue + else: + new_layer = [] + for j, (block_choice, block_arch) in enumerate(zip(layer_choice, layer_arch)): + kernel_size, exp_ratio = choices_list[block_choice] + elements = block_arch.split('_') + block_arch = block_arch.replace(elements[2], 'k{}'.format(str(kernel_size))) + block_arch = block_arch.replace(elements[4], 'e{}'.format(str(exp_ratio))) + new_layer.append(block_arch) + new_arch.append(new_layer) + + model_kwargs = dict( + block_args=decode_arch_def(new_arch), + num_features=num_features, + stem_size=16, + # channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + act_layer=act_layer, + se_kwargs=dict(act_layer=nn.ReLU, gate_fn=hard_sigmoid, reduce_mid=True, divisor=8), + num_classes=1000, + drop_rate=0.2, + drop_path_rate=0.2, + global_pool='avg' + ) + model = ChildNet(**model_kwargs) + return model + +@BACKBONES.register_module +class SSDMobilenetV3(nn.Module): + def __init__(self, input_size, width_mult=1.0, + activation_type='relu', + single_scale=False): + super(SSDMobilenetV3, self).__init__() + self.input_size = input_size + self.single_scale = single_scale + self.width_mult = width_mult + self.backbone = _gen_childnet() + # del self.backbone.blocks[3][2] + # del self.backbone.blocks[3][4] + + #for m in self.backbone.modules(): + # if isinstance(m, nn.BatchNorm2d): + # m.eval() + # m.weight.requires_grad = False + # m.bias.requires_grad = False + + self.last_channel = self.backbone.blocks[-1][-1].conv.out_channels # self.backbone.blocks[-1][-1] + + # building last several layers + self.extra_convs = [] + if not self.single_scale: + self.extra_convs.append(conv_1x1_bn(self.last_channel, 1280, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(1280, 256, + act_fn=Swish)) + self.extra_convs.append(conv_bn(256, 256, 2, groups=256, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(256, 512, groups=1, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(512, 128, + act_fn=Swish)) + self.extra_convs.append(conv_bn(128, 128, 2, groups=128, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(128, 256, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(256, 128, + act_fn=Swish)) + self.extra_convs.append(conv_bn(128, 128, 2, groups=128, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(128, 256, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(256, 64, + act_fn=Swish)) + self.extra_convs.append(conv_bn(64, 64, 2, groups=64, + act_fn=Swish)) + self.extra_convs.append(conv_1x1_bn(64, 128, + act_fn=Swish)) + self.extra_convs = nn.Sequential(*self.extra_convs) + + def 
init_weights(self, pretrained=None): + if pretrained: + state_dict = torch.load(pretrained) + state_dict = state_dict['state_dict'] + # resume_checkpoint(self.backbone, pretrained) + self.backbone.load_state_dict(state_dict, strict=True) + else: + print("No pretrained model!") + return + + def forward(self, x): + outputs = self.backbone(x) + x = outputs[-1] + outs = [] + for i, conv in enumerate(self.extra_convs): + x = conv(x) + if i % 3 == 0: + outs.append(x) + + if self.single_scale: + # outs.append(x) + return outputs + + return tuple(outs) + + diff --git a/CDARTS_detection/mmdet/models/backbones/resnet.py b/CDARTS_detection/mmdet/models/backbones/resnet.py new file mode 100644 index 0000000..497ce5a --- /dev/null +++ b/CDARTS_detection/mmdet/models/backbones/resnet.py @@ -0,0 +1,822 @@ +import logging + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from torch.nn.modules.batchnorm import _BatchNorm + +from mmcv.cnn import constant_init, kaiming_init +# from mmcv.runner import load_checkpoint + +from mmdet.ops import DeformConv, ModulatedDeformConv, ContextBlock +from mmdet.models.plugins import GeneralizedAttention + +from ..registry import BACKBONES +from ..utils import build_conv_layer, build_norm_layer + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + kernel_size=3, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv2_split=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + gcb=None, + gen_attention=None): + super(BasicBlock, self).__init__() + assert dcn is None, "Not implemented yet." + assert gen_attention is None, "Not implemented yet." + assert gcb is None, "Not implemented yet." + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, planes, planes, 3, padding=1, bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + assert not with_cp + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + kernel_size=3, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv2_split=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + gcb=None, + gen_attention=None): + """Bottleneck block for ResNet. + If style is "pytorch", the stride-two layer is the 3x3 conv layer, + if it is "caffe", the stride-two layer is the first 1x1 conv layer. 
+ """ + super(Bottleneck, self).__init__() + assert style in ['pytorch', 'caffe'] + assert dcn is None or isinstance(dcn, dict) + assert gcb is None or isinstance(gcb, dict) + assert gen_attention is None or isinstance(gen_attention, dict) + + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv2_split = conv2_split + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dcn = dcn + self.with_dcn = dcn is not None + self.gcb = gcb + self.with_gcb = gcb is not None + self.gen_attention = gen_attention + self.with_gen_attention = gen_attention is not None + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + self.with_modulated_dcn = False + if self.with_dcn: + fallback_on_stride = dcn.get('fallback_on_stride', False) + self.with_modulated_dcn = dcn.get('modulated', False) + if not self.with_dcn or fallback_on_stride: + if not self.conv2_split: + self.conv2 = build_conv_layer( + conv_cfg, + planes, + planes, + kernel_size=kernel_size, + stride=self.conv2_stride, + padding=int((kernel_size-1)*dilation/2), + dilation=dilation, + bias=False) + else: + self.conv2_d1 = build_conv_layer( + conv_cfg, planes, planes-2*int(planes/3), kernel_size=3, + stride=self.conv2_stride, padding=dilation, + dilation=dilation, bias=False) + self.conv2_d2 = build_conv_layer( + conv_cfg, planes, int(planes/3), kernel_size=3, + stride=self.conv2_stride, padding=dilation+1, + dilation=dilation+1, bias=False) + self.conv2_d3 = build_conv_layer( + conv_cfg, planes, int(planes/3), kernel_size=3, + stride=self.conv2_stride, padding=dilation+2, + dilation=dilation+2, bias=False) + else: + assert conv_cfg is None, 'conv_cfg must be None for DCN' + deformable_groups = dcn.get('deformable_groups', 1) + if not self.with_modulated_dcn: + conv_op = DeformConv + offset_channels = 18 + else: + conv_op = ModulatedDeformConv + offset_channels = 27 + """ original code + self.conv2_offset = nn.Conv2d( + planes, + deformable_groups * offset_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation) + """ + # for huang lang test + self.conv2_offset = StructualDeformBlock( + planes, + deformable_groups * offset_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation) + self.conv2 = conv_op( + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + deformable_groups=deformable_groups, + bias=False) + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + planes, + planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + if self.with_gcb: + gcb_inplanes = planes * self.expansion + self.context_block = ContextBlock(inplanes=gcb_inplanes, **gcb) + + # gen_attention + if self.with_gen_attention: + self.gen_attention_block = GeneralizedAttention( 
+ planes, **gen_attention) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + @property + def norm3(self): + return getattr(self, self.norm3_name) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if not self.with_dcn: + if not self.conv2_split: + out = self.conv2(out) + else: + out_d1 = self.conv2_d1(out) + out_d2 = self.conv2_d2(out) + out_d3 = self.conv2_d3(out) + out = torch.cat((out_d1, out_d2, out_d3), 1) + elif self.with_modulated_dcn: + offset_mask = self.conv2_offset(out) + offset = offset_mask[:, :18, :, :] + mask = offset_mask[:, -9:, :, :].sigmoid() + out = self.conv2(out, offset, mask) + else: + offset = self.conv2_offset(out) + out = self.conv2(out, offset) + out = self.norm2(out) + out = self.relu(out) + + if self.with_gen_attention: + out = self.gen_attention_block(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_gcb: + out = self.context_block(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +# for huang lang test +from torch.nn import functional as F +class StructualDeformBlock(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1): + super(StructualDeformBlock, self).__init__() + assert out_channels == 2 * kernel_size**2 + self.out_channels = out_channels + 6 + self.in_channels = in_channels + self.kernel_size = kernel_size + self.stride = stride + self.padding = padding + self.dilation = dilation + + # conv weights + self.weight = nn.Parameter(torch.Tensor(self.out_channels, in_channels, kernel_size, kernel_size)) + self.bias = nn.Parameter(torch.Tensor(self.out_channels)) + + # homogeneous coordinate map + coord = (torch.arange(kernel_size, dtype=torch.float) - kernel_size // 2) * dilation + coord = list(torch.meshgrid([coord, coord])) + coord.append(torch.ones(kernel_size, kernel_size)) + self.coord_map = torch.autograd.Variable(torch.stack(coord, dim=0).view(3, -1), requires_grad=False) # (3, K**2) + + def extra_repr(self): + s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}' + ', stride={stride}, padding={padding}, dilation={dilation}') + return s.format(**self.__dict__) + + def forward(self, x_): + offset_affine = F.conv2d(x_, self.weight, self.bias, self.stride, self.padding, self.dilation) + n, c, h, w = offset_affine.shape + # apply affine transformation on conv grids + deform_params = offset_affine[:, -6:].view(n, 2, 3, h, w) + structural_offset = torch.einsum('nijhw,jk->nikhw', (deform_params, self.coord_map.to(deform_params.device))) + offset = structural_offset.reshape(n, -1, h, w) + offset_affine[:, :-6] + return offset + + +class MBBlock(nn.Module): + def __init__(self, in_channels, out_channels, expansion, stride, kernel_size, dilation=1, groups=1): + super(MBBlock, self).__init__() + self.in_channels = in_channels + self.out_channels =out_channels + self.stride = stride + self.groups = groups + mid_channels = in_channels * expansion + padding = (kernel_size - 1) * dilation // 2 + + self.conv1 = nn.Sequential( + nn.Conv2d(in_channels, mid_channels, 1, stride=1, padding=0, dilation=1, bias=False, groups=groups), + nn.BatchNorm2d(mid_channels), + 
nn.ReLU(inplace=True) + ) + + self.conv2 = nn.Sequential( + nn.Conv2d(mid_channels, mid_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False, groups=mid_channels), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True) + ) + + self.conv3 = nn.Sequential( + nn.Conv2d(mid_channels, out_channels, 1, stride=1, padding=0, dilation=1, bias=False, groups=groups), + nn.BatchNorm2d(out_channels) + ) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1]) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0.0001) + nn.init.constant_(m.running_mean, 0) + + def forward(self, x): + out = self.conv1(x) + out = self.conv2(out) + out = self.conv3(out) + if self.in_channels == self.out_channels and self.stride == 1: + out = out + x + return out + + +def make_res_layer(block, + inplanes, + planes, + blocks, + stride=1, + kernel_size=3, + dilation=1, + style='pytorch', + with_cp=False, + conv2_split=False, + toy_replace=None, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + gcb=None, + gen_attention=None, + gen_attention_blocks=[]): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1], + ) + + layers = [] + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + kernel_size=kernel_size, + dilation=dilation, + downsample=downsample, + style=style, + with_cp=with_cp, + conv2_split=conv2_split, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + gen_attention=gen_attention if + (0 in gen_attention_blocks) else None)) + inplanes = planes * block.expansion + for i in range(1, blocks): + if blocks > 30 and i % 2 == 1: + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + kernel_size=3, + dilation=2, + style=style, + with_cp=with_cp, + conv2_split=conv2_split, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + gen_attention=gen_attention if + (i in gen_attention_blocks) else None)) + elif toy_replace is not None and i == toy_replace.get('layer', 30): + if toy_replace.get('block', 'res') == 'ir': + layers.append( + MBBlock(inplanes, inplanes, 1, 1, toy_replace.get('conv_kernel'), dilation=toy_replace.get('dilation'), groups=1) + ) + else: + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + kernel_size=toy_replace.get('conv_kernel'), + dilation=toy_replace.get('dilation'), + style=style, + with_cp=with_cp, + conv2_split=conv2_split, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + gen_attention=gen_attention if + (i in gen_attention_blocks) else None)) + else: + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + kernel_size=kernel_size, + dilation=dilation, + style=style, + with_cp=with_cp, + conv2_split=conv2_split, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + gen_attention=gen_attention if + (i in gen_attention_blocks) else None)) + # for [1,2,3,1] + ''' + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + kernel_size=3, + dilation=2, + style=style, + with_cp=with_cp, + conv2_split=conv2_split, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + 
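# in stages deeper than 30 blocks, every other block switches to
+                    # dilation=2 (see the `blocks > 30` branch above), which
+                    # enlarges the receptive field without extra stride
+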
gen_attention=gen_attention if + (i in gen_attention_blocks) else None)) + ''' + return nn.Sequential(*layers) + + +@BACKBONES.register_module +class ResNet(nn.Module): + """ResNet backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + num_stages (int): Resnet stages, normally 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + """ + + arch_settings = { + 10: (BasicBlock, (1, 1, 1, 1)), + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + num_stages=4, + strides=(1, 2, 2, 2), + kernel_size=3, + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + dcn=None, + stage_with_dcn=(False, False, False, False), + gcb=None, + stage_with_gcb=(False, False, False, False), + gen_attention=None, + stage_with_gen_attention=((), (), (), ()), + with_cp=False, + conv2_split=False, + toy_replace=None, # for toy experiments replace + zero_init_residual=True): + super(ResNet, self).__init__() + if depth not in self.arch_settings: + raise KeyError('invalid depth {} for resnet'.format(depth)) + self.depth = depth + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.kernel_size=kernel_size + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.conv2_split = conv2_split + self.toy_replace = toy_replace + self.norm_eval = norm_eval + self.dcn = dcn + self.stage_with_dcn = stage_with_dcn + if dcn is not None: + assert len(stage_with_dcn) == num_stages + self.gen_attention = gen_attention + self.gcb = gcb + self.stage_with_gcb = stage_with_gcb + if gcb is not None: + assert len(stage_with_gcb) == num_stages + self.zero_init_residual = zero_init_residual + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = 64 + + self._make_stem_layer() + + self.res_layers = [] + _toy_replace = None + for i, num_blocks in enumerate(self.stage_blocks): + if self.toy_replace is not None: + if i != self.toy_replace.get('stage'): + _toy_replace = None + else: + _toy_replace = self.toy_replace + stride = strides[i] + dilation = dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + gcb = self.gcb if 
self.stage_with_gcb[i] else None + planes = 64 * 2**i + res_layer = make_res_layer( + self.block, + self.inplanes, + planes, + num_blocks, + stride=stride, + kernel_size=kernel_size, + dilation=dilation, + style=self.style, + with_cp=with_cp, + conv2_split=conv2_split, + toy_replace=_toy_replace, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb, + gen_attention=gen_attention, + gen_attention_blocks=stage_with_gen_attention[i]) + self.inplanes = planes * self.block.expansion + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = self.block.expansion * 64 * 2**( + len(self.stage_blocks) - 1) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def _make_stem_layer(self): + self.conv1 = build_conv_layer( + self.conv_cfg, + 3, + 64, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, 'layer{}'.format(i)) + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + + if self.dcn is not None: + for m in self.modules(): + if isinstance(m, Bottleneck) and hasattr( + m, 'conv2_offset'): + constant_init(m.conv2_offset, 0) + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +from collections import OrderedDict + +def load_checkpoint(model, + filename, + strict=False, + logger=None): + + + checkpoint = torch.load(filename) + # get state_dict from checkpoint + if isinstance(checkpoint, OrderedDict): + state_dict = checkpoint + elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + raise RuntimeError( + 'No state_dict found in checkpoint file {}'.format(filename)) + # strip prefix of state_dict + if list(state_dict.keys())[0].startswith('module.'): + state_dict = {k[7:]: v for k, v in state_dict.items()} + # load state_dict + if hasattr(model, 'module'): + load_state_dict(model.module, state_dict, strict, logger) + else: + omit_name = None + if model.toy_replace is not None: 
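+            # toy_replace swaps a single residual block for a different op,
+            # so the matching conv2 weight must be skipped when loading a
+            # vanilla ResNet checkpoint; its shape no longer matches, e.g.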
+ # layer3.1.conv2.weight + omit_name = 'layer' + str(model.toy_replace.get('stage')+1) + '.' + str(model.toy_replace.get('layer')) + '.conv2.weight' + load_state_dict(model, state_dict, strict, logger, omit_name=omit_name) + return checkpoint + + +def load_state_dict(module, state_dict, strict=False, logger=None, omit_name=None): + """Load state_dict to a module. + Args: + logger (:obj:`logging.Logger`, optional): Logger to log the error + message. If not specified, print function will be used. + """ + unexpected_keys = [] + own_state = module.state_dict() + state_dict_modify = state_dict.copy() + for name, param in state_dict.items(): + if isinstance(param, torch.nn.Parameter): + # backwards compatibility for serialized parameters + param = param.data + if 'conv2' in name and 'layer4.0.conv2_d2.weight' in own_state.keys(): + d1 = name.replace('conv2', 'conv2_d1') + d1_c = own_state[d1].size(0) + own_state[d1].copy_(param[:d1_c,:,:,:]) + state_dict_modify[d1] = param[:d1_c,:,:,:] + + d2 = name.replace('conv2', 'conv2_d2') + d2_c = own_state[d2].size(0) + own_state[d2].copy_(param[d1_c:d1_c+d2_c,:,:,:]) + state_dict_modify[d2] = param[d1_c:d1_c+d2_c,:,:,:] + + d3 = name.replace('conv2', 'conv2_d3') + own_state[d3].copy_(param[d1_c+d2_c:,:,:,:]) + state_dict_modify[d3] = param[d1_c+d2_c:,:,:,:] + else: + if name not in own_state: + unexpected_keys.append(name) + continue + try: + if name == omit_name: + print('{} is omitted.'.format(omit_name)) + else: + own_state[name].copy_(param) + except Exception: + raise RuntimeError( + 'While copying the parameter named {}, ' + 'whose dimensions in the model are {} and ' + 'whose dimensions in the checkpoint are {}.'.format( + name, own_state[name].size(), param.size())) + missing_keys = set(own_state.keys()) - set(state_dict_modify.keys()) + + err_msg = [] + if unexpected_keys: + err_msg.append('unexpected key in source state_dict: {}\n'.format( + ', '.join(unexpected_keys))) + if missing_keys: + err_msg.append('missing keys in source state_dict: {}\n'.format( + ', '.join(missing_keys))) + err_msg = '\n'.join(err_msg) + if err_msg: + if strict: + raise RuntimeError(err_msg) + elif logger is not None: + logger.warn(err_msg) + else: + print(err_msg) \ No newline at end of file diff --git a/CDARTS_detection/mmdet/models/backbones/resnext.py b/CDARTS_detection/mmdet/models/backbones/resnext.py new file mode 100644 index 0000000..c0b42b2 --- /dev/null +++ b/CDARTS_detection/mmdet/models/backbones/resnext.py @@ -0,0 +1,223 @@ +import math + +import torch.nn as nn + +from mmdet.ops import DeformConv, ModulatedDeformConv +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNet +from ..registry import BACKBONES +from ..utils import build_conv_layer, build_norm_layer + + +class Bottleneck(_Bottleneck): + + def __init__(self, inplanes, planes, groups=1, base_width=4, **kwargs): + """Bottleneck block for ResNeXt. + If style is "pytorch", the stride-two layer is the 3x3 conv layer, + if it is "caffe", the stride-two layer is the first 1x1 conv layer. 
+ """ + super(Bottleneck, self).__init__(inplanes, planes, **kwargs) + + if groups == 1: + width = self.planes + else: + width = math.floor(self.planes * (base_width / 64)) * groups + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, width, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.inplanes, + width, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + self.with_modulated_dcn = False + if self.with_dcn: + fallback_on_stride = self.dcn.get('fallback_on_stride', False) + self.with_modulated_dcn = self.dcn.get('modulated', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + self.conv_cfg, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + groups = self.dcn.get('groups', 1) + deformable_groups = self.dcn.get('deformable_groups', 1) + if not self.with_modulated_dcn: + conv_op = DeformConv + offset_channels = 18 + else: + conv_op = ModulatedDeformConv + offset_channels = 27 + self.conv2_offset = nn.Conv2d( + width, + deformable_groups * offset_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation) + self.conv2 = conv_op( + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + deformable_groups=deformable_groups, + bias=False) + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + width, + self.planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +def make_res_layer(block, + inplanes, + planes, + blocks, + stride=1, + dilation=1, + groups=1, + base_width=4, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + gcb=None): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1], + ) + + layers = [] + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + dilation=dilation, + downsample=downsample, + groups=groups, + base_width=base_width, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + dilation=dilation, + groups=groups, + base_width=base_width, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + gcb=gcb)) + + return nn.Sequential(*layers) + + +@BACKBONES.register_module +class ResNeXt(ResNet): + """ResNeXt backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + num_stages (int): Resnet stages, normally 4. + groups (int): Group of resnext. + base_width (int): Base width of resnext. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. 
+ out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_cfg (dict): dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, groups=1, base_width=4, **kwargs): + super(ResNeXt, self).__init__(**kwargs) + self.groups = groups + self.base_width = base_width + + self.inplanes = 64 + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = self.strides[i] + dilation = self.dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + gcb = self.gcb if self.stage_with_gcb[i] else None + planes = 64 * 2**i + res_layer = make_res_layer( + self.block, + self.inplanes, + planes, + num_blocks, + stride=stride, + dilation=dilation, + groups=self.groups, + base_width=self.base_width, + style=self.style, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=dcn, + gcb=gcb) + self.inplanes = planes * self.block.expansion + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() diff --git a/CDARTS_detection/mmdet/models/backbones/ssd_vgg.py b/CDARTS_detection/mmdet/models/backbones/ssd_vgg.py new file mode 100644 index 0000000..f7ba8a4 --- /dev/null +++ b/CDARTS_detection/mmdet/models/backbones/ssd_vgg.py @@ -0,0 +1,134 @@ +import logging + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import (VGG, xavier_init, constant_init, kaiming_init, + normal_init) +from mmcv.runner import load_checkpoint + +from ..registry import BACKBONES + + +@BACKBONES.register_module +class SSDVGG(VGG): + extra_setting = { + 300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256), + 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128), + } + + def __init__(self, + input_size, + depth, + with_last_pool=False, + ceil_mode=True, + out_indices=(3, 4), + out_feature_indices=(22, 34), + l2_norm_scale=20.): + super(SSDVGG, self).__init__( + depth, + with_last_pool=with_last_pool, + ceil_mode=ceil_mode, + out_indices=out_indices) + assert input_size in (300, 512) + self.input_size = input_size + + self.features.add_module( + str(len(self.features)), + nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) + self.features.add_module( + str(len(self.features)), + nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)) + self.features.add_module( + str(len(self.features)), nn.ReLU(inplace=True)) + self.features.add_module( + str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1)) + self.features.add_module( + str(len(self.features)), nn.ReLU(inplace=True)) + self.out_feature_indices = out_feature_indices + + self.inplanes = 1024 + self.extra = self._make_extra_layers(self.extra_setting[input_size]) + self.l2_norm = 
L2Norm( + self.features[out_feature_indices[0] - 1].out_channels, + l2_norm_scale) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.features.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, nn.BatchNorm2d): + constant_init(m, 1) + elif isinstance(m, nn.Linear): + normal_init(m, std=0.01) + else: + raise TypeError('pretrained must be a str or None') + + for m in self.extra.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform') + + constant_init(self.l2_norm, self.l2_norm.scale) + + def forward(self, x): + outs = [] + for i, layer in enumerate(self.features): + x = layer(x) + if i in self.out_feature_indices: + outs.append(x) + for i, layer in enumerate(self.extra): + x = F.relu(layer(x), inplace=True) + if i % 2 == 1: + outs.append(x) + outs[0] = self.l2_norm(outs[0]) + if len(outs) == 1: + return outs[0] + else: + return tuple(outs) + + def _make_extra_layers(self, outplanes): + layers = [] + kernel_sizes = (1, 3) + num_layers = 0 + outplane = None + for i in range(len(outplanes)): + if self.inplanes == 'S': + self.inplanes = outplane + continue + k = kernel_sizes[num_layers % 2] + if outplanes[i] == 'S': + outplane = outplanes[i + 1] + conv = nn.Conv2d( + self.inplanes, outplane, k, stride=2, padding=1) + else: + outplane = outplanes[i] + conv = nn.Conv2d( + self.inplanes, outplane, k, stride=1, padding=0) + layers.append(conv) + self.inplanes = outplanes[i] + num_layers += 1 + if self.input_size == 512: + layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1)) + + return nn.Sequential(*layers) + + +class L2Norm(nn.Module): + + def __init__(self, n_dims, scale=20., eps=1e-10): + super(L2Norm, self).__init__() + self.n_dims = n_dims + self.weight = nn.Parameter(torch.Tensor(self.n_dims)) + self.eps = eps + self.scale = scale + + def forward(self, x): + # normalization layer convert to FP32 in FP16 training + x_float = x.float() + norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps + return (self.weight[None, :, None, None].float().expand_as(x_float) * + x_float / norm).type_as(x) diff --git a/CDARTS_detection/mmdet/models/backbones/utils.py b/CDARTS_detection/mmdet/models/backbones/utils.py new file mode 100644 index 0000000..7724315 --- /dev/null +++ b/CDARTS_detection/mmdet/models/backbones/utils.py @@ -0,0 +1,100 @@ +import logging +import torch +from collections import OrderedDict + + +def load_checkpoint(model, + filename, + strict=False, + logger=None): + + + checkpoint = torch.load(filename) + # get state_dict from checkpoint + if isinstance(checkpoint, OrderedDict): + state_dict = checkpoint + elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + raise RuntimeError( + 'No state_dict found in checkpoint file {}'.format(filename)) + # strip prefix of state_dict + if list(state_dict.keys())[0].startswith('module.'): + state_dict = {k[7:]: v for k, v in state_dict.items()} + # load state_dict + if hasattr(model, 'module'): + load_state_dict(model.module, state_dict, strict, logger) + else: + load_state_dict(model, state_dict, strict, logger) + return checkpoint + + +def load_state_dict(module, state_dict, strict=False, logger=None): + """Load state_dict to a module. + Args: + logger (:obj:`logging.Logger`, optional): Logger to log the error + message. 
If not specified, print function will be used.
+    """
+    unexpected_keys = []
+    own_state = module.state_dict()
+    state_dict_modify = state_dict.copy()
+    for name, param in state_dict.items():
+        if isinstance(param, torch.nn.Parameter):
+            # backwards compatibility for serialized parameters
+            param = param.data
+        if 'conv2' in name and 'layer4.0.conv2_d2.weight' in own_state.keys():
+            # split a pretrained conv2 kernel across the three dilated
+            # branches (conv2_d1/d2/d3) along the output-channel dimension
+            d1 = name.replace('conv2', 'conv2_d1')
+            d1_c = own_state[d1].size(0)
+            own_state[d1].copy_(param[:d1_c, :, :, :])
+            state_dict_modify[d1] = param[:d1_c, :, :, :]
+
+            d2 = name.replace('conv2', 'conv2_d2')
+            d2_c = own_state[d2].size(0)
+            own_state[d2].copy_(param[d1_c:d1_c + d2_c, :, :, :])
+            state_dict_modify[d2] = param[d1_c:d1_c + d2_c, :, :, :]
+
+            d3 = name.replace('conv2', 'conv2_d3')
+            own_state[d3].copy_(param[d1_c + d2_c:, :, :, :])
+            state_dict_modify[d3] = param[d1_c + d2_c:, :, :, :]
+        else:
+            if name not in own_state:
+                unexpected_keys.append(name)
+                continue
+            try:
+                own_state[name].copy_(param)
+            except Exception:
+                raise RuntimeError(
+                    'While copying the parameter named {}, '
+                    'whose dimensions in the model are {} and '
+                    'whose dimensions in the checkpoint are {}.'.format(
+                        name, own_state[name].size(), param.size()))
+    missing_keys = set(own_state.keys()) - set(state_dict_modify.keys())
+
+    err_msg = []
+    if unexpected_keys:
+        err_msg.append('unexpected key in source state_dict: {}\n'.format(
+            ', '.join(unexpected_keys)))
+    if missing_keys:
+        err_msg.append('missing keys in source state_dict: {}\n'.format(
+            ', '.join(missing_keys)))
+    err_msg = '\n'.join(err_msg)
+    if err_msg:
+        if strict:
+            raise RuntimeError(err_msg)
+        elif logger is not None:
+            logger.warning(err_msg)
+        else:
+            print(err_msg)
\ No newline at end of file
diff --git a/CDARTS_detection/mmdet/models/bbox_heads/__init__.py b/CDARTS_detection/mmdet/models/bbox_heads/__init__.py
new file mode 100644
index 0000000..a668bdb
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/bbox_heads/__init__.py
@@ -0,0 +1,7 @@
+from .bbox_head import BBoxHead
+from .convfc_bbox_head import ConvFCBBoxHead, SharedFCBBoxHead
+from .double_bbox_head import DoubleConvFCBBoxHead
+
+__all__ = [
+    'BBoxHead', 'ConvFCBBoxHead', 'SharedFCBBoxHead', 'DoubleConvFCBBoxHead'
+]
z)=PZ$MYesYvoEpd*rjJW-ZfEoMb*8?o@bX;T?=(rRb9Y7uOzfl(^WNJU@xeel^?{e+7E zstuGj(|#I9Wm@DxlrulT#ghR~grF)s$4OkKkCSZw?gvK==?@D%r9=s&YK)d@^)VGV!Zzz*m>iBzuh&ZRql@y*_?*1&<%Dwkqr4PF%UqeUI+D%|08(`zsyyG^W=KL1F22*OKUTHI9Xv{Fm$`BVTQ(P>~ z@9GumL#r}C?aBmo%0^|)sMf8tq097bO+AfmZ4+GRRW_(!IiSs|0otn2GHh2K=n5kl zt7WJ1m5eoTO~tK4ia*7-LyP~Ugj0}I_2yjelgy#_bxpmu`QPLH7V4ZCjq5z#Is8|Z z$N!00xx=i=+0>@3d7GAMbAi#=wwZ>Nx9@SRgmy&a?vVxBy(n!l87G{RhNK&YX`Ys0 zC|B+Xk#Jh%w>dAkY?D=l(c?ilE{aULsu*Wt&XEC-f5|p4(y*UICD!R6>75iKol z=3%@S<#~cHoB3oEDuYUdY}L*jj*=+Ht9eP3OvrTzuiJ&uATI=bTDAxV!X%G&vxIR^ zja}IT4arlS!UH4ErFZA?;64U@gTdR6C*Zu7=1Ih*(~lurs3-dA0Zeu$gD@QpM7J$1 zkz{@T9K`cWM4l&dnaJme1Vlbh|vzU}2OGhE(0$sdtK8f(B+NXbf zeXkfLH$d~j{v^t;$NMZ2*TuLH*GD6kl;lE1Uh~&S1t3mbSD;5edWzi{@AGZ+wvJoe z0MR-tdP{fI&)2u+oGKf>?i<8)z&ZK~ai9ll$8fN7y!cpYJhX;3)0r_db@&nLVEd?RFnwmgn_$QM&lhby)z(~U>m9dM_G#7jIrz)XzkTOl z4xZf}{O3>qMyaR+ z3J96$Q_!J-+k{edX3h-MkgMMSwW&pp2s?#4!@uJx75s~{#1G}^-#nGPo$@;Y0Tk1XH?Pv&_9Vddk0c# zZNO-JXk!zd zVm+))01oNH*+SAnBD#h&c3zPtg4Y>seCprIQc=dmX#6@YuAmMa?cl|Q8jym_Gm)KJ zSrJE>cpc2~H^0-sasb}(z&IHt{|1reb>eqWr>0pC>38X=O)40a|I5xkK!@}B|9?8m zqFta_*Z7}j_Tfoq%Iwj8fwWz_#@YaFIQieHS50N?y^e^G*rfRY=qr#1A}TEAsn~iH zy%iqkd06s@B2#}Ll}5%$K?;GD?6qkr7DY~`&EEo%%|%c`@LDCeqKZ0fowXt9pV-KK zd@Lw}XvPY#mwp}HSo&FTwu6n&urK}x_>A8If%Ur4c%1ErWpOWdNgOUht_E)9fIkOQ zYBO!>R!CYOdHjBz0O;2en8f zTxQ=wdZZ-%Lj^2*WWo6e;J;HDLl3vF@Qo4^e_NY2^I)moW6sR1eDr{~eWXm&CcbV~ zO}I8{;NC>-TFPPsF8Xxg;Ql-?U{1J@5gO8`P~MN?63GcN1U69tGb|n>mq}To;;=N4 z-ShMKM46BZ%l!9I?6#!E(zul7D0(Vw$cXo(DJCOn_p>4@5o5|^RL63HwA$RhM%>ql zP<-#U>VUFX6zSA9CEMybPfd~9D5Ra6#=JBSIQ)GG!gokofjsX~!6lbWa|YWx#wQry zZ=$qCAMN4`u>+!cj_&H0bZcQrEm)R|pQB%ZrLC(s6#HFd2M6tib&-@M19Bit8PK;8 z`%5f&iCE4r1A~p3UYaE`;u(V0a}=^_bGTXBFdpoRm~m*RVCKR=$mIHDKG-H|-=nvn zwD6Qm4+-NvWYhN4%d_8XGxTA&c(0`VvhwCKu-_Ue8N(*BX&CZ0V$21N*@u*E9JkV$ zv((C5YUTWu&aYrzb_uiQzlOQn&!eyWwX!vAS2i4DrSgWW6tVE^;9f(}*0HZ^z<}-N zw7L$CLvlUkJPnM~h1{<}no9w@Tqmw-3^#CZ;yyRCKQg91jNY%B)I#MuFl&nF@MaTv zqRP!VIWx@L9HluYusEV(oaRIGMzY)7DXFQ(TP($kY<+TQok!3qUe_} za842W1R|v!XGNY!2f6oX9CF&2VdKn;QHYFuyf2NikPb^8r*XnhY@U!sdMbGc%RMel z?5SlF;zMj7C}EyEgtV{)5GPnAJ-iFWUW5%?OCUaCdtSS31JF!mX%g}}N;ER@I`YxB z{x~qzeC=IlqM%$MY&~iE45Jj3?`fWobmgMq>KFBvaRGLJMZcmF8Q)lkKWrffc2R~u z7zgJU{;(_!hXr)dK>jCU@(j8*a4)>%-%x=4;s+bAZ4;n7M54#6Lw)L04(%*USk6p; zWQbQv>%00un>L>Qtb_$p?F{+R$3LFBr9-LTX-Sp)2)>5BAo+qvasatH>>N3F?MtvH zqw)ZzEqE0@VZoJ)=L&$Fo~z2wnpLZ6S1U7X+F^FJdO@46RjWhfPO$RzYOPvl@azgU z|2+eM+W;(gs`YJTZMyc!8-IoxK(G^CN1pK@>0{p%)Jqhw2}gttRP+S1A=EcY$j;JNwE_LML3EcJpDg@u``~6bz=_Cw z3O8SRL2YHvNKs%;DHi5VUiz(C0UtvHce7}K;pd1$I81>W(|(Xd@m_#$QaV3TT3(s} zO&jx#f zh_rc>50V<0u+hu0u_*v&Xqy})vV&9$(1GNAzur`)t3!R*JHdyr zr*Mu0hP~#=x`&)o&nm1xN|JHUBdQ|&Stx6BKv0GgHRnV0?<&NyAymUz{8f`j1ZPs5F_v7!$CGV-R(yLvAi@rCYT>lJ4>BIVA)&KB%;Wobim` z@$aDSpK%N7Uuy>{D+rK3uOo9N|GEf1Fb=j)`Pec5VRO3J26Xj%OGV=@{>4!6gNNF@ zb?BTmk46d3S^Y4?>D~lMQ7Bts_{k*7=5HEd$ci`&`JZ4=>Wq(nl{l3T{vmOHM1(RC z{w*RCBHt%M6O!%ud1IVp8Kf)@4=@VaF>pHmQm4LcFybbNYg*>%ttgHQ7uyL3+=a`o zyW+X)uG_lkHZ;BUx%IA+&^=#6wW~H#J9*Augt*?uadD?V$<_8O%8+O-cuw1w-qxa3 ztq8F*Dbq~U$H~4r4BepEQHR1`CkyBO`CKPCnz)C=X5kB uSL-5@)w(0;ystE`rf{O4nj`H<_g3BVb-+k{nnZZpLdJN>0M%ROivHh>%i0M5 literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/bbox_heads/__pycache__/convfc_bbox_head.cpython-36.pyc b/CDARTS_detection/mmdet/models/bbox_heads/__pycache__/convfc_bbox_head.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4baf5f1e0208f63188151f71e08bf4c1967d7b78 GIT binary patch literal 7729 zcmb7JOLH98b?*1{^gJ*afZ#(kJ*Fg^V@rTdD@yEABt9gXaU{ec<R_iX3x~M z&h4)ST+QX31@aOW-a=P2nzu`hBh2dX6<|o6mL7uAaNMZjBG`MnU-8C>ZwL 
z_iny>A2r_Xas23madFk2Oo{*;t32H||BrAUp_0w?^asZhtm}g%N52aJ(IM!m0Vx$n9*=WpXI_cT@UT8I6>3m*?^B#UV(}s!I6Y-MFr@$l~8n5>6`AbHNM>l%4w5`^I(-d0tV zpL1Wkt=vuyCXrGuO1bO%gLshoe(nuozrPd2aWu+n;T~ zitAzSh2zI@66{SzQSRN3_79>s9R#DiasNr&zZInYoz1b@%dI||=aoIVMhT`#262)G zaX-pe{bUmL2U}14Fr4_?lLJ3K*y~!kl|-X0wS@Jl78j}S;k&B7-ya1@;`@KE{rcX< z&Ui1n4jOMiISAs7{*y3BHj>FW+1T3)qcrNLgK=EsH!ycJN;V!o93RTPO*Xiut-e3! ztWTb(CZ^)8OI`!f^rr6M=jpD#s#|&+@0Q*&J>*yQx_$|1Rd1S2ypPsSUE}rHmiBoh ze7NsCklRSfmqBo{xE)jd7 ze7`s_0S65e1uR0{0*h5OV2d-YfESx?H^M^Hj zWM^i;5jp}Lkl?3PKnI}m6m(Sb$_KXuH00j*<@+!O&u@+#ho6fa8Kv9KDxePV!d%sk z-OLX43tHx^0)#JV$JMklbdjp4cOmOKAPo4CRe&DkdzlM)uMBIU6IPz-Q=Fk+pZ9Sy zjE7mOnS(y==k#fu=tDGkaUP-(@veFJOWczsL4}_kduc24(6>4@KGD@R?;^V$`l3~EpG{NcRrT!>k9 zZC<_*mLrvjB7C%FqUhcg`@wjz*5d#>8E!qu=A)}QJ3=hR#9g=iX#<_I#gyInq{wb9uLx;4lb#R z!VYhECy2w&R=+a}ow)(SdG4Ns?!Y|i_R=;!jk1(Q&wim;*P!5l*^cQeZ)G^Ot8-f6FHbrap zX+iLz;&hN$POjn_FYL)|DQo>y7n{;JuTlGE7LtwRoQHk4YbV!J^JC}-u^ zHfdk(8p)>A;i4xy(!x+-2Ky2b{870k6WZfj0RC z@?WZJWv*Isk1`y@kmFosTGhS<{E%^Li+*UqkxsT?d8O1-|APE%7UoxmFPCd8a-H;* z;j3bSa|dWITUf=f!2JsHoco$+%`S5GCx*i6hhN3p3a!|MPDVYne!Fbn$=cHTYhqE` zn{u(Bjuh=dMRe)DR%oBM$Pn$*yqP4Cq7=QS)L7*%B@cFLQs!_;^A?O+zqs^1g)zyS zCpfsxd4pwiDywU=;=B&SoUCJ!b#)oeFH}dq%#wdn1LjMxaZfQ^Y17}@&Rr@spSk+O zahi@Ie`}zio-6n^n)7t>=oIF)bo|6uktpPR#fLv3OThyC0w@Hq^ZKbT^vV)e*;^^| zrxtSYw+!T8Cwf7n&Ud21EXSxb1M5Ha!@*u&UF=7t+>2hFnR$zboI~(v+na;zugF%uWh_>7|I-=tpI4wn@ZSdmH(;dqan}F}jOvW0@l8g)V zAdIHriaXS#jy-?-lvbJ>4tM$lTs{;WCb1m;76_f0$4&~jXQ~~;X$KfkXj_N>ox(bp z>Tq}e1BN)vZCDIn&zzWIY90d^Lu0q9KBZATv=~d68ffvUlv zKzB}!Re)xVV@S`FJ(|Vr`<(~0p%#1LiAv#K{3HWS-au`=gvz_fHw0quQZ)>l-_nkq z!*659U~OT?)@M70-eI|COUE@B?lyM1MTG&U0c~}_^!`1WdFj}Jf`O{pGGSjV9esE+ z3}-gOSe53$!%^>;?<#eb*{|}i7wfY*STUB4BdpmoCe#`bGn7O{gAPq z+*A-PW1z$u%WHy51(1pP+cg$eu=o&NyD)#nHlB-)2nu}S8b)3SRKZ;q>CU|(fc5?$ zf#D9zW;+rY%+2j6&7EK}iQ=&ECwODe3{uFfNc0c5_yv%_wh9V4ri69t0ssZp7qEB| z>q`!HrU1+8ASpFMA?^=3cKuX8eUP_gQ%?Ff-eRD<30OK&w=h@>$NPI2@}H2Bdmx%; zL1^Lp8*R7(EyOVD5Nt~~@UtMsmyj&{nsXjokkir~_B0(p0Q)l*vIgKdx_VadUsMcz z`hI+9oG~CO5b%%400OKM0_*|;)b|eGm;=ByTurf+)Ou!>08pX$rQ)v-;c1G{T`5Iq zHkMn6kot$iTXWHPb3WEz)503Uw=bxSTLG}R7?%P{$f%Sf4`t-au?2e!mYIl%arD~% z`|5YAg^C&Cqtz~n;Qt!J39@ zgX+YQz|J4K7{85MPA|Y>OPK|`m+8mo2_?f-c=J`*sJHijj(WzTwa~;$KSc{m{WGYY zNj>Tuq+%``bJ;Ri{nT7&DP#TX1MTnv`ZiG8lrMF_cRtbf{|B=;vss|nZg3vF*JUPY zg?b=GITK6(td}-^6JxE;zX~m7M1B9RNc9rNP-xZSYGdpp%n)t33@&%AouR#OU8qMl zGe!0hU+2{;;s-pi=l_dG9g#-@15j+gKP6+6geR&*XSj1S)WW5EjOaFFJaSd(n5qv! 
zaub$ZadZESJh{F%nEQz2Cdprd08bXt5JrZ3_0bqHaRkz$xc?+Kw)#E$kmf31unY@p zI_V=&kXI$#JwZ_5EM~lnkyd$&`4O7o;Re^C3JCbH2=Qi&s11%~pa#DMk9VDxV9{n3 z*5Voh;%DyuECACic2TzSw-G7|^@k*HgKXjvN4{&^+3b2}0ycTY_rq}?k!N~hiuz6w z`xHe|qC%e+$e|0z4Nb0rxUOXax<2FAyp5>R@4GM6UBzB~r1>+`a0yMDd9bGlX^Aiu0Sjan7)vqb-vpul3p2C-&S3CD99DyW z7RV)(t4EyzMguQ3CE+e1>c#8RtBQw7U$j?YmWGKap&znkz?NkY|FlPOlXpZ6d)>3} z$H&F9A2hfBF|`~pzfC>mfcBa5D~|1RVr^XYJDku!#x>+C@b8tIj^UcF$8lla6_=Wh zqcYJhcX8kdEaKp!MRfOz9;KuTvO_g-+&`$d`(7pd<<< pAZK6fDw@5`uTB_h_6kuk>Sp`Z_69`cue6}4{~wzu{-*!{ literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/bbox_heads/__pycache__/double_bbox_head.cpython-36.pyc b/CDARTS_detection/mmdet/models/bbox_heads/__pycache__/double_bbox_head.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67f88be921213ff8f0ae6521b734993603fc5bc8 GIT binary patch literal 4624 zcmbVQ-EZ5-5$BQ=MbWb3_?(Z+{5yC$`7pq+(VI~X#a|P>EF;lq3Gkj9s1R_g;%2PUd=zKlHg&8AA+BfrR8{Q| zOdMt@tv}>xw=1N`%djhpVOaLXY*20*(B2khxE&`u+eI#Bx5SQ!amH=#jGeIy=c$ja$Tf1Cq}(#{nvvILawD_2_u9i~udEMza!t0tFd`Z9tWr zBwYb%xz&idK)wg*GZ>(bL(bwvv;tjIqg^3oKwA9BL}3)kIyGy??W)IBy=az@a(HuE z?rS^GwfjUo{L!9ye zUNhzFu?^0KAK4eILkE5EBuGHv2ds^Qx&RIGB9ZrrTn2&kLp(Iw)BfD-4lF7^q9KNS zp9a(sHZNnze~E@7pRkx~2Q0wt<32d|=2oVoTRxq49uGwn=^%<=R^Tf5W)wXKyiI%R zQN)V`VwWP+m{opEZ(W?8U?Q>%9y4E4i^&lEl9p!Nid;*Pllu*~X0!95ktPt#FY2XB zJp*|YuT6jfv4PvbYT!078n{h(O}K5+PsghgPN&Wn`4Vv4=V$mbo(+TN0gQZBuiiIa zvIPu!xHZcf@hyUNcv4876pobOgr(KcE0kBlCs(%G?KyxF48Hr?xNQVhz zLO;ck3a{KgWz=b!e}kD)^vq~kq%&kb?Iov8fA^9|@pMVugr|RJFdEJc%Ur! zyOo^qbc}gX9T;mx{@5i<9oImEF`?~3*@T`&-2Q~dErB~Tf$nG-+*!k|fO}8=g}b@W zF@LbSY90FJxxsnx{M^SJ8E1yNS|g z4FsRWRDGtKL?;EuAZwybevfoFP_jg(TG7i9@`}+LZOb@M`g*}Aoi>(CH*`+hjbnBN zPB_KvxR)2IOcNAxv`E5nq2wicmG?N!MJ%<4iI`xXhzR)sZPcJz!=RcZSeDm_P>m*k zN@Sj>w6_C5>nXiJd*kVlbj^UpAdNv6vfdNiiU7&-n{0&ztcjm@{Bt~DJD|{BWy|&o zJNWQ47&wM^Iyt&1GZQKqc;5t50?;ScA=|r7E;|5x-?x7IF@gHPE#Z0tINreH>=L;! 
z3IwEk`?WiPOFv)z0!aZF=7KPd*@GeAk56B49b9f;j@d$CWETq#?IJW?4L#8WnLYA3?5{5FsaOrMHbt$^7@Y z6{SyOWakpQ$PRw+ZsX2lb#7W4Q|G3Aj7CY`X^Pp`1i#dx0@kM1-X~}|19xJAH2GQn$l4v@BQbifB*CI?O)x| z^--Rp&={hl$>D=C-8LmX>`04}cZl33asx!ytLj7PAXRCuN+6Rker#+gDH>b$v>Y0~ zWh&jI@5tz-NPGQK{SiYeo3SOfWG@2^4n8{7W+!JQ^ai>pU6Df?XZ$l5*u*mBo3lr$ zfug>7=pyBq0`B<=VjDJ;SIKs>q7Lrq3~C(ZA9p6UDfm3o2aBSQP5WH*d3%>Jvkt8* zhXysq^|3dh!qI!)H@X|tK150y*Sa>WUTYhNG@lJe|HUPrVwtT#{tOREm1vbv{8>8E zF21i{qCCfU#j(i^7@K3jk(fHOwg)Iz8V>{9Xd(M(*U*Nkt#?trblg4UB5mVtV0fgG zLW;Le#1x2s#Yz-av=+b@plkz(0l)jq69j=wKGjM6o@Kk}gYu&S&{ z6{jSB(_V$u)koNwY99U(sbZok6y}?%wntmi{>^H{ZHAVj@4x^#j`AOV&dSXt_CHc4 BbX@=d literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/bbox_heads/auto_head/__init__.py b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/CDARTS_detection/mmdet/models/bbox_heads/auto_head/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33c6bbe393da5b2a60e276e0bd7d63a41e1f75fe GIT binary patch literal 167 zcmXr!<>j*fm66B*1dl-k3@`#24nSPY0whuxf*CX!{Z=v*frJsnuMqu={M=L{Aeon5 zS(=!qpIn)eSgc=MkYB8yo12nalA2tSnV$z?>*waDq~;XsCne=q#Al=?rWETZmX_p$ gnfmeZnR%Hd@$q^EmA5!-fGSIKQtd#t6$3E?0AJB8@c;k- literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/bbox_heads/auto_head/__pycache__/build_head.cpython-36.pyc b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/__pycache__/build_head.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d8dffb0167da47e2d23a930f751c148ae0e970b GIT binary patch literal 582 zcmYjO&2rN)5Y~@ll0f(?2adjhFZB%Em=1+dW++T^Xc!K;WNb-JoLH7CD-3R(OT(G> z;8l18T{-azJ+V@H=+5kFzt!&Q+tusQXn1_HO#cvs{-Qeq7`+2+o`d0t;}S_c!x(YG z=}$BxJeZ=m_YcE$jP3oiEGrdTe-bHYF6M;Z>IXQRb1)tBSr7E*4z17*ZE=TJn3Eme zk`B7AiGFFwHNKbt3IM$^G)Eo&g{Q~_ytnFgkh{azxC%JLoW|khi}9u?d9;L=BdNG3 zqg*Q)F;(ToBH{%zhrZ-80jje#Pz) znb5;!BJGJA-1q}r_zn9zd;<7;S zJ5h&u+sN-8BGzHuE5y36*IR1-2E!+)-@AtU(C``q?_;Tb{q#iIF{fwyG}^zF4y1RZ zJD_~8ReyME0MbFL`v|1ICduQWG*9Mv2s1exau=M3>YmlmZv^oWgb4YVA;{5shQ^p_ zOuuq`P33ikeEk{Hk#9hxw)BoiqqGRmo#f zg=@4V30WeHYLKZ`6X1HS0XJ$rF&SoLf|rCj+X!@4qtlnl)^7J3*=b}5X-O7F_+DGs3uobirjzuljoOAg%-X8m+Nlj@|6(jL z#AVjb4gx>!!aD!?6kR+7`#Vbn9(ER9b+`85zI#i;pVwZp$uxE5{P`(>1B&7fWv7@` zG@J%`9;Mn7HzBJcj#LA zB+j5T4y7SvCn)^`y`Y0>kwtsJ^T~V`MRJLA20C&pWCPK9vB)Ezz`nxF@F;* z`x;m8QQ`}!tJd*%Vb!wtl-?Q_9O}&WU?J2G3pF8LZmtJ(W7F4$yayJ;zNtW!2U!H5 zaw%tNt-u9mywS>7}(NTs_b<1c-0)u5sjElk~~-y9tueS7rX7bnttyh7$4 z)(UEW+*acgL#6VdNySw))d*gF Mrmh17Tw;^|0a5|12mk;8 literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/bbox_heads/auto_head/__pycache__/mbblock_ops.cpython-36.pyc b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/__pycache__/mbblock_ops.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86084014cbc5ecfea89667736f33bf4f5d80a65a GIT binary patch literal 8857 zcmd5?TXWmS6~>JK2vQfzvYj}Ni48(;bQY6SM zK*v_8m(bI+5BdSkWTwxV=`ZMGJJTPrPo3$LUit&_)bpJMC{ki(bjd@Z0d{eAv3vHM z?_74N8&gxk+uv?R|9sXk{$q?>7pVSIy39y!iFG z)#`miJS8qYHpC@b!PLNBohHiD;xbV#=adM5cRSSR~OoAYpqUwE9`XBYu!i6zuk)3Ya+Tcg+6P-D-gN6nN07s~tK?p@4QD@m6q(!;~Y z17mV8UY~wcLBs5jCG#@I2F7GIo}kJ&8^R_=oL!$7DJ@HUE@$Ym2X)mf<%>ouf_!qm zRSJPix18DRWWEIc)l1+0vN9Q@mOkQ=xZ-YA$1T^D5+-b&nE>Ld@28O$!7 zYVO=PVPeOe_vwlAA|c>4W8l#>a|j(-H5R;&}jrQ9^`5@UIJQ3s`I7c7^@{@rxMPuj=U5XN(_QK)oE>J_Kktnv<8NWy@V0k!qBR4R4MIs zW647(KMWg5BMU=Lr76jE(zvy*UrbdF!+I-9Q)U>mB)dB*jY@kp*CG 
z&lL0LZ8aC`N}_fghAId{+Q{@;s8_@AvtHEFEfNcnw=m6AzRr5wY87k6g2^@)(TUd$ zkv~s2uiH1Pj#dIuh8IhO-crb?(4=r#mzJ;}mr0k{LyzjxQ`1X&1Glo-TVHR*?3oWq z@MyY~3pBBO3lFSLxf`<6>!y?^L!9z9J&HArD-_@DEA+xRM~C$aUyJvf$fq)|Fn z*s|nw-&`TYHO+zXsk3A4*gMWnsc)nGqOs#ri$yzIdh5G=r(YVBcD%mVq&*;k+=Orj zbO13hn2MgXC8J&r-U%;m9=EVb(j*3M`KRjD4~5pb>wrJi>Xqm&Ga|W6T;G zy-qK!`C1k?(b-&9=8`hsQ|2eiu6H+-Oj8#3{fZF#4D@e5Z0iloGH&(sQ3U44gH$!TW)$Wv%kQ;)P91LIBy7t z6kF++g+=PWB}k!}(<~LL>sLt8UOy1-u1{{rr8cigwWixF)A^Ap{9T(|6y5`2Y?b97 zxkn|Zlm`~I>ao9~mg<1|hEvHR(E^(9@7Z*}{G_o{?N{j_p(3ig1TJcy8>J860H^vs z1N7fC3Y|NNsa-PxH+sZbc8@d5@;zF$a*|G>?T|tC5n&5?CE+O&qLFOyu2VT{jVRTi z^VXvzB%lrYGx~Hw2AF7>Bx4} z3YGhL+}PO6V479|<^%9Z%D9?qGf7VoK58DE6;^^H(D$dH(u+LX2+IDq}L z7**MdB^^WYxfF{_@6Gibz#3;<5_ty;P5vCS2RuV7dKAmap6PAsm0qO>LQU12wS2O_ zOJsXhbB^j&)2BaB18e&%7O!);!#)e3#=S&$irkI8kU21S;Ey;a^{s(^FJhDY4fWG1 zfL+w=ecvJPqY0Ly9~>hPP4Gzy*}zqhgP0p(s&Eb^ZG(DbM6uI23jpE{-G#Hbz*b%- z%J9OvaUCPYP}a)3^&+65#GV4caJ~e{`6RpqB~C~;mV zKf>#ocSO|98V}+;Ss)wnka57AKSl?;9X(WbBgvG#UcZjVV$C^-^fl@^8K=LamJ}cX zPJz^`=5}S}-N`tF${B|M%mEw{26wE?YT7^}W7E+!qLEu=T8>Xw3i@8czeq$Vji*hj zDKBkov^x!PB*gXe2TGwAF@?Xf^u49N`$_#GhJ9ALlZ9L zFf_O9^fJXObIX>uGg}|6)4?;HtRl0fSm5rK0o)`2bUDJ%Zy}U!cq8e?y%9Iy%HPvO z+R0%`hNKKp<876v0SP`eAlYZLF98$lTFRHWOJndxXGR(OX`e+ESS$2Qh zXNsQ%4#Nx(8^R2@EMTV7Zj)VGkr*Nwn&itKU`~u;P?kTO>vW)iRN5Ff2ZgWoiAjnJ zEx5*W8;^4v?1hj=BgYxdqkVavzR+F)9M=)CaiJUnWSL&Z0g?m4qBNxhztksxMZSpv z!EL3B20Rx5g&-tK)b8c&B?gJRtf4`v-aabeqxJ!z;~hIJj^wV!2Q$q1Amig@`bKXQ z7i;B7;85lGVK+)r`~-nj>G_cLYS>ML%^#?F2pfu@2uZW~dCy_gz>0>b0apcT@*p|p zek=)=Ws@F9ax-60$6;g)Xx~<_Q2;7;>R{l6i8DmHjcGKvraeZ*n8%q6E}y}L%^ASu z3U3TtWj@i*gEpfJh{6T)#Le#KUsM@K7a<2xk>g??m@4&4^mGYDM(9ziGf&BL=Cf0( z5WaZRPd`Kjl&(6v0re_#YlRc(5@C=Po77f6J;g zPleKMO1f!eof6yfACxTP_eOsJ>I|d0tX*FYZEpnH{EN#Zd;{7aZG80?T;IG+QF4t4 zwONgI?49<5Gyi(0+|4KrDT8G!GmM9Jz@U<^;3I?#Wl=Y5iYTt>U2?5D$wK57zSy@7 z=9Ncop#g4CRoe|Q%)2MY@;_6*Vb1BA#h-6Yll9=9=Zq6s=k{#=Qr75}7F^Qkb=~pR3*D4d)4s8yNZ% zrp}G~y#Afd)MNfX73Wlm_H*>ZxRrEODeK7krt-PCmLxhKJzKn9Tkj?OvwPH1!G4_+ zI#szFW%bQv+J#ZRI>rA&P!Lr4J`Q)#CO;*qDEn6*t|<2dKJ4PNDSZ&cHpRIy=OY|+ z*c#!&4g`JrAzvSs-7z8OpV0%`tH7+<4sE*v2X9CDn|9{PbF*`o=g!Pc%}xIgC*1FC literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/bbox_heads/auto_head/build_head.py b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/build_head.py new file mode 100644 index 0000000..a50447a --- /dev/null +++ b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/build_head.py @@ -0,0 +1,22 @@ +# -------------------------------------------------------- +# Copyright (c) 2019 Jianyuan Guo (guojianyuan1@huawei.com) +# -------------------------------------------------------- + +# from .darts_head_search import DartsHead +from .mbblock_head_search import MbblockHead + + +def build_search_head(cfg): + """Build head model from config dict. 
+ """ + if cfg is not None: + cfg_ = cfg.copy() + head_type = cfg_.pop('type') + if head_type == 'DARTS': + raise NotImplementedError + elif head_type == 'MBBlock': + return MbblockHead(**cfg_) + else: + raise KeyError('Invalid head type {}'.fromat(head_type)) + else: + return None \ No newline at end of file diff --git a/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_head_search.py b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_head_search.py new file mode 100644 index 0000000..f0981e4 --- /dev/null +++ b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_head_search.py @@ -0,0 +1,54 @@ +# -------------------------------------------------------- +# Copyright (c) 2019 Jianyuan Guo (guojianyuan1@huawei.com) +# -------------------------------------------------------- + +import torch +import torch.nn as nn +import torch.nn.functional as F +from .mbblock_ops import OPS + +PRIMITIVES = [ + 'ir_k3_e3', + 'ir_k3_e6', + 'ir_k3_e6_r2', + 'ir_k5_e3', + 'ir_k5_e6', + 'ir_k7_e6' +] + +norm_cfg_ = { + 'BN': nn.BatchNorm2d, + 'SyncBN': nn.SyncBatchNorm, + 'GN': nn.GroupNorm, +} +norm_layer = norm_cfg_['BN'] + +class MbblockHead(nn.Module): + def __init__(self, latency=None, gamma=0.02, genotype=None, **kwargs): + super(MbblockHead, self).__init__() + self.latency = latency + self.gamma = gamma + self.genotype = genotype + self.last_dim = kwargs.get('out_channels', [256])[-1] + self.strides = kwargs.get('strides') + self.out_channels = kwargs.get('out_channels') + bn_type = kwargs.get('bn_type', 'BN') + + self.cells = nn.ModuleList() + input_size = 7 + _in_channel = self.last_dim # usually the same as input channel in detector + + for _genotype, _stride, _out_channel in zip(genotype, self.strides, self.out_channels): + self.cells.append(OPS[_genotype](input_size, _in_channel, _out_channel, _stride, bn=bn_type)) + input_size = input_size // _stride + _in_channel = _out_channel + + for m in self.modules(): + if isinstance(m, nn.SyncBatchNorm): + m._specify_ddp_gpu_num(1) + + def forward(self, x): + for cell in self.cells: + x = cell(x) + + return x, None \ No newline at end of file diff --git a/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_ops.py b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_ops.py new file mode 100644 index 0000000..c2c2ae4 --- /dev/null +++ b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_ops.py @@ -0,0 +1,169 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +norm_cfg_ = { + 'BN': nn.BatchNorm2d, + 'SyncBN': nn.SyncBatchNorm, + 'GN': nn.GroupNorm, +} + +OPS = { + 'skip': lambda input_size, in_channels, out_channels, stride, bn='BN': Identity(input_size, in_channels, out_channels, stride), + 'ir_k3_e1': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 3, bn=bn), + 'ir_k3_e1_r2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 3, dilation=2, bn=bn), + 'ir_k3_e3': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 3, stride, 3, bn=bn), + 'ir_k3_e6': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 3, bn=bn), + 'ir_k3_e6_r2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 3, dilation=2, bn=bn), + 'ir_k3_s2': lambda input_size, 
diff --git a/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_ops.py b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_ops.py
new file mode 100644
index 0000000..c2c2ae4
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/bbox_heads/auto_head/mbblock_ops.py
@@ -0,0 +1,169 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+norm_cfg_ = {
+    'BN': nn.BatchNorm2d,
+    'SyncBN': nn.SyncBatchNorm,
+    'GN': nn.GroupNorm,
+}
+
+# Every op shares the signature (input_size, in_channels, out_channels,
+# stride, bn). Naming: 'ir_kK_eE' is an inverted residual (MBBlock) with
+# kernel size K and expansion E; an '_r2' suffix uses dilation 2;
+# 'sep_kK' is a depthwise-separable conv; 'convK' is a plain conv-BN-ReLU.
+OPS = {
+    'skip': lambda input_size, in_channels, out_channels, stride, bn='BN': Identity(input_size, in_channels, out_channels, stride),
+    'ir_k3_e1': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 3, bn=bn),
+    'ir_k3_e1_r2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 3, dilation=2, bn=bn),
+    'ir_k3_e3': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 3, stride, 3, bn=bn),
+    'ir_k3_e6': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 3, bn=bn),
+    'ir_k3_e6_r2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 3, dilation=2, bn=bn),
+    'ir_k3_s2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 3, 2, bn=bn),
+    'ir_k5_e1': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 5, bn=bn),
+    'ir_k5_e1_r2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 5, dilation=2, bn=bn),
+    'ir_k5_e3': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 3, stride, 5, bn=bn),
+    'ir_k5_e6': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 5, bn=bn),
+    'ir_k5_e6_r2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 5, dilation=2, bn=bn),
+    'ir_k5_s2': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 1, stride, 5, 2, bn=bn),
+    'ir_k7_e3': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 3, stride, 7, bn=bn),
+    'ir_k7_e6': lambda input_size, in_channels, out_channels, stride, bn='BN': MBBlock(input_size, in_channels, out_channels, 6, stride, 7, bn=bn),
+    'sep_k3': lambda input_size, in_channels, out_channels, stride, bn='BN': SepConv(input_size, in_channels, out_channels, 1, stride, 3),
+    'sep_k5': lambda input_size, in_channels, out_channels, stride, bn='BN': SepConv(input_size, in_channels, out_channels, 1, stride, 5),
+    'conv1': lambda input_size, in_channels, out_channels, stride, bn='BN': ConvBNReLU(input_size, in_channels, out_channels, 1, stride, bn_type=bn),
+    'conv3': lambda input_size, in_channels, out_channels, stride, bn='BN': ConvBNReLU(input_size, in_channels, out_channels, 3, stride, bn_type=bn),
+    'conv5': lambda input_size, in_channels, out_channels, stride, bn='BN': ConvBNReLU(input_size, in_channels, out_channels, 5, stride, bn_type=bn),
+    'avgpool': lambda input_size, in_channels, out_channels, stride, bn='BN': AvgPool(input_size, in_channels, stride),
+}
+
+
+class AvgPool(nn.Module):
+    # input_size and in_channels are unused but keep the constructor
+    # aligned with how the OPS entries call it.
+    def __init__(self, input_size, in_channels, stride):
+        super(AvgPool, self).__init__()
+        self.stride = stride
+
+    def forward(self, x):
+        return F.avg_pool2d(x, self.stride)
+
+
+class ChannelShuffle(nn.Module):
+    def __init__(self, groups=1):
+        super(ChannelShuffle, self).__init__()
+        self.groups = groups
+
+    def forward(self, x):
+        if self.groups == 1:
+            return x
+        N, C, H, W = x.size()
+        cpg = C // self.groups  # channels per group
+        out = x.view(N, self.groups, cpg, H, W)
+        out = out.permute(0, 2, 1, 3, 4).contiguous()
+        out = out.view(N, C, H, W)
+        return out
+
+
+class ConvBNReLU(nn.Module):
+    def __init__(self, input_size, in_channels, out_channels, kernel_size, stride,
+                 dilation=1, bias=False, relu_type='relu', bn_type='BN', groups=1):
+        super(ConvBNReLU, self).__init__()
+        assert relu_type in ['relu', 'none']
+        # 'same'-style padding for the given kernel size and dilation
+        padding = (kernel_size - 1) * dilation // 2
+
+        if bn_type == 'none':
+            bias = True
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
+                              padding=padding, dilation=dilation, bias=bias,
+                              groups=groups)
+        nn.init.kaiming_normal_(self.conv.weight, mode="fan_out",
+                                nonlinearity="relu")
+        if self.conv.bias is not None:
+            nn.init.constant_(self.conv.bias, 0.0)
+
+        if bn_type == 'none':
+            self.bn = nn.Sequential()
+        elif bn_type == 'GN':
+            norm_layer = norm_cfg_[bn_type]
+            self.bn = norm_layer(num_channels=out_channels, num_groups=32)
+        else:
+            norm_layer = norm_cfg_[bn_type]
+            self.bn = norm_layer(out_channels)
+
+        self.relu = nn.ReLU(inplace=True) if relu_type == 'relu' else nn.Sequential()
+
+    def forward(self, x):
+        out = self.conv(x)
+        out = self.relu(self.bn(out))
+        return out
+
+
+class SE(nn.Module):
+    def __init__(self, input_size, in_channels, se_ratio):
+        super(SE, self).__init__()
+        self.in_channels, self.se_ratio = in_channels, se_ratio
+        self.pooling = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc1 = nn.Conv2d(in_channels, max(1, int(in_channels * se_ratio)), 1,
+                             bias=False)
+        self.fc2 = nn.Conv2d(max(1, int(in_channels * se_ratio)), in_channels, 1,
+                             bias=False)
+
+    def forward(self, x):
+        out = self.pooling(x)
+        out = self.fc1(out)
+        out = F.relu(out)
+        out = self.fc2(out)
+        out = torch.sigmoid(out)
+        return out
+
+
+class Identity(nn.Module):
+    def __init__(self, input_size, in_channels, out_channels, stride):
+        super(Identity, self).__init__()
+        if in_channels != out_channels or stride != 1:
+            # 1x1 projection when channels or resolution change
+            self.conv = ConvBNReLU(input_size, in_channels, out_channels,
+                                   kernel_size=1, stride=stride, bias=False,
+                                   relu_type='relu', bn_type='BN')
+        else:
+            self.conv = nn.Sequential()
+
+    def forward(self, x):
+        return self.conv(x)
+
+
+class SepConv(nn.Module):
+    def __init__(self, input_size, in_channels, out_channels, expansion, stride,
+                 kernel_size, groups=1, bn_type='BN'):
+        super(SepConv, self).__init__()
+        self.conv1 = ConvBNReLU(input_size, in_channels, in_channels,
+                                kernel_size=kernel_size, stride=stride,
+                                bias=False, relu_type='relu', bn_type=bn_type,
+                                groups=in_channels)
+        self.conv2 = ConvBNReLU(input_size // stride, in_channels, out_channels,
+                                kernel_size=1, stride=1, bias=False,
+                                relu_type='none', bn_type=bn_type, groups=groups)
+
+    def forward(self, x):
+        out = self.conv1(x)
+        out = self.conv2(out)
+        return out
+
+
+class MBBlock(nn.Module):
+    def __init__(self, input_size, in_channels, out_channels, expansion, stride,
+                 kernel_size, dilation=1, groups=1, has_se=False, bn='BN'):
+        super(MBBlock, self).__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.has_se = has_se
+        self.stride = stride
+        self.groups = groups
+        mid_channels = in_channels * expansion
+
+        # expand -> depthwise -> project (inverted residual)
+        self.conv1 = ConvBNReLU(input_size, in_channels, mid_channels,
+                                kernel_size=1, stride=1, dilation=1, bias=False,
+                                relu_type='relu', bn_type=bn, groups=groups)
+        self.conv2 = ConvBNReLU(input_size, mid_channels, mid_channels,
+                                kernel_size=kernel_size, stride=stride,
+                                dilation=dilation, bias=False, relu_type='relu',
+                                bn_type=bn, groups=mid_channels)
+        self.conv3 = ConvBNReLU(input_size // self.stride, mid_channels,
+                                out_channels, kernel_size=1, stride=1, dilation=1,
+                                bias=False, relu_type='none', bn_type=bn,
+                                groups=groups)
+
+        if has_se:
+            self.se = SE(input_size, mid_channels, se_ratio=0.05)
+
+        if groups != 1:
+            self.shuffle = ChannelShuffle(groups)
+
+    def forward(self, x):
+        out = self.conv1(x)
+        if self.groups != 1:
+            out = self.shuffle(out)
+        out = self.conv2(out)
+        if self.has_se:
+            out = out * self.se(out)
+        out = self.conv3(out)
+        if self.in_channels == self.out_channels and self.stride == 1:
+            out = out + x
+        return out
\ No newline at end of file
diff --git a/CDARTS_detection/mmdet/models/bbox_heads/bbox_head.py b/CDARTS_detection/mmdet/models/bbox_heads/bbox_head.py
new file mode 100644
index 0000000..2e983ff
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/bbox_heads/bbox_head.py
@@ -0,0 +1,241 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from
torch.nn.modules.utils import _pair + +from mmdet.core import (auto_fp16, bbox_target, delta2bbox, force_fp32, + multiclass_nms) +from ..builder import build_loss +from ..losses import accuracy +from ..registry import HEADS + + +@HEADS.register_module +class BBoxHead(nn.Module): + """Simplest RoI head, with only two fc layers for classification and + regression respectively""" + + def __init__(self, + with_avg_pool=False, + with_cls=True, + with_reg=True, + roi_feat_size=7, + in_channels=256, + num_classes=81, + target_means=[0., 0., 0., 0.], + target_stds=[0.1, 0.1, 0.2, 0.2], + reg_class_agnostic=False, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=1.0, loss_weight=1.0)): + super(BBoxHead, self).__init__() + assert with_cls or with_reg + self.with_avg_pool = with_avg_pool + self.with_cls = with_cls + self.with_reg = with_reg + self.roi_feat_size = _pair(roi_feat_size) + self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1] + self.in_channels = in_channels + self.num_classes = num_classes + self.target_means = target_means + self.target_stds = target_stds + self.reg_class_agnostic = reg_class_agnostic + self.fp16_enabled = False + + self.loss_cls = build_loss(loss_cls) + self.loss_bbox = build_loss(loss_bbox) + + in_channels = self.in_channels + if self.with_avg_pool: + self.avg_pool = nn.AvgPool2d(self.roi_feat_size) + else: + in_channels *= self.roi_feat_area + if self.with_cls: + self.fc_cls = nn.Linear(in_channels, num_classes) + if self.with_reg: + out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes + self.fc_reg = nn.Linear(in_channels, out_dim_reg) + self.debug_imgs = None + + def init_weights(self): + if self.with_cls: + nn.init.normal_(self.fc_cls.weight, 0, 0.01) + nn.init.constant_(self.fc_cls.bias, 0) + if self.with_reg: + nn.init.normal_(self.fc_reg.weight, 0, 0.001) + nn.init.constant_(self.fc_reg.bias, 0) + + @auto_fp16() + def forward(self, x): + if self.with_avg_pool: + x = self.avg_pool(x) + x = x.view(x.size(0), -1) + cls_score = self.fc_cls(x) if self.with_cls else None + bbox_pred = self.fc_reg(x) if self.with_reg else None + return cls_score, bbox_pred + + def get_target(self, sampling_results, gt_bboxes, gt_labels, + rcnn_train_cfg): + pos_proposals = [res.pos_bboxes for res in sampling_results] + neg_proposals = [res.neg_bboxes for res in sampling_results] + pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] + pos_gt_labels = [res.pos_gt_labels for res in sampling_results] + reg_classes = 1 if self.reg_class_agnostic else self.num_classes + cls_reg_targets = bbox_target( + pos_proposals, + neg_proposals, + pos_gt_bboxes, + pos_gt_labels, + rcnn_train_cfg, + reg_classes, + target_means=self.target_means, + target_stds=self.target_stds) + return cls_reg_targets + + @force_fp32(apply_to=('cls_score', 'bbox_pred')) + def loss(self, + cls_score, + bbox_pred, + labels, + label_weights, + bbox_targets, + bbox_weights, + reduction_override=None): + losses = dict() + if cls_score is not None: + avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) 
+ losses['loss_cls'] = self.loss_cls( + cls_score, + labels, + label_weights, + avg_factor=avg_factor, + reduction_override=reduction_override) + losses['acc'] = accuracy(cls_score, labels) + if bbox_pred is not None: + pos_inds = labels > 0 + if self.reg_class_agnostic: + pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds] + else: + pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, + 4)[pos_inds, labels[pos_inds]] + losses['loss_bbox'] = self.loss_bbox( + pos_bbox_pred, + bbox_targets[pos_inds], + bbox_weights[pos_inds], + avg_factor=bbox_targets.size(0), + reduction_override=reduction_override) + return losses + + @force_fp32(apply_to=('cls_score', 'bbox_pred')) + def get_det_bboxes(self, + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=False, + cfg=None): + if isinstance(cls_score, list): + cls_score = sum(cls_score) / float(len(cls_score)) + scores = F.softmax(cls_score, dim=1) if cls_score is not None else None + + if bbox_pred is not None: + bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means, + self.target_stds, img_shape) + else: + bboxes = rois[:, 1:].clone() + if img_shape is not None: + bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1) + bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1) + + if rescale: + if isinstance(scale_factor, float): + bboxes /= scale_factor + else: + scale_factor = torch.from_numpy(scale_factor).to(bboxes.device) + bboxes = (bboxes.view(bboxes.size(0), -1, 4) / + scale_factor).view(bboxes.size()[0], -1) + + if cfg is None: + return bboxes, scores + else: + det_bboxes, det_labels = multiclass_nms(bboxes, scores, + cfg.score_thr, cfg.nms, + cfg.max_per_img) + + return det_bboxes, det_labels + + @force_fp32(apply_to=('bbox_preds', )) + def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): + """Refine bboxes during training. + + Args: + rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, + and bs is the sampled RoIs per image. + labels (Tensor): Shape (n*bs, ). + bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class). + pos_is_gts (list[Tensor]): Flags indicating if each positive bbox + is a gt bbox. + img_metas (list[dict]): Meta info of each image. + + Returns: + list[Tensor]: Refined bboxes of each image in a mini-batch. + """ + img_ids = rois[:, 0].long().unique(sorted=True) + assert img_ids.numel() == len(img_metas) + + bboxes_list = [] + for i in range(len(img_metas)): + inds = torch.nonzero(rois[:, 0] == i).squeeze() + num_rois = inds.numel() + + bboxes_ = rois[inds, 1:] + label_ = labels[inds] + bbox_pred_ = bbox_preds[inds] + img_meta_ = img_metas[i] + pos_is_gts_ = pos_is_gts[i] + + bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, + img_meta_) + # filter gt bboxes + pos_keep = 1 - pos_is_gts_ + keep_inds = pos_is_gts_.new_ones(num_rois) + keep_inds[:len(pos_is_gts_)] = pos_keep + + bboxes_list.append(bboxes[keep_inds]) + + return bboxes_list + + @force_fp32(apply_to=('bbox_pred', )) + def regress_by_class(self, rois, label, bbox_pred, img_meta): + """Regress the bbox for the predicted class. Used in Cascade R-CNN. + + Args: + rois (Tensor): shape (n, 4) or (n, 5) + label (Tensor): shape (n, ) + bbox_pred (Tensor): shape (n, 4*(#class+1)) or (n, 4) + img_meta (dict): Image meta info. + + Returns: + Tensor: Regressed bboxes, the same shape as input rois. 
+ """ + assert rois.size(1) == 4 or rois.size(1) == 5 + + if not self.reg_class_agnostic: + label = label * 4 + inds = torch.stack((label, label + 1, label + 2, label + 3), 1) + bbox_pred = torch.gather(bbox_pred, 1, inds) + assert bbox_pred.size(1) == 4 + + if rois.size(1) == 4: + new_rois = delta2bbox(rois, bbox_pred, self.target_means, + self.target_stds, img_meta['img_shape']) + else: + bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means, + self.target_stds, img_meta['img_shape']) + new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) + + return new_rois diff --git a/CDARTS_detection/mmdet/models/bbox_heads/convfc_bbox_head.py b/CDARTS_detection/mmdet/models/bbox_heads/convfc_bbox_head.py new file mode 100644 index 0000000..09f39eb --- /dev/null +++ b/CDARTS_detection/mmdet/models/bbox_heads/convfc_bbox_head.py @@ -0,0 +1,327 @@ +import torch.nn as nn + +from .bbox_head import BBoxHead +from ..registry import HEADS +from ..utils import ConvModule + +from .auto_head.build_head import build_search_head + + +class ResidualBlock(nn.Module): + def __init__(self, in_channel, out_channel): + super(ResidualBlock, self).__init__() + self.in_channel = in_channel + self.out_channel = out_channel + self.relu = nn.ReLU() + if in_channel != out_channel: + self.downsample = nn.Conv2d(in_channel, out_channel, 1, bias=False) + self.conv = nn.Sequential( + nn.Conv2d(in_channel, in_channel, 3, padding=1, bias=False), + nn.SyncBatchNorm(in_channel), + nn.Conv2d(in_channel, out_channel, 1, bias=False), + nn.SyncBatchNorm(out_channel) + ) + else: + self.downsample = nn.Sequential() + self.conv = nn.Sequential( + nn.Conv2d(in_channel, in_channel // 4, 1, bias=False), + nn.SyncBatchNorm(in_channel // 4), + nn.Conv2d(in_channel // 4, in_channel // 4, 3, padding=1, bias=False), + nn.SyncBatchNorm(in_channel // 4), + nn.Conv2d(in_channel // 4, out_channel, 1, bias=False), + nn.SyncBatchNorm(out_channel) + ) + + for m in self.modules(): + if isinstance(m, nn.SyncBatchNorm): + m._specify_ddp_gpu_num(1) + + def forward(self, x): + out = self.conv(x) + short_cut = self.downsample(x) + out = self.relu(out + short_cut) + return out + + +# For toy experiments +class MBBlock(nn.Module): + def __init__(self, in_channels, out_channels, expansion, stride, kernel_size, dilation=1, groups=1): + super(MBBlock, self).__init__() + self.in_channels = in_channels + self.out_channels =out_channels + self.stride = stride + self.groups = groups + mid_channels = in_channels * expansion + padding = (kernel_size - 1) * dilation // 2 + + self.conv1 = nn.Sequential( + nn.Conv2d(in_channels, mid_channels, 1, stride=1, padding=0, dilation=1, bias=False, groups=groups), + nn.SyncBatchNorm(mid_channels), + nn.ReLU(inplace=True) + ) + + self.conv2 = nn.Sequential( + nn.Conv2d(mid_channels, mid_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False, groups=mid_channels), + nn.SyncBatchNorm(mid_channels), + nn.ReLU(inplace=True) + ) + + self.conv3 = nn.Sequential( + nn.Conv2d(mid_channels, out_channels, 1, stride=1, padding=0, dilation=1, bias=False, groups=groups), + nn.SyncBatchNorm(out_channels) + ) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1]) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0.0001) + nn.init.constant_(m.running_mean, 0) + + def forward(self, x): + out = self.conv1(x) + out = 
self.conv2(out) + out = self.conv3(out) + if self.in_channels == self.out_channels and self.stride == 1: + out = out + x + return out + + +@HEADS.register_module +class ConvFCBBoxHead(BBoxHead): + """More general bbox head, with shared conv and fc layers and two optional + separated branches. + + /-> cls convs -> cls fcs -> cls + shared convs -> shared fcs + \-> reg convs -> reg fcs -> reg + """ # noqa: W605 + + def __init__(self, + num_shared_convs=0, + num_shared_fcs=0, + num_cls_convs=0, + num_cls_fcs=0, + num_reg_convs=0, + num_reg_fcs=0, + convs_kernel=3, + conv_out_channels=256, + fc_out_channels=1024, + conv_cfg=None, + norm_cfg=None, + search_head=None, + toy_replace=None, + bottle_first='conv', + *args, + **kwargs): + super(ConvFCBBoxHead, self).__init__(*args, **kwargs) + assert (num_shared_convs + num_shared_fcs + num_cls_convs + + num_cls_fcs + num_reg_convs + num_reg_fcs >= 0) + if num_cls_convs > 0 or num_reg_convs > 0: + assert num_shared_fcs == 0 + if not self.with_cls: + assert num_cls_convs == 0 and num_cls_fcs == 0 + if not self.with_reg: + assert num_reg_convs == 0 and num_reg_fcs == 0 + self.num_shared_convs = num_shared_convs + self.num_shared_fcs = num_shared_fcs + self.num_cls_convs = num_cls_convs + self.num_cls_fcs = num_cls_fcs + self.num_reg_convs = num_reg_convs + self.num_reg_fcs = num_reg_fcs + self.convs_kernel = convs_kernel + self.conv_out_channels = conv_out_channels + self.fc_out_channels = fc_out_channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.bottle_first = bottle_first + self.SearchHead = build_search_head(search_head) + self.toy_replace = toy_replace # for toy experiments replace + + # add shared convs and fcs + self.shared_convs, self.shared_fcs, last_layer_dim = \ + self._add_conv_fc_branch( + self.num_shared_convs, self.num_shared_fcs, self.in_channels, + True, toy_replace) + self.shared_out_channels = last_layer_dim + + # add cls specific branch + self.cls_convs, self.cls_fcs, self.cls_last_dim = \ + self._add_conv_fc_branch_2head( + self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) + + # add reg specific branch + self.reg_convs, self.reg_fcs, self.reg_last_dim = \ + self._add_conv_fc_branch_2head( + self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) + + if self.num_shared_fcs == 0 and not self.with_avg_pool: + if self.num_cls_fcs == 0: + self.cls_last_dim *= (self.roi_feat_size * self.roi_feat_size) + if self.num_reg_fcs == 0: + self.reg_last_dim *= (self.roi_feat_size * self.roi_feat_size) + + if self.SearchHead is not None and self.num_shared_fcs == 0: + self.cls_last_dim = self.SearchHead.last_dim + self.reg_last_dim = self.SearchHead.last_dim + + self.relu = nn.ReLU(inplace=True) + # reconstruct fc_cls and fc_reg since input channels are changed + if self.with_cls: + self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes) + if self.with_reg: + out_dim_reg = (4 if self.reg_class_agnostic else 4 * + self.num_classes) + self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) + + def _add_conv_fc_branch(self, + num_branch_convs, + num_branch_fcs, + in_channels, + is_shared=False, + toy_replace=None): + """Add shared or separable branch + + convs -> avg pool (optional) -> fcs + """ + last_layer_dim = in_channels + # add branch specific conv layers + branch_convs = nn.ModuleList() + if num_branch_convs > 0: + for i in range(num_branch_convs): + conv_in_channels = ( + last_layer_dim if i == 0 else self.conv_out_channels) + if toy_replace is not None and i == toy_replace.get('stage', 30): + if 
toy_replace.get('block', 'res') == 'ir': + branch_convs.append( + MBBlock(conv_in_channels, self.conv_out_channels, 1, 1, + toy_replace.get('conv_kernel'), dilation=toy_replace.get('dilation'), groups=1)) + else: + branch_convs.append( + ConvModule( + conv_in_channels, + self.conv_out_channels, + toy_replace.get('conv_kernel'), + padding=(toy_replace.get('conv_kernel')-1) * toy_replace.get('dilation') // 2, + dilation=toy_replace.get('dilation'), + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bottle_first=self.bottle_first)) + else: + branch_convs.append( + ConvModule( + conv_in_channels, + self.conv_out_channels, + self.convs_kernel, + padding=(self.convs_kernel-1) // 2, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bottle_first=self.bottle_first)) + last_layer_dim = self.conv_out_channels + # add branch specific fc layers + branch_fcs = nn.ModuleList() + if num_branch_fcs > 0: + # for shared branch, only consider self.with_avg_pool + # for separated branches, also consider self.num_shared_fcs + if (is_shared + or self.num_shared_fcs == 0) and not self.with_avg_pool: + last_layer_dim *= self.roi_feat_area + for i in range(num_branch_fcs): + fc_in_channels = ( + last_layer_dim if i == 0 else self.fc_out_channels) + branch_fcs.append( + nn.Linear(fc_in_channels, self.fc_out_channels)) + last_layer_dim = self.fc_out_channels + return branch_convs, branch_fcs, last_layer_dim + + def _add_conv_fc_branch_2head(self, + num_branch_convs, + num_branch_fcs, + in_channels): + """convs -> avg pool (optional) -> fcs + """ + last_layer_dim = in_channels + # add branch specific conv layers + branch_convs = nn.ModuleList() + if num_branch_convs > 0: + for i in range(num_branch_convs): + conv_in_channels = (last_layer_dim if i == 0 else self.conv_out_channels) + branch_convs.append( + ResidualBlock(conv_in_channels, self.conv_out_channels) + ) + last_layer_dim = self.conv_out_channels + + # add branch specific fc layers + branch_fcs = nn.ModuleList() + if num_branch_fcs > 0: + for i in range(num_branch_fcs): + fc_in_channels = (last_layer_dim * self.roi_feat_size * self.roi_feat_size if i == 0 else self.fc_out_channels) + branch_fcs.append( + nn.Linear(fc_in_channels, self.fc_out_channels)) + last_layer_dim = self.fc_out_channels + return branch_convs, branch_fcs, last_layer_dim + + def init_weights(self): + super(ConvFCBBoxHead, self).init_weights() + for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]: + for m in module_list.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.constant_(m.bias, 0) + + def forward(self, x): + # shared part + if self.num_shared_convs > 0: + for conv in self.shared_convs: + x = conv(x) + + loss_latency = None + if self.SearchHead is not None: + x, loss_latency = self.SearchHead(x) + + if self.num_shared_fcs > 0: + if self.with_avg_pool: + x = self.avg_pool(x) + x = x.view(x.size(0), -1) + for fc in self.shared_fcs: + x = self.relu(fc(x)) + # separate branches + x_cls = x + x_reg = x + + for conv in self.cls_convs: + x_cls = conv(x_cls) + if x_cls.dim() > 2: + if self.with_avg_pool: + x_cls = self.avg_pool(x_cls) + x_cls = x_cls.view(x_cls.size(0), -1) + for fc in self.cls_fcs: + x_cls = self.relu(fc(x_cls)) + + for conv in self.reg_convs: + x_reg = conv(x_reg) + if x_reg.dim() > 2: + if self.with_avg_pool: + x_reg = self.avg_pool(x_reg) + x_reg = x_reg.view(x_reg.size(0), -1) + for fc in self.reg_fcs: + x_reg = self.relu(fc(x_reg)) + + cls_score = self.fc_cls(x_cls) if self.with_cls else None + bbox_pred = 
self.fc_reg(x_reg) if self.with_reg else None + return cls_score, bbox_pred, loss_latency + + +@HEADS.register_module +class SharedFCBBoxHead(ConvFCBBoxHead): + def __init__(self, num_convs=0, num_fcs=2, fc_out_channels=1024, *args, **kwargs): + super(SharedFCBBoxHead, self).__init__( + num_shared_convs=num_convs, + num_shared_fcs=num_fcs, + fc_out_channels=fc_out_channels, + *args, + **kwargs) \ No newline at end of file diff --git a/CDARTS_detection/mmdet/models/bbox_heads/double_bbox_head.py b/CDARTS_detection/mmdet/models/bbox_heads/double_bbox_head.py new file mode 100644 index 0000000..2190a94 --- /dev/null +++ b/CDARTS_detection/mmdet/models/bbox_heads/double_bbox_head.py @@ -0,0 +1,167 @@ +import torch.nn as nn +from mmcv.cnn.weight_init import normal_init, xavier_init + +from ..backbones.resnet import Bottleneck +from ..registry import HEADS +from ..utils import ConvModule +from .bbox_head import BBoxHead + + +class BasicResBlock(nn.Module): + """Basic residual block. + This block is a little different from the block in the ResNet backbone. + The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock. + Args: + in_channels (int): Channels of the input feature map. + out_channels (int): Channels of the output feature map. + conv_cfg (dict): The config dict for convolution layers. + norm_cfg (dict): The config dict for normalization layers. + """ + + def __init__(self, + in_channels, + out_channels, + conv_cfg=None, + norm_cfg=dict(type='BN')): + super(BasicResBlock, self).__init__() + + # main path + self.conv1 = ConvModule( + in_channels, + in_channels, + kernel_size=3, + padding=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + self.conv2 = ConvModule( + in_channels, + out_channels, + kernel_size=1, + bias=False, + activation=None, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + + # identity path + self.conv_identity = ConvModule( + in_channels, + out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + activation=None) + + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + identity = x + + x = self.conv1(x) + x = self.conv2(x) + + identity = self.conv_identity(identity) + out = x + identity + + out = self.relu(out) + return out + + +@HEADS.register_module +class DoubleConvFCBBoxHead(BBoxHead): + r"""Bbox head used in Double-Head R-CNN + /-> cls + /-> shared convs -> + \-> reg + roi features + /-> cls + \-> shared fc -> + \-> reg + """ # noqa: W605 + + def __init__(self, + num_convs=0, + num_fcs=0, + conv_out_channels=1024, + fc_out_channels=1024, + conv_cfg=None, + norm_cfg=dict(type='BN'), + **kwargs): + kwargs.setdefault('with_avg_pool', True) + super(DoubleConvFCBBoxHead, self).__init__(**kwargs) + assert self.with_avg_pool + assert num_convs > 0 + assert num_fcs > 0 + self.num_convs = num_convs + self.num_fcs = num_fcs + self.conv_out_channels = conv_out_channels + self.fc_out_channels = fc_out_channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + # increase the channel of input features + self.res_block = BasicResBlock(self.in_channels, + self.conv_out_channels) + + # add conv heads + self.conv_branch = self._add_conv_branch() + # add fc heads + self.fc_branch = self._add_fc_branch() + + out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes + self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg) + + self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes) + self.relu = nn.ReLU(inplace=True) + + def _add_conv_branch(self): + """Add the fc branch which consists of a sequential of conv 
layers""" + branch_convs = nn.ModuleList() + for i in range(self.num_convs): + branch_convs.append( + Bottleneck( + inplanes=self.conv_out_channels, + planes=self.conv_out_channels // 4, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + return branch_convs + + def _add_fc_branch(self): + """Add the fc branch which consists of a sequential of fc layers""" + branch_fcs = nn.ModuleList() + for i in range(self.num_fcs): + fc_in_channels = ( + self.in_channels * + self.roi_feat_area if i == 0 else self.fc_out_channels) + branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels)) + return branch_fcs + + def init_weights(self): + normal_init(self.fc_cls, std=0.01) + normal_init(self.fc_reg, std=0.001) + + for m in self.fc_branch.modules(): + if isinstance(m, nn.Linear): + xavier_init(m, distribution='uniform') + + def forward(self, x_cls, x_reg): + # conv head + x_conv = self.res_block(x_reg) + + for conv in self.conv_branch: + x_conv = conv(x_conv) + + if self.with_avg_pool: + x_conv = self.avg_pool(x_conv) + + x_conv = x_conv.view(x_conv.size(0), -1) + bbox_pred = self.fc_reg(x_conv) + + # fc head + x_fc = x_cls.view(x_cls.size(0), -1) + for fc in self.fc_branch: + x_fc = self.relu(fc(x_fc)) + + cls_score = self.fc_cls(x_fc) + + return cls_score, bbox_pred \ No newline at end of file diff --git a/CDARTS_detection/mmdet/models/builder.py b/CDARTS_detection/mmdet/models/builder.py new file mode 100644 index 0000000..0c9b644 --- /dev/null +++ b/CDARTS_detection/mmdet/models/builder.py @@ -0,0 +1,43 @@ +from torch import nn + +from mmdet.utils import build_from_cfg +from .registry import (BACKBONES, NECKS, ROI_EXTRACTORS, SHARED_HEADS, HEADS, + LOSSES, DETECTORS) + + +def build(cfg, registry, default_args=None): + if isinstance(cfg, list): + modules = [ + build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg + ] + return nn.Sequential(*modules) + else: + return build_from_cfg(cfg, registry, default_args) + + +def build_backbone(cfg): + return build(cfg, BACKBONES) + + +def build_neck(cfg): + return build(cfg, NECKS) + + +def build_roi_extractor(cfg): + return build(cfg, ROI_EXTRACTORS) + + +def build_shared_head(cfg): + return build(cfg, SHARED_HEADS) + + +def build_head(cfg): + return build(cfg, HEADS) + + +def build_loss(cfg): + return build(cfg, LOSSES) + + +def build_detector(cfg, train_cfg=None, test_cfg=None): + return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg)) diff --git a/CDARTS_detection/mmdet/models/detectors/__init__.py b/CDARTS_detection/mmdet/models/detectors/__init__.py new file mode 100644 index 0000000..586aeab --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/__init__.py @@ -0,0 +1,20 @@ +from .base import BaseDetector +from .double_head_rcnn import DoubleHeadRCNN +from .single_stage import SingleStageDetector +from .two_stage import TwoStageDetector +from .rpn import RPN +from .fast_rcnn import FastRCNN +from .faster_rcnn import FasterRCNN +from .mask_rcnn import MaskRCNN +from .cascade_rcnn import CascadeRCNN +from .htc import HybridTaskCascade +from .retinanet import RetinaNet +from .fcos import FCOS +from .grid_rcnn import GridRCNN +from .mask_scoring_rcnn import MaskScoringRCNN + +__all__ = [ + 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN', + 'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', + 'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 'DoubleHeadRCNN' +] diff --git a/CDARTS_detection/mmdet/models/detectors/__pycache__/__init__.cpython-36.pyc 
diff --git a/CDARTS_detection/mmdet/models/detectors/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/models/detectors/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7955085359434329184443785dd686634a96aded
GIT binary patch
(base85 literal data omitted)
diff --git a/CDARTS_detection/mmdet/models/detectors/__pycache__/cascade_rcnn.cpython-36.pyc b/CDARTS_detection/mmdet/models/detectors/__pycache__/cascade_rcnn.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e63385c089c59ddc505a5c9300b7742ac8de3900
GIT binary patch
(base85 literal data omitted)
diff --git a/CDARTS_detection/mmdet/models/detectors/__pycache__/fast_rcnn.cpython-36.pyc b/CDARTS_detection/mmdet/models/detectors/__pycache__/fast_rcnn.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d810c0f48cf9c1c8bfa94992a6d1c75d443969b2
GIT binary patch
(base85 literal data omitted)
diff --git a/CDARTS_detection/mmdet/models/detectors/__pycache__/fcos.cpython-36.pyc b/CDARTS_detection/mmdet/models/detectors/__pycache__/fcos.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f43606a8529c38b09ef5c4bfdc340edc956b5770
GIT binary patch
(base85 literal data omitted)
diff --git a/CDARTS_detection/mmdet/models/detectors/__pycache__/htc.cpython-36.pyc b/CDARTS_detection/mmdet/models/detectors/__pycache__/htc.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9406adf9a79366f1d42379ada08dfe3ce4c0eb46
GIT binary patch
(base85 literal data omitted)
zX5$Bvn~&$!l3K)gfJSMgqx^6|Ej%{&jfpTKPG)$|-&Hg50%BCb`-J|gz-(o860>Cf z0>_o_?%p!eB^8X!^kjO9?j!S*fv6FpRbm#%=rCVOhxkJE2tn;}r#$A1E!Pil!;&|F zJa7ueD>f@mP^*djB{DcW3^Y*3ZYxfVI^J}yN9QM ztWTo`_4yxy8g3u zKg&+v((o+YH#|cYfo~yKOYyl8&?K(&Bdj&>u66gP;P3^ttSr0_D>Fw@11W|yokMC? zEPxO5K%d90YQ{^FQakwz)UII!--ZO1mkg{q=sIw5-^+54qT?>A^LOtV@0sfspS+=z zWp1?(gg#tVtH5uIYmC5jfwqGzLR}ch&Nvz17_u9M>wFG{ZJsPKor@s;Oa<6-dI?$d zojmV$pM+fYBf0keAb(~v#}UPEqVo(xumedZl;P4VdmH@D=Z~Vfq(euVA??=3M)V1L zJSA)ctYwQ%4|w8xctT_ip>Z$jW1G9vjgChr|F?lR_6E+r{DF5+yn|uoX(T!jh1>H( zfJzEgKjvc|9Hp4w+dB_Az$$W`u?}T$pLf3OJb<48uY2PSE@LL~5eFrNg!rK3o$08o zPtVS2>!&ys`B>6kc8Jt3$`*35sqU>uNXTlF>eAtC7L#D?)Tc)djZzSTQ52&Vd5a#m zL3Cv^inhXLw>vqIEYkrAjtM7-#-mzA+S~c)5!mcLHD*vm!Otwh`bSi&L`}h%EK&+g zCEL9w&QoO62Is)Thgl!mPPTghl}uG zGfN6R*3V}Vl^tsNHjyC_61gmF`-oC3>!eL`m&kXCd;o&T>-ek%XeB$w8Kck)mvE4U z@SU7S?6kkflq@|)EBrmcL=-q2T-+xdIzM6L*Xh04ArQBe-{65vzYWNOOa}u$I^Yio z#F(RG5a>W^BBXywBm5x|213PsG7*sYeTX82eyr0t$*ltF0Ol~zp^y(ne&-mX0O(j5 z)Ug0}vOfY$05}Yos@BQeDq0Z8AplhdJOY@)o!U=Y(4AT#fP_|6Yc;pZy=FltoYJB$ zohw)Qj>t2gK@otWd|aqa(apdjm@ zyEaIFhpbl*-a{n#%Yb_BljY|ede~EHcoO)0gY4d)PgL4C_EdaQ?Z;LYZ>G3nJ-4g79Dm~~({~L7dhuO@ABH%q--f9Tbr6Pkx0~IpgpcaAM-U!g zI6TbM{8bt%IUSiwlN!qJ5czu|{{W&(~jnc76J~5d?;PY_4)IKOLP>PSua{r_UhP|WBrF8#BCxuW)}}CTeZXM XcQs?^H4>48R)yq?b-`SA%I5z9hurRK literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/detectors/__pycache__/mask_rcnn.cpython-36.pyc b/CDARTS_detection/mmdet/models/detectors/__pycache__/mask_rcnn.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b6ac02c617a1bf57df3e321abc863f0596bef1d GIT binary patch literal 855 zcmZuv%Wl&^6rJ%Su^p!^5g=CV&7zUY76DRQAt4c@rYmno6VD`Z<42fr)CPH%N?q|G zd;~u-TUPyruDEv`3PRYLPt?r#J z5uTl2o}XTRzL;hZcznD)(=UNYl2SoZM|gK+PEqzA(8lb<(xZx~Br<7rvURS&`{M2HXu2xwiQcXfbph|os} z5RL$#AR%K}!L?@WJ9+pxUe={N0jw6=4X?)8R&YJm>spV?QrME^b=C24SqoX{aTl%? zOtBubsybe8)d==SA^ie?#1VD=^L~BuyJE+WmRu+w){?P`my$6PF$Pkb0(s2Xmklqv zjMXs1y@N~^1!L+Ns98v;dW!rx0$v*qjTaTfgc53FlcZ^^hY{YtezgK?iU|@LDtTY4cYPGg8UJDjq%vc7bm0F-uO;=4%7u8)& zW(^FZY+G`J)|Si){{bI-^TB(t{1f~Ge9nU}KJf#5;p8u}s+otdEUn~fRa9n1WJF|S zW<2`SW;6K7`|ar8=M3Xt#(~E{`%Ao{0}vZA%Zv_v&5RAqjybS8*1+!A%!sYn&YXeU zanW{SH>(Z2j;GtT%pU}ufEh`U)_-QiUhKazVn1;w)`WE$F?(pVgLmL>Y_u)qZEohf zH+kMyGn+g8EDj?f`n_SoRkQ4iXfVnW{*GZno2l7HFY<>))JyIrMba&D{?05BHBFvy z(D--1di2$uN56RRuuEd5k6ksy=V&BGhs8$6j9JHugKvy2MoW6-ckH-M3wp&mPTbJS zvyS_XVH=$q6mP25{YX50*v)x=*n4niV`J~fG_YEYtkTe05Uk?aE{c)(;re>Scl*!R za^72yHpTjlA6)-*?fUhbpWVo9QCeY%+s6J-yR?o(A4+YwLVa65@& zh$eC}AacgMH z+PH<0y7)a{<1OR2izZrjI+L~_dA69XddDRE!y9vaOtN$?wt<_IPHa5&_&@a3GKzUQ zH(_@TV4R*Yp5NV`jy!q%J44Q<$7Soc#bF zi5b7a_mo@&C5@b>)?;(LG+w4&x+3TG-PG2Ajwkh~E0^T*mMKsE(S!w0X{(>AtloHQ z@tdGJ^VSq|&?Y#f&1qoHR@$7cv^foJRxp0@t;v6d?u=zNvF|1RGVoVEhJR~5JO8zT8CT^QNCAI& zX4%m4+gk29XwcT{cG1Eam{{ZUL=UHV(5@FWoh#4D^YQ{{E@~@W)HjQD zE!cc~3G~if%p_-ToypA&#!GcjM^o-^%%mh{vJ!nrpYR zyc=cWHX75}hjgNNM!rrg0cf3O$4oN1CNbWly7BWMQ(pEw3%8P}z;aoCUq^ls*eArT zR1Ro|$iyL$AIw0GR*MWsWLgO;t-zF9nz?P3GA*8nBPP+}HuuGSA1ML0t=iB^5^sa! z$0Y6SOTs3(aq_(1O_UQCFGtDu5#1!wI8Ir#iFok8q`y5N{S(!w0<7KzWyk$)p}ZIU zVmriXETbCjsL5b5=DVsu)@}@Zz zrz4|kRZ4|m+TVi}cr@%K$}NeMTVl~4jgn!^={V>xtAgRq0C9V?QwTMyWgbW+$}mf} zqtva4I8;>gn&fceP=@MHh}1!v(cM-y(i6cc)FDmSjz&;wr#~#dP|jY$a{`orM0T2$7B2xZ`tFD{nHu1AW=>bIs63U<6e4*H`a>>$(C{v=G z*4QHNK%>(|2h#zNHKvnws?++=tL)SKFhF+~xh!Q@(``@<-JNcOr|mlE!K!))iDifz zvy(xGW;v(otJV<$Y;fL(eKbFPU6tR>gw9BkDt|*37FQC>O~K0bs`QER=p>g+$SV3! 
z!U#0lAI5^yHtB1ne2p);>E@K1zK5lWE6#-O=sU7MXFstc7M)`xL1QV`(_Cjibf=t~ zTF>clScv%x?ME3q_dYwim!eNaY^X~kI?f-9 z?VO75|A%JZ?Q8aq0zi9~-@}LBB0zy$)yV_tnh0)KJAA69l$9kz)greth(@6(cs~aI zr!5@ddrTsOhG>HG4C;}8yQ{S%Jl_#me z1=5!B5s^M4(mG@frmnDlfUBxaIh0O{{2AY(dD{ds0+gVYK)a6Fqg(PP;Kdq%QERY< zIm1q})6S~Cw@dnd2WXRjz-`BkH`!@Dy0?1&$h|$vxNUFXXC=bOi6+d*u81eER8SQ~ej?YnCoo-y`gSGD7gZ)oPnacgW_#^h^D;8RBAnZs{faz?pO6PuD4 z2oPT@vWb}~f9p)SYnSf2O(JpKC@uHWjcU%uUzEo|{Lz0*BXT?@`7n<9(Y+{7HX}3o Giu?tPle5VH literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/detectors/__pycache__/rpn.cpython-36.pyc b/CDARTS_detection/mmdet/models/detectors/__pycache__/rpn.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21847e125058e8c185d43a2e394523bae2f1f887 GIT binary patch literal 3135 zcmZ8jTaVku6`tXp6s27|j_q}wLPb-cYK`5-K~uDKoty0fMXLm{fdqmJmaCy8YDrO^ zp}H4v`%pOu`j7zazvy3@*FNoE$Wy;F)M_`T#KSZ9GvB$s-|a>pem_h9-DT{5?Aqgj z{|K)VAc6^Aun~Q&f|u6FD(#WYnXrXZIHfys4eu6S>5qKyp74vH3`e2igCZ(BqYh_T zXCD8NiBLqZnTRqs^ViOrkGg_CXM@fKPGZA>r8{-DP?da`ms6$V!^7$%DbwXLUraAJ zrVO}_KS@>gB&)M=T}f(x@%ivYrs}`sC;8&SYA5-JtGo~yjfYR3zIgiSi_gAzKBhx1 zA7{g;mtZnB;)0DVaq}BC;ldL3>u_X?w@k|sE?N#<>@^#?;+6~N_3&b6@eorcZh#uNNo4)7VywuV3;TFR`JX}-wB zpsQWAT4qv4I9LZsk}vW)Nn}9HoEd8~DpGZzmX$gvOJS@buNEzTP*x%<)Ir-_Np%4E_Lrv;hR*0! zJrL&ic+BtO_3%3*Gupp|V@zg54}G_~dW6Q6u1H55+L+sOtKlu?XgP8p)NOSM&TF}? zHrng?GR9^`3Gy~^goX}}^LmyLyjQ>sEP00}w5Z6t#P1QgM}*ei$E!%4jPLRu?h^5{ zUAoP6*ta*L+t95(#jAb{0vQ?I<=g_tFD2C=S0yFO*?(lDM7FS(@=?JF=O+MLfV0he4`@_uc% zjxl%O8i3Y_O`OxX5ZG(H#%|~uST(Wluydcr=6>S~yK!><9X}760GD!x+7;R1YWhs? zPV0oiOs0~2x~OC}2#hUi8%Cq;DLjYZGDfN+vnZk9Bndn16qQnguvHN$LI)U3%B)Ux zxZPRyX@_^rFu6*#s+Ha$j+Vm(m(<}=BU5j}*_~AKIF%wXt~J0WY8QmLyF9ic9$7bm zYs}AXkjj5B_u2R|$dLjlsvBfyU>c>XBlhaSm#lGxy$ADOLpy5B8?CY1XvggYuPml!hP0+_Tx;3u@mKFgQd9xqqgU98IGDO6*8fq<4jqZz); z%*HLa-KDG$5z-Aonp_+C&bB{E8MwZ3J-06WT)vU?tz984roPPlR+NKOjP<%ij<&f#ND!1eZd5 zfYUd1;?_&g1iL6AHKKz!BUg zCMrlrq`_Y8A}IBse^+?lqDXr6r!Sds;RgQMzn;&k<38-EzqQJKwdmKgte=2eI&qDne`9TdOYi^{0y^I|GfuThc7UnRiclCzN}=e`l&q35hdj5 zcB`v(``bQg+=pE7N9n`cz7IFBt?psvQ4a zck@!=UYiIy*lGRqLz>{hHCIKgV=_Sm*3#4hKJBd#R3Fovh{oEAPyQYq@^?h8+?&rd zKh`{Hqbi;BrulTt@$FHcVR0K9y%;_m!-sDnA>QK~KPExjnUvM=w}$vUqrWwjf57TN zhf1PFT4qV2qa;Bgu|jA{betrwR%y|Wm^z|O+mr<4FNpk_$Ri?u2GQM3^)k+i0_D}# z=m)$_;U;@i6rtA$9fZBG=LaDIP#lF(*AxeXo+Q7}ktuTa$H-4@IwF76&LN6$ zMzvK^4Xh-2gJx|_7szaiU?@)|rB`Gf)6C@NZfyy&6|WVPgww&J_V$m-QcT4ZSV%eT I&+hU60}sUJLjV8( literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/detectors/__pycache__/single_stage.cpython-36.pyc b/CDARTS_detection/mmdet/models/detectors/__pycache__/single_stage.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b195e8959880bcb705ca60274d670d6a40ace1e GIT binary patch literal 2935 zcmai0TW=dh6rR~#@6C>zCcQ!hZUG@9;hAoptu&%&e7* z)-NPdd7%=&fhYdbyz-R4&?nBBv7Hp9u=bukGrKc$=KH=mGk2Dj!Y^KK$A2~n`J0@% z8sL8kw|oqS5k@mIqJ7FNW@lDz_iaj;!y1{BH~J0jyP2DNeGh!Xyv)yoen1KLM&Z|l z`7AglEa3K$eMI|BMmI?}Jb|9c#t8-6rKgTHTW~qassy{4Ul=xCvJZw62$=LOcJVnG=+KXYzRgy234(``!(8Sa zlYYa5*mv0~3t97+_C0oqEwMIuKD*2=ux0Rq=folX5Q4v=RyWgPnDNaj9xlS(NIX3{ zA^5asw|oLlMJig8BWq+G5lSk1qooEDsQmsCSe##{&=2339(rA1mrkw9QnTf-S`#mUZA zS#aeqywJQUc(S7$ZD%#xJZ36DxFh50fudwERDQ)}rM;FOWRIuA?Mik%QK?iO|7cX;C*?s{{m64V}EbN=ut z8JJTJkvmF&4SQtO)bIf%M?9&3r;%HEBmc+}-|pYS(mHwCnp*XY``yoeBt?kzMhoBt z##=CI5DG;IgK8qhCG-$b73`(ec4R0@VcIa|BH_7gZW(8uzX(E@NW^ zUD3Qt#T6`GMRN@ec6|eG0~KASZGZ>?Yg-{b?3@SHS-M|=3!%G@eZ2(+CPok_m^$PR z@dj7`L6Q_k}4YTxOyLlL6H+0Lcp&J 
z^=y+xs1CGRgo+@%8mbP`u5AqRF#bVu))|zdla%>*Qi0Gq1+Vr>vC|o3?_*CB`4b(`_Qv&Gk4OLQOx!i87`xASd@ZyVO)G+XS_JgAj-)~DZ8yxm{b62iE>`WQU!}nw423SkS4^t zx*NdJAlkwnqAad>ksOGd@Uq|3RcQhIO66jKzaDd)0Y8Ce5?2y&L#ss{>RLYatojR?XMF<6SmhBh<9L6O+kV@;DsTeHZV`i4wxY*;_0=Yttl+y z3YN1TibB4!|FCwQO_NAq-QtF)SyIhnlAFZNW!F|#?;{n$LYR?jd_xT6BAuEREBs}6 zH%n!O3+>)d7y&8+^7Wnb1TE8iobd=Zq2ArBOyW%5183fTCLi(MJTSfYs1Vnn(OEY< zBO-q;BIUuc0y^X318j+FaHbKx3C=WPc0?RZwRF8Xix|<&=Y^R?HP6KP2r4AzTgX`; z8juX6*kzDQ2+}!x`+sJ+&3Db)fQ(#4Yg@caCsZ_0Ql+`zgO%B@tXAguZMc3pN z<-jzhz}zW5Mx&u$;II66GSrCv0xvcC_liQiiQPL}aO4yyc}2_t$rpSGoQea1i$I0? q73+6o+vq_oFc@dY1P}A<;HPgO_L`h7u$hiL5ZTb(IKftvB literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/detectors/__pycache__/test_mixins.cpython-36.pyc b/CDARTS_detection/mmdet/models/detectors/__pycache__/test_mixins.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da56a3ea10db4cccbc7d76b51f782d3419212186 GIT binary patch literal 4810 zcmbtYO_Li(8Sb8MjYc24Ua#%MBwG{}kRiLl2_I#Qy*Mc+gm?+DVXIV_DOoeCm7`E$S|`~BhlDC_9J%Jg58w#=04m46a`KJh=o8P|tXn{MVcJJB>U zT$YMX8n;e5@kk6u!@SkcRejk>gVS8qmOWW3|2S9G?4)0G+x=Fa$Ac_y(`)9Jg~~ok zein?0x=rI$7w53VJ9tO^92oGIeFSYQhcm} zZq|vjv}mb_h8B2z+&=1<2ZtvGK6kzrad$AHj>@8=-_489nAn6<&py~Y9%kuP*g@yf zNo%mzew4KGy&TK9mt{#>r0t?R9GLoEHcZlfzGu3JBHz<5&gjGQ-O(de%e&dApBjJQ z7%f5`z%U;22!E%SR~Kvd%<#qH)$oLiLJOGx1EydS%YTW{@)-;jTsl+!4)wU>_XB`b2)^lYz<(xfy|ByYr za>zPtQXb!)&wc;eq=cSm{93`ugDJL;W|&lDgeNUoxx{D|J;Nu5OqS6b>DI3g*>Bkq z2fppc0laf%nY@FyRawP0y9a{fyV_*vBEZqt-bKmlFdYEqelX!3_6O^sbJuK_-ZS9C z+I>UuShtKhhbNwe*J)?l!)$be7BHtdJ$Cx~+L9AZ^sZrhcCYt`ZLsAAD)T279+5X5 z3QetPpgr2c>hXz%)xdhw+OBxl;DXTKu*G@&sL~v8xr*qmbI4*?75A5piwkJJf?_=8 z;FR4@2g((#K_^xAX?G;H^o;4?1T1b9sq#SnbdWS^qD*((qqGH#m*(%t^($0_b{>yX z5qCx>sq2jaFMrv$N&sFc&z1Xkebo;Dhbi*|Y+MZ3EQcE08rN4V258sUz@Q=lrhZVkux z=JD=!n=nEbL9T@IHcI{p3@~B??;e=QB~Lh5fLafIPb~1pn_8Nsj(7|GuJkdMSPRag z7RVsMs7VRGHhx3!AMM0Jy*#VCs7Lff&j-^>GL&T*fobscQy)03fNK?5J+k1pe|m*{ z-Wd-1k9H8hc1%p&`AxStM!ehsG_rJ1v?$o^8g4!%ZW>7C$E{JSq8wyN7qapqG*iOu|J5;E{Zq)+(aa;0Ds5c`x-$|j0K9QZjcm^k{Bc*(%ZVl z686MOkcm98jtHF^O;cwQ0ku(wWV-^S+0suKlmxY&JHv4WIIa>3#UEjPS;bgt2ULS6 zwMm^?y%MlklXaRISq^d>jnR$CrgVFu+#uDE9%`FfKei58Q3jShsEjjpFweQkRuRc_ za!WeB3Q85FHg$35{A636m)jcWfx+&|J~~JJ?&vA8|8~RDs1SQ-H0pwK8$smP$Q5r; zji4&NP39_@H_5z3<~w94o(b9vLCUU58a{DxER>B5N|m$LSVvog&&CBEnZ>I#dWQ@} zS@B&M6;dEJTWjiio_qz+3mA%<^c3M+e2>ic$y_7z0~qDaX7S7n&5BO)lG)^o=4}4! 
z^xO~0==it@lBz}y>Bw#7`w_xbZa<6U_Z^Tlcilv$$wK_fh`GV15Uw z@C$-DsTV1SCfZ+7G3l2k6CkN6LuB%$-9yq3I`NM!amz>rNX2?q&rZCHNO*M86AYwu z#jilNInn$X(flnoKsG!nfmS6=v=C3b5DZq3bhtgpffkGqL@N`OG|@^(sJL!hx<*1^ zfj$ukMg%SQB1-?HJJq3pq{~o4m4>2AVHaru1-eDTL*biN?=H%JO~G`EBepSgWuqt{ z>SRXH6f_^wud8cIycXT)9P*+ZOnj3j+J|XWlxtkY!3%_=CuUlKR4CSmmo%R#kG}c5 z@|>Zws*sxGDb5f27!+vS)8WA(0clhWZS zoltu;OU-m$Ys?bav|y^fsvtD#7`-NS@`-mnuR)4*spR<}ucbtM37{a)#eu7u(*1m}gWSh`k&Rz$Mat4vw1ic*`JQ^+;wMqR50>uvNl zarUXt=t@4~tTNfqL^%f~vNcgKN^T@!Qcj{zeH=*XP+R$bPA6YZjVH9ZGivOuZ{Q+8 z5(vJS7iY9#9}{X&c2L3fubZTE1W+w@jVW zOxDb!nt^9Xq)#oH$uwq)hjLO$M(lxNZRyM_!m$Gi1}1HSLpm#irs3DB!N#kXRjB{buuG|^$wVj$>!YN}HS|jB ngu@#?=;0p{`Usd+%;qbyNs-0Y ztLl-|ctn6{Dc&MOfqf2g2n-|$atLy;FF_7DvMi(mdJh_Hk`vfAcr51qCn9N~`KvDfx& z3uSNQkArqFF11VJupOe!7s04Jj@l8IOQXs-ZpSEFA{Z`a3-c4NHKUa_q- z9El-C# zJCnz6%gI1jOexLxM}@92d(z4CK|f2Su9|w@8Sjl!*`?jh|KeF8@1c-dZChBlv9<47 zJ)7?BX}RsDp74bK)WZ3EvCch#D4{11p(sDK+9eT*3cjJ(5V5E}wcBNJO4LOIrAV9> zD`FL;%6Bc#YR9;rGrDo_(d2H?=`XHjyIY%Wgud?Vm;Vxl!YXWK9Xdnj(6X(<9eT<> zL`&kU3TU8W(2{b5^`I=jivCbJ++S8Mw{4>>QZBbul*es5Xp5D{ZB-R;+ox!&DWBWw z+>a;4+zl0S|39k|e}Bq+CGJ}h#Ic&yghO9t4eR)>@aT{^)>IiS>nZ}>K>sOK74CDJ z;DxL3w(s1-qego&Eo5hqrJ{9O`}zJ}Dka^74wGb%4T>a@g!1{z?sU2jb|zU00L)A4 z-BF$Z2}$1VjMAjnA++grnf5>58W}ufY4?Hln6AtavfGiVNOseX(3O0tG8@4iq1?+1 z%bFQQtEP{!e_nExkjQHuwUxPzN3V_n@Dtr@ywr)tGBfg+GT%+o?e%q7KytVg^SwMu z2mRe5Z`HJyr=y-+!!60vL^g?>BXWVrMIx^exkThLkt;;jiEI$5gOG$JNp}R%@1kc~WY&=HTd(s-ZvnlJvZX zUrr}0ECq#l=u8Nk5bg(Mpw8Lmi)lH!wA;x$MIlYymak)od}Bebqol(}gJL(qqkj#x zoHUa4s=ds)y37HlxfIhk{~5@UB$EW&*xAq>I?5I{$pdPh{KLY>Hz-QO@X(PTJibh| z+t&Sf>Zq9pc=*+itPIjL42(uVsVs+}(XqKI@&Ik|f~P8ff_iOHuP;gs`H$#XS=3i^ zs;`+kOR2NnvJE=cWdjFc(Ue!IM0nF>K06CiG{^@TUOelj3`6Z?nU3zJ5BJlo7<5Lu z!MsbdWq|r7O-5*)O-2H91l(C8NrZuKlh_H(E!Vsm;Y!n|&3>SR#Y!rip!fmtES66TULvt$G68JYe9HIQIyca&zcV{3OX?rZz; zNyx@C)sytnPVq+=l+&|Wvoppdco4Z)oOF5I;-pm!_3-19l1b{(Zs(_U`Lx&hv@kML zPdUo#*{8t+GvEo>Q5Y}((3<*1p!`1FPyp_d`~hePBSkZW!}2rSiwX{%X$kd6?txYm z%na9KDt+us!)Y1whpOCjXa(?8HegiM9sW$ctMq0FuSu)IQp=QinCXSO>tl}acb)-nd{&@VPiw)#k zAeQer5wtrAUV}x^v)5S=A)IKBq5n;tI1Vm;PAHF0OWpzfk<6Fz!#!L1MNqu6k;)Km z6kHOP!lnX_1kjR^b3?c(<4aN)6>zf{9j^Qm*f z)9N&x^@aV=`-pBie&NVQaYj^VFKUB$v6D0C*;Ke|`VP;sjGw`cY@&V+r~O-e&#O&v zQp*J@ee6!pPR~uxtMjBK!ShQM%}T$38+jhLAK~84v#ehjUQ`=+8xNy#OE}2|ng=g) zR-NlP>cU?-xaW)fHWz1aQ+?seQ{Z~-g_D09JC2X-_%-x2XFG1rcKjN4+(i31obg}r zJ+GSBaWfm?o-e7*;ic*2$J=z~`*)^S)TQC+;_~o{x-z^ve4WsxE~9=;RG&jX!qA)k zH_(6mRsAoVhuKFKMzqu=tb+N~B_Na5f0Ng}N+muw=}|hhJAIw_MeW#0t}$OpT~)8E zYv6f$0KLf&>YD{fq6XpeeCpRa*2{s*$J{hs$ z#8K^m*(tjcAE5W=G@IESx_wrD$Cg~&!|Hs=h?X4A7Bkmi>%d%b%Q0)ZGR;riiKONJ z>1erMWt9lZefF5MmE`YZ##gQMZ$~TPl>(7|GU%q-7sZpk^jBHBN=pMuqs|UOg8y&z z|6W@C3&tjX2~FA)gKlwT9yx5$F7T+m(In4xu)ildMXFucTMlP*M3&cFuCbYH6DiDx ziR;1{4d-bANq&tk&9VZBp9TjwFbr=$)q!Esf$2qkZ!gV+F~uBD-NfJ%=GUxUbR=!2 zN&Re!Nh;)5KwyJ2JoATC^x*;TVY&T5R(x6e2dSLE`euk$&EB=!?G)_mwyrN(@euxA zGER$*E-%ECi{_f;H)!q7Vd{K zIAEqB&2>B%ehxhAi`tB}x@aINM7D(j;Q1}#F zy*0;kYY0jlgeMW=-BtKLHT+gl^U#Xu7ynM#4Htb5{GtlRImC;97#m<*jByeAL!O0v zCyJvz_NY9}5}}=ZDpR7FX$}g7JYJRd^?!I-e5gBJpH757k|&B&=ne9pg22>Rd98qZ zb!dNzoDjvLFkU`FA7qq-gV5(q`G0xNecrKSGtOXKd zN5{E_5H+W5S`OlAhvQ23B4BrhfCqSaC^zhb^Pe;1OOtfTOw%6)I)pGr6$uaClYX$D z$R(2d?F?PKI5~Uto3~S$LQD_%0LGMyFx#J&vbPl6b-42-WG;csXAT9U&zzp)lN-!o zH(^nDo7Pw0z}&P7Z~{LZj$)kpXbWk50Dgf(U0n|M^CMWc>kGCeOlrS7f&n{%ru-hB$Uw5X z7{GXp7?0Ycz3KAYX5|KvghTm#B0m6WSCS)0${$kmpA+E>4ina{;e3I232{wuSS3<& zo>8Bi&$v#QgEAz=8NMn-H=m;wZ!i79Ic>S9BQ1ERW#4I4De0Vb#%Yr1C`pjh+($w= z(Q%SI-0zG`4<|EWLJ&Vg=${xDI_U^VE+%wSGf`Z literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/detectors/base.py 
new file mode 100644
index 0000000..f027324
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/detectors/base.py
@@ -0,0 +1,176 @@
+import logging
+from abc import ABCMeta, abstractmethod
+
+import mmcv
+import numpy as np
+import torch.nn as nn
+import pycocotools.mask as maskUtils
+
+from mmdet.core import tensor2imgs, get_classes, auto_fp16
+
+
+class BaseDetector(nn.Module):
+    """Base class for detectors."""
+
+    __metaclass__ = ABCMeta
+
+    def __init__(self):
+        super(BaseDetector, self).__init__()
+        self.fp16_enabled = False
+
+    @property
+    def with_neck(self):
+        return hasattr(self, 'neck') and self.neck is not None
+
+    @property
+    def with_shared_head(self):
+        return hasattr(self, 'shared_head') and self.shared_head is not None
+
+    @property
+    def with_bbox(self):
+        return hasattr(self, 'bbox_head') and self.bbox_head is not None
+
+    @property
+    def with_mask(self):
+        return hasattr(self, 'mask_head') and self.mask_head is not None
+
+    @abstractmethod
+    def extract_feat(self, imgs):
+        pass
+
+    def extract_feats(self, imgs):
+        assert isinstance(imgs, list)
+        for img in imgs:
+            yield self.extract_feat(img)
+
+    @abstractmethod
+    def forward_train(self, imgs, img_metas, **kwargs):
+        """
+        Args:
+            img (list[Tensor]): list of tensors of shape (1, C, H, W).
+                Typically these should be mean centered and std scaled.
+
+            img_metas (list[dict]): list of image info dicts where each dict
+                has: 'img_shape', 'scale_factor', 'flip', and may also contain
+                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+                For details on the values of these keys see
+                `mmdet/datasets/pipelines/formatting.py:Collect`.
+
+            **kwargs: specific to concrete implementation
+        """
+        pass
+
+    @abstractmethod
+    def simple_test(self, img, img_meta, **kwargs):
+        pass
+
+    @abstractmethod
+    def aug_test(self, imgs, img_metas, **kwargs):
+        pass
+
+    def init_weights(self, pretrained=None):
+        if pretrained is not None:
+            logger = logging.getLogger()
+            logger.info('load model from: {}'.format(pretrained))
+
+    def forward_test(self, imgs, img_metas, **kwargs):
+        """
+        Args:
+            imgs (List[Tensor]): the outer list indicates test-time
+                augmentations and the inner Tensor should have a shape
+                NxCxHxW, which contains all images in the batch.
+            img_metas (List[List[dict]]): the outer list indicates test-time
+                augs (multiscale, flip, etc.) and the inner list indicates
+                images in a batch.
+        """
+        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
+            if not isinstance(var, list):
+                raise TypeError('{} must be a list, but got {}'.format(
+                    name, type(var)))
+
+        num_augs = len(imgs)
+        if num_augs != len(img_metas):
+            raise ValueError(
+                'num of augmentations ({}) != num of image meta ({})'.format(
+                    len(imgs), len(img_metas)))
+        # TODO: remove the restriction of imgs_per_gpu == 1 when prepared
+        imgs_per_gpu = imgs[0].size(0)
+        assert imgs_per_gpu == 1
+
+        if num_augs == 1:
+            return self.simple_test(imgs[0], img_metas[0], **kwargs)
+        else:
+            return self.aug_test(imgs, img_metas, **kwargs)
+
+    @auto_fp16(apply_to=('img', ))
+    def forward(self, img, img_meta, return_loss=True, **kwargs):
+        """
+        Calls either forward_train or forward_test depending on whether
+        return_loss=True. Note this setting will change the expected inputs.
+        When `return_loss=True`, img and img_meta are single-nested (i.e.
+        Tensor and List[dict]), and when `return_loss=False`, img and img_meta
+        should be double nested (i.e. List[Tensor], List[List[dict]]), with
+        the outer list indicating test time augmentations.
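+
+        A minimal illustrative call (hypothetical names; assumes a fully
+        built detector and already-preprocessed inputs)::
+
+            >>> # training: batched img tensor plus one meta dict per image
+            >>> losses = detector(img, img_meta, return_loss=True,
+            ...                   gt_bboxes=gt_bboxes, gt_labels=gt_labels)
+            >>> # testing: one list entry per test-time augmentation
+            >>> results = detector([img], [img_meta], return_loss=False)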
+ """ + if return_loss: + return self.forward_train(img, img_meta, **kwargs) + else: + return self.forward_test(img, img_meta, **kwargs) + + def show_result(self, + data, + result, + img_norm_cfg, + dataset=None, + score_thr=0.5, + out_file=None): + if isinstance(result, tuple): + bbox_result, segm_result = result + else: + bbox_result, segm_result = result, None + + img_tensor = data['img'][0] + img_metas = data['img_meta'][0].data[0] + imgs = tensor2imgs(img_tensor, **img_norm_cfg) + assert len(imgs) == len(img_metas) + + if dataset is None: + class_names = self.CLASSES + elif isinstance(dataset, str): + class_names = get_classes(dataset) + elif isinstance(dataset, (list, tuple)): + class_names = dataset + else: + raise TypeError( + 'dataset must be a valid dataset name or a sequence' + ' of class names, not {}'.format(type(dataset))) + + for img, img_meta in zip(imgs, img_metas): + h, w, _ = img_meta['img_shape'] + img_show = img[:h, :w, :] + + bboxes = np.vstack(bbox_result) + # draw segmentation masks + if segm_result is not None: + segms = mmcv.concat_list(segm_result) + inds = np.where(bboxes[:, -1] > score_thr)[0] + for i in inds: + color_mask = np.random.randint( + 0, 256, (1, 3), dtype=np.uint8) + mask = maskUtils.decode(segms[i]).astype(np.bool) + img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5 + # draw bounding boxes + labels = [ + np.full(bbox.shape[0], i, dtype=np.int32) + for i, bbox in enumerate(bbox_result) + ] + labels = np.concatenate(labels) + mmcv.imshow_det_bboxes( + img_show, + bboxes, + labels, + class_names=class_names, + score_thr=score_thr, + show=out_file is None, + out_file=out_file) diff --git a/CDARTS_detection/mmdet/models/detectors/cascade_rcnn.py b/CDARTS_detection/mmdet/models/detectors/cascade_rcnn.py new file mode 100644 index 0000000..f0564c9 --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/cascade_rcnn.py @@ -0,0 +1,379 @@ +from __future__ import division + +import torch +import torch.nn as nn + +from .base import BaseDetector +from .test_mixins import RPNTestMixin +from .. 
import builder +from ..registry import DETECTORS +from mmdet.core import (build_assigner, bbox2roi, bbox2result, build_sampler, + merge_aug_masks) + + +@DETECTORS.register_module +class CascadeRCNN(BaseDetector, RPNTestMixin): + + def __init__(self, + num_stages, + backbone, + neck=None, + shared_head=None, + rpn_head=None, + bbox_roi_extractor=None, + bbox_head=None, + mask_roi_extractor=None, + mask_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None): + assert bbox_roi_extractor is not None + assert bbox_head is not None + super(CascadeRCNN, self).__init__() + + self.num_stages = num_stages + self.backbone = builder.build_backbone(backbone) + + if neck is not None: + self.neck = builder.build_neck(neck) + + if rpn_head is not None: + self.rpn_head = builder.build_head(rpn_head) + + if shared_head is not None: + self.shared_head = builder.build_shared_head(shared_head) + + if bbox_head is not None: + self.bbox_roi_extractor = nn.ModuleList() + self.bbox_head = nn.ModuleList() + if not isinstance(bbox_roi_extractor, list): + bbox_roi_extractor = [ + bbox_roi_extractor for _ in range(num_stages) + ] + if not isinstance(bbox_head, list): + bbox_head = [bbox_head for _ in range(num_stages)] + assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages + for roi_extractor, head in zip(bbox_roi_extractor, bbox_head): + self.bbox_roi_extractor.append( + builder.build_roi_extractor(roi_extractor)) + self.bbox_head.append(builder.build_head(head)) + + if mask_head is not None: + self.mask_head = nn.ModuleList() + if not isinstance(mask_head, list): + mask_head = [mask_head for _ in range(num_stages)] + assert len(mask_head) == self.num_stages + for head in mask_head: + self.mask_head.append(builder.build_head(head)) + if mask_roi_extractor is not None: + self.share_roi_extractor = False + self.mask_roi_extractor = nn.ModuleList() + if not isinstance(mask_roi_extractor, list): + mask_roi_extractor = [ + mask_roi_extractor for _ in range(num_stages) + ] + assert len(mask_roi_extractor) == self.num_stages + for roi_extractor in mask_roi_extractor: + self.mask_roi_extractor.append( + builder.build_roi_extractor(roi_extractor)) + else: + self.share_roi_extractor = True + self.mask_roi_extractor = self.bbox_roi_extractor + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + self.init_weights(pretrained=pretrained) + + @property + def with_rpn(self): + return hasattr(self, 'rpn_head') and self.rpn_head is not None + + def init_weights(self, pretrained=None): + super(CascadeRCNN, self).init_weights(pretrained) + self.backbone.init_weights(pretrained=pretrained) + if self.with_neck: + if isinstance(self.neck, nn.Sequential): + for m in self.neck: + m.init_weights() + else: + self.neck.init_weights() + if self.with_rpn: + self.rpn_head.init_weights() + if self.with_shared_head: + self.shared_head.init_weights(pretrained=pretrained) + for i in range(self.num_stages): + if self.with_bbox: + self.bbox_roi_extractor[i].init_weights() + self.bbox_head[i].init_weights() + if self.with_mask: + if not self.share_roi_extractor: + self.mask_roi_extractor[i].init_weights() + self.mask_head[i].init_weights() + + def extract_feat(self, img): + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def forward_train(self, + img, + img_meta, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + proposals=None): + x = self.extract_feat(img) + + losses = dict() + + if self.with_rpn: + rpn_outs = self.rpn_head(x) + rpn_loss_inputs = rpn_outs + (gt_bboxes, 
img_meta, + self.train_cfg.rpn) + rpn_losses = self.rpn_head.loss( + *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + losses.update(rpn_losses) + + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + proposal_inputs = rpn_outs + (img_meta, proposal_cfg) + proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) + else: + proposal_list = proposals + + for i in range(self.num_stages): + self.current_stage = i + rcnn_train_cfg = self.train_cfg.rcnn[i] + lw = self.train_cfg.stage_loss_weights[i] + + # assign gts and sample proposals + sampling_results = [] + if self.with_bbox or self.with_mask: + bbox_assigner = build_assigner(rcnn_train_cfg.assigner) + bbox_sampler = build_sampler( + rcnn_train_cfg.sampler, context=self) + num_imgs = img.size(0) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + + for j in range(num_imgs): + assign_result = bbox_assigner.assign( + proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], + gt_labels[j]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[j], + gt_bboxes[j], + gt_labels[j], + feats=[lvl_feat[j][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + # bbox head forward and loss + bbox_roi_extractor = self.bbox_roi_extractor[i] + bbox_head = self.bbox_head[i] + + rois = bbox2roi([res.bboxes for res in sampling_results]) + bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], + rois) + if self.with_shared_head: + bbox_feats = self.shared_head(bbox_feats) + cls_score, bbox_pred = bbox_head(bbox_feats) + + bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes, + gt_labels, rcnn_train_cfg) + loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets) + for name, value in loss_bbox.items(): + losses['s{}.{}'.format(i, name)] = ( + value * lw if 'loss' in name else value) + + # mask head forward and loss + if self.with_mask: + if not self.share_roi_extractor: + mask_roi_extractor = self.mask_roi_extractor[i] + pos_rois = bbox2roi( + [res.pos_bboxes for res in sampling_results]) + mask_feats = mask_roi_extractor( + x[:mask_roi_extractor.num_inputs], pos_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + else: + # reuse positive bbox feats + pos_inds = [] + device = bbox_feats.device + for res in sampling_results: + pos_inds.append( + torch.ones( + res.pos_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds.append( + torch.zeros( + res.neg_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds = torch.cat(pos_inds) + mask_feats = bbox_feats[pos_inds] + mask_head = self.mask_head[i] + mask_pred = mask_head(mask_feats) + mask_targets = mask_head.get_target(sampling_results, gt_masks, + rcnn_train_cfg) + pos_labels = torch.cat( + [res.pos_gt_labels for res in sampling_results]) + loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels) + for name, value in loss_mask.items(): + losses['s{}.{}'.format(i, name)] = ( + value * lw if 'loss' in name else value) + + # refine bboxes + if i < self.num_stages - 1: + pos_is_gts = [res.pos_is_gt for res in sampling_results] + roi_labels = bbox_targets[0] # bbox_targets is a tuple + with torch.no_grad(): + proposal_list = bbox_head.refine_bboxes( + rois, roi_labels, bbox_pred, pos_is_gts, img_meta) + + return losses + + def simple_test(self, img, img_meta, proposals=None, rescale=False): + x = self.extract_feat(img) + proposal_list = self.simple_test_rpn( + x, img_meta, self.test_cfg.rpn) if proposals is None else proposals + + 
img_shape = img_meta[0]['img_shape'] + ori_shape = img_meta[0]['ori_shape'] + scale_factor = img_meta[0]['scale_factor'] + + # "ms" in variable names means multi-stage + ms_bbox_result = {} + ms_segm_result = {} + ms_scores = [] + rcnn_test_cfg = self.test_cfg.rcnn + + rois = bbox2roi(proposal_list) + for i in range(self.num_stages): + bbox_roi_extractor = self.bbox_roi_extractor[i] + bbox_head = self.bbox_head[i] + + bbox_feats = bbox_roi_extractor( + x[:len(bbox_roi_extractor.featmap_strides)], rois) + if self.with_shared_head: + bbox_feats = self.shared_head(bbox_feats) + + cls_score, bbox_pred = bbox_head(bbox_feats) + ms_scores.append(cls_score) + + if self.test_cfg.keep_all_stages: + det_bboxes, det_labels = bbox_head.get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=rescale, + cfg=rcnn_test_cfg) + bbox_result = bbox2result(det_bboxes, det_labels, + bbox_head.num_classes) + ms_bbox_result['stage{}'.format(i)] = bbox_result + + if self.with_mask: + mask_roi_extractor = self.mask_roi_extractor[i] + mask_head = self.mask_head[i] + if det_bboxes.shape[0] == 0: + segm_result = [ + [] for _ in range(mask_head.num_classes - 1) + ] + else: + _bboxes = ( + det_bboxes[:, :4] * scale_factor + if rescale else det_bboxes) + mask_rois = bbox2roi([_bboxes]) + mask_feats = mask_roi_extractor( + x[:len(mask_roi_extractor.featmap_strides)], + mask_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats, i) + mask_pred = mask_head(mask_feats) + segm_result = mask_head.get_seg_masks( + mask_pred, _bboxes, det_labels, rcnn_test_cfg, + ori_shape, scale_factor, rescale) + ms_segm_result['stage{}'.format(i)] = segm_result + + if i < self.num_stages - 1: + bbox_label = cls_score.argmax(dim=1) + rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred, + img_meta[0]) + + cls_score = sum(ms_scores) / self.num_stages + det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=rescale, + cfg=rcnn_test_cfg) + bbox_result = bbox2result(det_bboxes, det_labels, + self.bbox_head[-1].num_classes) + ms_bbox_result['ensemble'] = bbox_result + + if self.with_mask: + if det_bboxes.shape[0] == 0: + segm_result = [ + [] for _ in range(self.mask_head[-1].num_classes - 1) + ] + else: + _bboxes = ( + det_bboxes[:, :4] * scale_factor + if rescale else det_bboxes) + mask_rois = bbox2roi([_bboxes]) + aug_masks = [] + for i in range(self.num_stages): + mask_roi_extractor = self.mask_roi_extractor[i] + mask_feats = mask_roi_extractor( + x[:len(mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + mask_pred = self.mask_head[i](mask_feats) + aug_masks.append(mask_pred.sigmoid().cpu().numpy()) + merged_masks = merge_aug_masks(aug_masks, + [img_meta] * self.num_stages, + self.test_cfg.rcnn) + segm_result = self.mask_head[-1].get_seg_masks( + merged_masks, _bboxes, det_labels, rcnn_test_cfg, + ori_shape, scale_factor, rescale) + ms_segm_result['ensemble'] = segm_result + + if not self.test_cfg.keep_all_stages: + if self.with_mask: + results = (ms_bbox_result['ensemble'], + ms_segm_result['ensemble']) + else: + results = ms_bbox_result['ensemble'] + else: + if self.with_mask: + results = { + stage: (ms_bbox_result[stage], ms_segm_result[stage]) + for stage in ms_bbox_result + } + else: + results = ms_bbox_result + + return results + + def aug_test(self, img, img_meta, proposals=None, rescale=False): + raise NotImplementedError + + 
def show_result(self, data, result, img_norm_cfg, **kwargs): + if self.with_mask: + ms_bbox_result, ms_segm_result = result + if isinstance(ms_bbox_result, dict): + result = (ms_bbox_result['ensemble'], + ms_segm_result['ensemble']) + else: + if isinstance(result, dict): + result = result['ensemble'] + super(CascadeRCNN, self).show_result(data, result, img_norm_cfg, + **kwargs) diff --git a/CDARTS_detection/mmdet/models/detectors/double_head_rcnn.py b/CDARTS_detection/mmdet/models/detectors/double_head_rcnn.py new file mode 100644 index 0000000..6e4e12b --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/double_head_rcnn.py @@ -0,0 +1,191 @@ +import torch + +from mmdet.core import bbox2roi, build_assigner, build_sampler +from ..registry import DETECTORS +from .two_stage import TwoStageDetector + + +@DETECTORS.register_module +class DoubleHeadRCNN(TwoStageDetector): + + def __init__(self, reg_roi_scale_factor, cls_roi_scale_factor=None, **kwargs): + super().__init__(**kwargs) + self.reg_roi_scale_factor = reg_roi_scale_factor + self.cls_roi_scale_factor = cls_roi_scale_factor + + def forward_dummy(self, img): + outs = () + # backbone + x = self.extract_feat(img) + # rpn + if self.with_rpn: + rpn_outs = self.rpn_head(x) + outs = outs + (rpn_outs, ) + proposals = torch.randn(1000, 4).cuda() + # bbox head + rois = bbox2roi([proposals]) + bbox_cls_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], + rois, + roi_scale_factor=self.cls_roi_scale_factor) + bbox_reg_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], + rois, + roi_scale_factor=self.reg_roi_scale_factor) + if self.with_shared_head: + bbox_cls_feats = self.shared_head(bbox_cls_feats) + bbox_reg_feats = self.shared_head(bbox_reg_feats) + cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) + outs += (cls_score, bbox_pred) + return outs + + def forward_train(self, + img, + img_meta, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + proposals=None): + out = self.extract_feat(img) + if len(out) >= 4: + x = out + loss_latency = None + else: + x = out[0] + loss_latency = out[1] + + losses = dict() + + # RPN forward and loss + if self.with_rpn: + rpn_outs = self.rpn_head(x) + rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, + self.train_cfg.rpn) + rpn_losses = self.rpn_head.loss( + *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + losses.update(rpn_losses) + + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + proposal_inputs = rpn_outs + (img_meta, proposal_cfg) + proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) + else: + proposal_list = proposals + + # assign gts and sample proposals + if self.with_bbox or self.with_mask: + bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner) + bbox_sampler = build_sampler( + self.train_cfg.rcnn.sampler, context=self) + num_imgs = img.size(0) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + sampling_results = [] + for i in range(num_imgs): + assign_result = bbox_assigner.assign(proposal_list[i], + gt_bboxes[i], + gt_bboxes_ignore[i], + gt_labels[i]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[i], + gt_bboxes[i], + gt_labels[i], + feats=[lvl_feat[i][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + # bbox head forward and loss + if self.with_bbox: + rois = bbox2roi([res.bboxes for res in sampling_results]) + # TODO: a more flexible way to decide which feature maps to use + 
bbox_cls_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], + rois, + roi_scale_factor=self.cls_roi_scale_factor) + bbox_reg_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], + rois, + roi_scale_factor=self.reg_roi_scale_factor) + if self.with_shared_head: + bbox_cls_feats = self.shared_head(bbox_cls_feats) + bbox_reg_feats = self.shared_head(bbox_reg_feats) + cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, + bbox_reg_feats) + + bbox_targets = self.bbox_head.get_target(sampling_results, + gt_bboxes, gt_labels, + self.train_cfg.rcnn) + loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, + *bbox_targets) + losses.update(loss_bbox) + + # mask head forward and loss + if self.with_mask: + if not self.share_roi_extractor: + pos_rois = bbox2roi( + [res.pos_bboxes for res in sampling_results]) + mask_feats = self.mask_roi_extractor( + x[:self.mask_roi_extractor.num_inputs], pos_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + else: + pos_inds = [] + device = bbox_cls_feats.device + for res in sampling_results: + pos_inds.append( + torch.ones( + res.pos_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds.append( + torch.zeros( + res.neg_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds = torch.cat(pos_inds) + mask_feats = bbox_cls_feats[pos_inds] + mask_pred = self.mask_head(mask_feats) + + mask_targets = self.mask_head.get_target(sampling_results, + gt_masks, + self.train_cfg.rcnn) + pos_labels = torch.cat( + [res.pos_gt_labels for res in sampling_results]) + loss_mask = self.mask_head.loss(mask_pred, mask_targets, + pos_labels) + losses.update(loss_mask) + + return losses, loss_latency + + def simple_test_bboxes(self, + x, + img_meta, + proposals, + rcnn_test_cfg, + rescale=False): + """Test only det bboxes without augmentation.""" + rois = bbox2roi(proposals) + bbox_cls_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], + rois, + roi_scale_factor=self.cls_roi_scale_factor) + bbox_reg_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], + rois, + roi_scale_factor=self.reg_roi_scale_factor) + if self.with_shared_head: + bbox_cls_feats = self.shared_head(bbox_cls_feats) + bbox_reg_feats = self.shared_head(bbox_reg_feats) + cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + det_bboxes, det_labels = self.bbox_head.get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=rescale, + cfg=rcnn_test_cfg) + return det_bboxes, det_labels diff --git a/CDARTS_detection/mmdet/models/detectors/fast_rcnn.py b/CDARTS_detection/mmdet/models/detectors/fast_rcnn.py new file mode 100644 index 0000000..64c0391 --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/fast_rcnn.py @@ -0,0 +1,50 @@ +from .two_stage import TwoStageDetector +from ..registry import DETECTORS + + +@DETECTORS.register_module +class FastRCNN(TwoStageDetector): + + def __init__(self, + backbone, + bbox_roi_extractor, + bbox_head, + train_cfg, + test_cfg, + neck=None, + shared_head=None, + mask_roi_extractor=None, + mask_head=None, + pretrained=None): + super(FastRCNN, self).__init__( + backbone=backbone, + neck=neck, + shared_head=shared_head, + bbox_roi_extractor=bbox_roi_extractor, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + mask_roi_extractor=mask_roi_extractor, + mask_head=mask_head, + pretrained=pretrained) + + def 
forward_test(self, imgs, img_metas, proposals, **kwargs): + for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError('{} must be a list, but got {}'.format( + name, type(var))) + + num_augs = len(imgs) + if num_augs != len(img_metas): + raise ValueError( + 'num of augmentations ({}) != num of image meta ({})'.format( + len(imgs), len(img_metas))) + # TODO: remove the restriction of imgs_per_gpu == 1 when prepared + imgs_per_gpu = imgs[0].size(0) + assert imgs_per_gpu == 1 + + if num_augs == 1: + return self.simple_test(imgs[0], img_metas[0], proposals[0], + **kwargs) + else: + return self.aug_test(imgs, img_metas, proposals, **kwargs) diff --git a/CDARTS_detection/mmdet/models/detectors/faster_rcnn.py b/CDARTS_detection/mmdet/models/detectors/faster_rcnn.py new file mode 100644 index 0000000..33314f1 --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/faster_rcnn.py @@ -0,0 +1,31 @@ +from .two_stage import TwoStageDetector +from ..registry import DETECTORS + + +@DETECTORS.register_module +class FasterRCNN(TwoStageDetector): + + def __init__(self, + backbone, + rpn_head, + bbox_roi_extractor, + bbox_head, + train_cfg, + test_cfg, + neck=None, + shared_head=None, + cls_roi_scale_factor=None, + reg_roi_scale_factor=None, + pretrained=None): + super(FasterRCNN, self).__init__( + backbone=backbone, + neck=neck, + shared_head=shared_head, + rpn_head=rpn_head, + bbox_roi_extractor=bbox_roi_extractor, + bbox_head=bbox_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + cls_roi_scale_factor = cls_roi_scale_factor, + reg_roi_scale_factor = reg_roi_scale_factor, + pretrained=pretrained) diff --git a/CDARTS_detection/mmdet/models/detectors/fcos.py b/CDARTS_detection/mmdet/models/detectors/fcos.py new file mode 100644 index 0000000..4c3dce1 --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/fcos.py @@ -0,0 +1,16 @@ +from .single_stage import SingleStageDetector +from ..registry import DETECTORS + + +@DETECTORS.register_module +class FCOS(SingleStageDetector): + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None): + super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained) diff --git a/CDARTS_detection/mmdet/models/detectors/grid_rcnn.py b/CDARTS_detection/mmdet/models/detectors/grid_rcnn.py new file mode 100644 index 0000000..49c4a33 --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/grid_rcnn.py @@ -0,0 +1,205 @@ +from .two_stage import TwoStageDetector +from ..registry import DETECTORS + +import torch + +from .. import builder +from mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler + + +@DETECTORS.register_module +class GridRCNN(TwoStageDetector): + """Grid R-CNN. 
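+
+    Construction mirrors the other two-stage detectors, plus a grid branch.
+    A hypothetical config-style sketch (placeholder dicts only, resolved
+    through ``builder``; pass ``grid_roi_extractor=None`` to reuse the bbox
+    RoI extractor)::
+
+        model = dict(
+            type='GridRCNN',
+            backbone=dict(...),
+            rpn_head=dict(...),
+            bbox_roi_extractor=dict(...),
+            bbox_head=dict(...),
+            grid_roi_extractor=dict(...),
+            grid_head=dict(...))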
+
+    This detector is the implementation of:
+    - Grid R-CNN (https://arxiv.org/abs/1811.12030)
+    - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
+    """
+
+    def __init__(self,
+                 backbone,
+                 rpn_head,
+                 bbox_roi_extractor,
+                 bbox_head,
+                 grid_roi_extractor,
+                 grid_head,
+                 train_cfg,
+                 test_cfg,
+                 neck=None,
+                 shared_head=None,
+                 pretrained=None):
+        assert grid_head is not None
+        super(GridRCNN, self).__init__(
+            backbone=backbone,
+            neck=neck,
+            shared_head=shared_head,
+            rpn_head=rpn_head,
+            bbox_roi_extractor=bbox_roi_extractor,
+            bbox_head=bbox_head,
+            train_cfg=train_cfg,
+            test_cfg=test_cfg,
+            pretrained=pretrained)
+
+        if grid_roi_extractor is not None:
+            self.grid_roi_extractor = builder.build_roi_extractor(
+                grid_roi_extractor)
+            self.share_roi_extractor = False
+        else:
+            self.share_roi_extractor = True
+            self.grid_roi_extractor = self.bbox_roi_extractor
+        self.grid_head = builder.build_head(grid_head)
+
+        self.init_extra_weights()
+
+    def init_extra_weights(self):
+        self.grid_head.init_weights()
+        if not self.share_roi_extractor:
+            self.grid_roi_extractor.init_weights()
+
+    def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):
+        """Randomly jitter positive proposals for training."""
+        for sampling_result, img_meta in zip(sampling_results, img_metas):
+            bboxes = sampling_result.pos_bboxes
+            random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(
+                -amplitude, amplitude)
+            # before jittering
+            cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
+            wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
+            # after jittering
+            new_cxcy = cxcy + wh * random_offsets[:, :2]
+            new_wh = wh * (1 + random_offsets[:, 2:])
+            # xywh to xyxy
+            new_x1y1 = (new_cxcy - new_wh / 2)
+            new_x2y2 = (new_cxcy + new_wh / 2)
+            new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)
+            # clip bboxes
+            max_shape = img_meta['img_shape']
+            if max_shape is not None:
+                new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)
+                new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)
+
+            sampling_result.pos_bboxes = new_bboxes
+        return sampling_results
+
+    def forward_train(self,
+                      img,
+                      img_meta,
+                      gt_bboxes,
+                      gt_labels,
+                      gt_bboxes_ignore=None,
+                      gt_masks=None,
+                      proposals=None):
+        x = self.extract_feat(img)
+
+        losses = dict()
+
+        # RPN forward and loss
+        if self.with_rpn:
+            rpn_outs = self.rpn_head(x)
+            rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
+                                          self.train_cfg.rpn)
+            rpn_losses = self.rpn_head.loss(
+                *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
+            losses.update(rpn_losses)
+
+            proposal_cfg = self.train_cfg.get('rpn_proposal',
+                                              self.test_cfg.rpn)
+            proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
+            proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
+        else:
+            proposal_list = proposals
+
+        if self.with_bbox:
+            # assign gts and sample proposals
+            bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
+            bbox_sampler = build_sampler(
+                self.train_cfg.rcnn.sampler, context=self)
+            num_imgs = img.size(0)
+            if gt_bboxes_ignore is None:
+                gt_bboxes_ignore = [None for _ in range(num_imgs)]
+            sampling_results = []
+            for i in range(num_imgs):
+                assign_result = bbox_assigner.assign(proposal_list[i],
+                                                     gt_bboxes[i],
+                                                     gt_bboxes_ignore[i],
+                                                     gt_labels[i])
+                sampling_result = bbox_sampler.sample(
+                    assign_result,
+                    proposal_list[i],
+                    gt_bboxes[i],
+                    gt_labels[i],
+                    feats=[lvl_feat[i][None] for lvl_feat in x])
+                sampling_results.append(sampling_result)
+
+            # bbox head forward and loss
+            rois = bbox2roi([res.bboxes for res in sampling_results])
+            # TODO: a more flexible
way to decide which feature maps to use + bbox_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], rois) + if self.with_shared_head: + bbox_feats = self.shared_head(bbox_feats) + cls_score, bbox_pred = self.bbox_head(bbox_feats) + + bbox_targets = self.bbox_head.get_target(sampling_results, + gt_bboxes, gt_labels, + self.train_cfg.rcnn) + loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, + *bbox_targets) + losses.update(loss_bbox) + + # Grid head forward and loss + sampling_results = self._random_jitter(sampling_results, img_meta) + pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) + grid_feats = self.grid_roi_extractor( + x[:self.grid_roi_extractor.num_inputs], pos_rois) + if self.with_shared_head: + grid_feats = self.shared_head(grid_feats) + # Accelerate training + max_sample_num_grid = self.train_cfg.rcnn.get('max_num_grid', 192) + sample_idx = torch.randperm( + grid_feats.shape[0])[:min(grid_feats. + shape[0], max_sample_num_grid)] + grid_feats = grid_feats[sample_idx] + + grid_pred = self.grid_head(grid_feats) + + grid_targets = self.grid_head.get_target(sampling_results, + self.train_cfg.rcnn) + grid_targets = grid_targets[sample_idx] + + loss_grid = self.grid_head.loss(grid_pred, grid_targets) + losses.update(loss_grid) + + return losses + + def simple_test(self, img, img_meta, proposals=None, rescale=False): + """Test without augmentation.""" + assert self.with_bbox, "Bbox head must be implemented." + + x = self.extract_feat(img) + + proposal_list = self.simple_test_rpn( + x, img_meta, self.test_cfg.rpn) if proposals is None else proposals + + det_bboxes, det_labels = self.simple_test_bboxes( + x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=False) + + # pack rois into bboxes + grid_rois = bbox2roi([det_bboxes[:, :4]]) + grid_feats = self.grid_roi_extractor( + x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois) + if grid_rois.shape[0] != 0: + self.grid_head.test_mode = True + grid_pred = self.grid_head(grid_feats) + det_bboxes = self.grid_head.get_bboxes(det_bboxes, + grid_pred['fused'], + img_meta) + if rescale: + det_bboxes[:, :4] /= img_meta[0]['scale_factor'] + else: + det_bboxes = torch.Tensor([]) + + bbox_results = bbox2result(det_bboxes, det_labels, + self.bbox_head.num_classes) + + return bbox_results diff --git a/CDARTS_detection/mmdet/models/detectors/htc.py b/CDARTS_detection/mmdet/models/detectors/htc.py new file mode 100644 index 0000000..0384aa9 --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/htc.py @@ -0,0 +1,396 @@ +import torch +import torch.nn.functional as F + +from .cascade_rcnn import CascadeRCNN +from .. 
import builder +from ..registry import DETECTORS +from mmdet.core import (bbox2roi, bbox2result, build_assigner, build_sampler, + merge_aug_masks) + + +@DETECTORS.register_module +class HybridTaskCascade(CascadeRCNN): + + def __init__(self, + num_stages, + backbone, + semantic_roi_extractor=None, + semantic_head=None, + semantic_fusion=('bbox', 'mask'), + interleaved=True, + mask_info_flow=True, + **kwargs): + super(HybridTaskCascade, self).__init__(num_stages, backbone, **kwargs) + assert self.with_bbox and self.with_mask + assert not self.with_shared_head # shared head not supported + if semantic_head is not None: + self.semantic_roi_extractor = builder.build_roi_extractor( + semantic_roi_extractor) + self.semantic_head = builder.build_head(semantic_head) + + self.semantic_fusion = semantic_fusion + self.interleaved = interleaved + self.mask_info_flow = mask_info_flow + + @property + def with_semantic(self): + if hasattr(self, 'semantic_head') and self.semantic_head is not None: + return True + else: + return False + + def _bbox_forward_train(self, + stage, + x, + sampling_results, + gt_bboxes, + gt_labels, + rcnn_train_cfg, + semantic_feat=None): + rois = bbox2roi([res.bboxes for res in sampling_results]) + bbox_roi_extractor = self.bbox_roi_extractor[stage] + bbox_head = self.bbox_head[stage] + bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], + rois) + # semantic feature fusion + # element-wise sum for original features and pooled semantic features + if self.with_semantic and 'bbox' in self.semantic_fusion: + bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], + rois) + if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: + bbox_semantic_feat = F.adaptive_avg_pool2d( + bbox_semantic_feat, bbox_feats.shape[-2:]) + bbox_feats += bbox_semantic_feat + + cls_score, bbox_pred = bbox_head(bbox_feats) + + bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes, + gt_labels, rcnn_train_cfg) + loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets) + return loss_bbox, rois, bbox_targets, bbox_pred + + def _mask_forward_train(self, + stage, + x, + sampling_results, + gt_masks, + rcnn_train_cfg, + semantic_feat=None): + mask_roi_extractor = self.mask_roi_extractor[stage] + mask_head = self.mask_head[stage] + pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) + mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], + pos_rois) + + # semantic feature fusion + # element-wise sum for original features and pooled semantic features + if self.with_semantic and 'mask' in self.semantic_fusion: + mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], + pos_rois) + if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: + mask_semantic_feat = F.adaptive_avg_pool2d( + mask_semantic_feat, mask_feats.shape[-2:]) + mask_feats += mask_semantic_feat + + # mask information flow + # forward all previous mask heads to obtain last_feat, and fuse it + # with the normal mask feature + if self.mask_info_flow: + last_feat = None + for i in range(stage): + last_feat = self.mask_head[i]( + mask_feats, last_feat, return_logits=False) + mask_pred = mask_head(mask_feats, last_feat, return_feat=False) + else: + mask_pred = mask_head(mask_feats) + + mask_targets = mask_head.get_target(sampling_results, gt_masks, + rcnn_train_cfg) + pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) + loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels) + return loss_mask + + def _bbox_forward_test(self, stage, x, rois, 
semantic_feat=None): + bbox_roi_extractor = self.bbox_roi_extractor[stage] + bbox_head = self.bbox_head[stage] + bbox_feats = bbox_roi_extractor( + x[:len(bbox_roi_extractor.featmap_strides)], rois) + if self.with_semantic and 'bbox' in self.semantic_fusion: + bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], + rois) + if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: + bbox_semantic_feat = F.adaptive_avg_pool2d( + bbox_semantic_feat, bbox_feats.shape[-2:]) + bbox_feats += bbox_semantic_feat + cls_score, bbox_pred = bbox_head(bbox_feats) + return cls_score, bbox_pred + + def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None): + mask_roi_extractor = self.mask_roi_extractor[stage] + mask_head = self.mask_head[stage] + mask_rois = bbox2roi([bboxes]) + mask_feats = mask_roi_extractor( + x[:len(mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_semantic and 'mask' in self.semantic_fusion: + mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], + mask_rois) + if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: + mask_semantic_feat = F.adaptive_avg_pool2d( + mask_semantic_feat, mask_feats.shape[-2:]) + mask_feats += mask_semantic_feat + if self.mask_info_flow: + last_feat = None + last_pred = None + for i in range(stage): + mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat) + if last_pred is not None: + mask_pred = mask_pred + last_pred + last_pred = mask_pred + mask_pred = mask_head(mask_feats, last_feat, return_feat=False) + if last_pred is not None: + mask_pred = mask_pred + last_pred + else: + mask_pred = mask_head(mask_feats) + return mask_pred + + def forward_train(self, + img, + img_meta, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + gt_semantic_seg=None, + proposals=None): + x = self.extract_feat(img) + + losses = dict() + + # RPN part, the same as normal two-stage detectors + if self.with_rpn: + rpn_outs = self.rpn_head(x) + rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, + self.train_cfg.rpn) + rpn_losses = self.rpn_head.loss( + *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + losses.update(rpn_losses) + + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + proposal_inputs = rpn_outs + (img_meta, proposal_cfg) + proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) + else: + proposal_list = proposals + + # semantic segmentation part + # 2 outputs: segmentation prediction and embedded features + if self.with_semantic: + semantic_pred, semantic_feat = self.semantic_head(x) + loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) + losses['loss_semantic_seg'] = loss_seg + else: + semantic_feat = None + + for i in range(self.num_stages): + self.current_stage = i + rcnn_train_cfg = self.train_cfg.rcnn[i] + lw = self.train_cfg.stage_loss_weights[i] + + # assign gts and sample proposals + sampling_results = [] + bbox_assigner = build_assigner(rcnn_train_cfg.assigner) + bbox_sampler = build_sampler(rcnn_train_cfg.sampler, context=self) + num_imgs = img.size(0) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + + for j in range(num_imgs): + assign_result = bbox_assigner.assign( + proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], + gt_labels[j]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[j], + gt_bboxes[j], + gt_labels[j], + feats=[lvl_feat[j][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + # bbox head forward and loss + loss_bbox, rois, bbox_targets, 
bbox_pred = \ + self._bbox_forward_train( + i, x, sampling_results, gt_bboxes, gt_labels, + rcnn_train_cfg, semantic_feat) + roi_labels = bbox_targets[0] + + for name, value in loss_bbox.items(): + losses['s{}.{}'.format(i, name)] = ( + value * lw if 'loss' in name else value) + + # mask head forward and loss + if self.with_mask: + # interleaved execution: use regressed bboxes by the box branch + # to train the mask branch + if self.interleaved: + pos_is_gts = [res.pos_is_gt for res in sampling_results] + with torch.no_grad(): + proposal_list = self.bbox_head[i].refine_bboxes( + rois, roi_labels, bbox_pred, pos_is_gts, img_meta) + # re-assign and sample 512 RoIs from 512 RoIs + sampling_results = [] + for j in range(num_imgs): + assign_result = bbox_assigner.assign( + proposal_list[j], gt_bboxes[j], + gt_bboxes_ignore[j], gt_labels[j]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[j], + gt_bboxes[j], + gt_labels[j], + feats=[lvl_feat[j][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + loss_mask = self._mask_forward_train(i, x, sampling_results, + gt_masks, rcnn_train_cfg, + semantic_feat) + for name, value in loss_mask.items(): + losses['s{}.{}'.format(i, name)] = ( + value * lw if 'loss' in name else value) + + # refine bboxes (same as Cascade R-CNN) + if i < self.num_stages - 1 and not self.interleaved: + pos_is_gts = [res.pos_is_gt for res in sampling_results] + with torch.no_grad(): + proposal_list = self.bbox_head[i].refine_bboxes( + rois, roi_labels, bbox_pred, pos_is_gts, img_meta) + + return losses + + def simple_test(self, img, img_meta, proposals=None, rescale=False): + x = self.extract_feat(img) + proposal_list = self.simple_test_rpn( + x, img_meta, self.test_cfg.rpn) if proposals is None else proposals + + if self.with_semantic: + _, semantic_feat = self.semantic_head(x) + else: + semantic_feat = None + + img_shape = img_meta[0]['img_shape'] + ori_shape = img_meta[0]['ori_shape'] + scale_factor = img_meta[0]['scale_factor'] + + # "ms" in variable names means multi-stage + ms_bbox_result = {} + ms_segm_result = {} + ms_scores = [] + rcnn_test_cfg = self.test_cfg.rcnn + + rois = bbox2roi(proposal_list) + for i in range(self.num_stages): + bbox_head = self.bbox_head[i] + cls_score, bbox_pred = self._bbox_forward_test( + i, x, rois, semantic_feat=semantic_feat) + ms_scores.append(cls_score) + + if self.test_cfg.keep_all_stages: + det_bboxes, det_labels = bbox_head.get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=rescale, + nms_cfg=rcnn_test_cfg) + bbox_result = bbox2result(det_bboxes, det_labels, + bbox_head.num_classes) + ms_bbox_result['stage{}'.format(i)] = bbox_result + + if self.with_mask: + mask_head = self.mask_head[i] + if det_bboxes.shape[0] == 0: + segm_result = [ + [] for _ in range(mask_head.num_classes - 1) + ] + else: + _bboxes = ( + det_bboxes[:, :4] * scale_factor + if rescale else det_bboxes) + mask_pred = self._mask_forward_test( + i, x, _bboxes, semantic_feat=semantic_feat) + segm_result = mask_head.get_seg_masks( + mask_pred, _bboxes, det_labels, rcnn_test_cfg, + ori_shape, scale_factor, rescale) + ms_segm_result['stage{}'.format(i)] = segm_result + + if i < self.num_stages - 1: + bbox_label = cls_score.argmax(dim=1) + rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred, + img_meta[0]) + + cls_score = sum(ms_scores) / float(len(ms_scores)) + det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + 
rescale=rescale, + cfg=rcnn_test_cfg) + bbox_result = bbox2result(det_bboxes, det_labels, + self.bbox_head[-1].num_classes) + ms_bbox_result['ensemble'] = bbox_result + + if self.with_mask: + if det_bboxes.shape[0] == 0: + segm_result = [ + [] for _ in range(self.mask_head[-1].num_classes - 1) + ] + else: + _bboxes = ( + det_bboxes[:, :4] * scale_factor + if rescale else det_bboxes) + + mask_rois = bbox2roi([_bboxes]) + aug_masks = [] + mask_roi_extractor = self.mask_roi_extractor[-1] + mask_feats = mask_roi_extractor( + x[:len(mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_semantic and 'mask' in self.semantic_fusion: + mask_semantic_feat = self.semantic_roi_extractor( + [semantic_feat], mask_rois) + mask_feats += mask_semantic_feat + last_feat = None + for i in range(self.num_stages): + mask_head = self.mask_head[i] + if self.mask_info_flow: + mask_pred, last_feat = mask_head(mask_feats, last_feat) + else: + mask_pred = mask_head(mask_feats) + aug_masks.append(mask_pred.sigmoid().cpu().numpy()) + merged_masks = merge_aug_masks(aug_masks, + [img_meta] * self.num_stages, + self.test_cfg.rcnn) + segm_result = self.mask_head[-1].get_seg_masks( + merged_masks, _bboxes, det_labels, rcnn_test_cfg, + ori_shape, scale_factor, rescale) + ms_segm_result['ensemble'] = segm_result + + if not self.test_cfg.keep_all_stages: + if self.with_mask: + results = (ms_bbox_result['ensemble'], + ms_segm_result['ensemble']) + else: + results = ms_bbox_result['ensemble'] + else: + if self.with_mask: + results = { + stage: (ms_bbox_result[stage], ms_segm_result[stage]) + for stage in ms_bbox_result + } + else: + results = ms_bbox_result + + return results + + def aug_test(self, img, img_meta, proposals=None, rescale=False): + raise NotImplementedError diff --git a/CDARTS_detection/mmdet/models/detectors/mask_rcnn.py b/CDARTS_detection/mmdet/models/detectors/mask_rcnn.py new file mode 100644 index 0000000..003e87f --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/mask_rcnn.py @@ -0,0 +1,31 @@ +from .two_stage import TwoStageDetector +from ..registry import DETECTORS + + +@DETECTORS.register_module +class MaskRCNN(TwoStageDetector): + + def __init__(self, + backbone, + rpn_head, + bbox_roi_extractor, + bbox_head, + mask_roi_extractor, + mask_head, + train_cfg, + test_cfg, + neck=None, + shared_head=None, + pretrained=None): + super(MaskRCNN, self).__init__( + backbone=backbone, + neck=neck, + shared_head=shared_head, + rpn_head=rpn_head, + bbox_roi_extractor=bbox_roi_extractor, + bbox_head=bbox_head, + mask_roi_extractor=mask_roi_extractor, + mask_head=mask_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained) diff --git a/CDARTS_detection/mmdet/models/detectors/mask_scoring_rcnn.py b/CDARTS_detection/mmdet/models/detectors/mask_scoring_rcnn.py new file mode 100644 index 0000000..9153bcd --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/mask_scoring_rcnn.py @@ -0,0 +1,197 @@ +import torch + +from mmdet.core import bbox2roi, build_assigner, build_sampler +from .two_stage import TwoStageDetector +from .. import builder +from ..registry import DETECTORS + + +@DETECTORS.register_module +class MaskScoringRCNN(TwoStageDetector): + """Mask Scoring RCNN. 
+ + https://arxiv.org/abs/1903.00241 + """ + + def __init__(self, + backbone, + rpn_head, + bbox_roi_extractor, + bbox_head, + mask_roi_extractor, + mask_head, + train_cfg, + test_cfg, + neck=None, + shared_head=None, + mask_iou_head=None, + pretrained=None): + super(MaskScoringRCNN, self).__init__( + backbone=backbone, + neck=neck, + shared_head=shared_head, + rpn_head=rpn_head, + bbox_roi_extractor=bbox_roi_extractor, + bbox_head=bbox_head, + mask_roi_extractor=mask_roi_extractor, + mask_head=mask_head, + train_cfg=train_cfg, + test_cfg=test_cfg, + pretrained=pretrained) + + self.mask_iou_head = builder.build_head(mask_iou_head) + self.mask_iou_head.init_weights() + + # TODO: refactor forward_train in two stage to reduce code redundancy + def forward_train(self, + img, + img_meta, + gt_bboxes, + gt_labels, + gt_bboxes_ignore=None, + gt_masks=None, + proposals=None): + x = self.extract_feat(img) + + losses = dict() + + # RPN forward and loss + if self.with_rpn: + rpn_outs = self.rpn_head(x) + rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, + self.train_cfg.rpn) + rpn_losses = self.rpn_head.loss( + *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + losses.update(rpn_losses) + + proposal_cfg = self.train_cfg.get('rpn_proposal', + self.test_cfg.rpn) + proposal_inputs = rpn_outs + (img_meta, proposal_cfg) + proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) + else: + proposal_list = proposals + + # assign gts and sample proposals + if self.with_bbox or self.with_mask: + bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner) + bbox_sampler = build_sampler( + self.train_cfg.rcnn.sampler, context=self) + num_imgs = img.size(0) + if gt_bboxes_ignore is None: + gt_bboxes_ignore = [None for _ in range(num_imgs)] + sampling_results = [] + for i in range(num_imgs): + assign_result = bbox_assigner.assign(proposal_list[i], + gt_bboxes[i], + gt_bboxes_ignore[i], + gt_labels[i]) + sampling_result = bbox_sampler.sample( + assign_result, + proposal_list[i], + gt_bboxes[i], + gt_labels[i], + feats=[lvl_feat[i][None] for lvl_feat in x]) + sampling_results.append(sampling_result) + + # bbox head forward and loss + if self.with_bbox: + rois = bbox2roi([res.bboxes for res in sampling_results]) + # TODO: a more flexible way to decide which feature maps to use + bbox_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], rois) + if self.with_shared_head: + bbox_feats = self.shared_head(bbox_feats) + cls_score, bbox_pred = self.bbox_head(bbox_feats) + + bbox_targets = self.bbox_head.get_target(sampling_results, + gt_bboxes, gt_labels, + self.train_cfg.rcnn) + loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, + *bbox_targets) + losses.update(loss_bbox) + + # mask head forward and loss + if self.with_mask: + if not self.share_roi_extractor: + pos_rois = bbox2roi( + [res.pos_bboxes for res in sampling_results]) + mask_feats = self.mask_roi_extractor( + x[:self.mask_roi_extractor.num_inputs], pos_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + else: + pos_inds = [] + device = bbox_feats.device + for res in sampling_results: + pos_inds.append( + torch.ones( + res.pos_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds.append( + torch.zeros( + res.neg_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds = torch.cat(pos_inds) + mask_feats = bbox_feats[pos_inds] + mask_pred = self.mask_head(mask_feats) + + mask_targets = self.mask_head.get_target(sampling_results, + gt_masks, + self.train_cfg.rcnn) + pos_labels = 
torch.cat( + [res.pos_gt_labels for res in sampling_results]) + loss_mask = self.mask_head.loss(mask_pred, mask_targets, + pos_labels) + losses.update(loss_mask) + + # mask iou head forward and loss + pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels] + mask_iou_pred = self.mask_iou_head(mask_feats, pos_mask_pred) + pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0) + ), pos_labels] + mask_iou_targets = self.mask_iou_head.get_target( + sampling_results, gt_masks, pos_mask_pred, mask_targets, + self.train_cfg.rcnn) + loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred, + mask_iou_targets) + losses.update(loss_mask_iou) + return losses + + def simple_test_mask(self, + x, + img_meta, + det_bboxes, + det_labels, + rescale=False): + # image shape of the first image in the batch (only one) + ori_shape = img_meta[0]['ori_shape'] + scale_factor = img_meta[0]['scale_factor'] + + if det_bboxes.shape[0] == 0: + segm_result = [[] for _ in range(self.mask_head.num_classes - 1)] + mask_scores = [[] for _ in range(self.mask_head.num_classes - 1)] + else: + # if det_bboxes is rescaled to the original image size, we need to + # rescale it back to the testing scale to obtain RoIs. + _bboxes = ( + det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) + mask_rois = bbox2roi([_bboxes]) + mask_feats = self.mask_roi_extractor( + x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + mask_pred = self.mask_head(mask_feats) + segm_result = self.mask_head.get_seg_masks(mask_pred, _bboxes, + det_labels, + self.test_cfg.rcnn, + ori_shape, scale_factor, + rescale) + # get mask scores with mask iou head + mask_iou_pred = self.mask_iou_head( + mask_feats, + mask_pred[range(det_labels.size(0)), det_labels + 1]) + mask_scores = self.mask_iou_head.get_mask_scores( + mask_iou_pred, det_bboxes, det_labels) + return segm_result, mask_scores diff --git a/CDARTS_detection/mmdet/models/detectors/retinanet.py b/CDARTS_detection/mmdet/models/detectors/retinanet.py new file mode 100644 index 0000000..0e5b6fd --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/retinanet.py @@ -0,0 +1,16 @@ +from .single_stage import SingleStageDetector +from ..registry import DETECTORS + + +@DETECTORS.register_module +class RetinaNet(SingleStageDetector): + + def __init__(self, + backbone, + neck, + bbox_head, + train_cfg=None, + test_cfg=None, + pretrained=None): + super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg, + test_cfg, pretrained) diff --git a/CDARTS_detection/mmdet/models/detectors/rpn.py b/CDARTS_detection/mmdet/models/detectors/rpn.py new file mode 100644 index 0000000..51043af --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/rpn.py @@ -0,0 +1,92 @@ +import mmcv + +from mmdet.core import tensor2imgs, bbox_mapping +from .base import BaseDetector +from .test_mixins import RPNTestMixin +from .. 
import builder +from ..registry import DETECTORS + + +@DETECTORS.register_module +class RPN(BaseDetector, RPNTestMixin): + + def __init__(self, + backbone, + neck, + rpn_head, + train_cfg, + test_cfg, + pretrained=None): + super(RPN, self).__init__() + self.backbone = builder.build_backbone(backbone) + self.neck = builder.build_neck(neck) if neck is not None else None + self.rpn_head = builder.build_head(rpn_head) + self.train_cfg = train_cfg + self.test_cfg = test_cfg + self.init_weights(pretrained=pretrained) + + def init_weights(self, pretrained=None): + super(RPN, self).init_weights(pretrained) + self.backbone.init_weights(pretrained=pretrained) + if self.with_neck: + self.neck.init_weights() + self.rpn_head.init_weights() + + def extract_feat(self, img): + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def forward_train(self, + img, + img_meta, + gt_bboxes=None, + gt_bboxes_ignore=None): + if self.train_cfg.rpn.get('debug', False): + self.rpn_head.debug_imgs = tensor2imgs(img) + + x = self.extract_feat(img) + rpn_outs = self.rpn_head(x) + + rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn) + losses = self.rpn_head.loss( + *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) + return losses + + def simple_test(self, img, img_meta, rescale=False): + x = self.extract_feat(img) + proposal_list = self.simple_test_rpn(x, img_meta, self.test_cfg.rpn) + if rescale: + for proposals, meta in zip(proposal_list, img_meta): + proposals[:, :4] /= meta['scale_factor'] + # TODO: remove this restriction + return proposal_list[0].cpu().numpy() + + def aug_test(self, imgs, img_metas, rescale=False): + proposal_list = self.aug_test_rpn( + self.extract_feats(imgs), img_metas, self.test_cfg.rpn) + if not rescale: + for proposals, img_meta in zip(proposal_list, img_metas[0]): + img_shape = img_meta['img_shape'] + scale_factor = img_meta['scale_factor'] + flip = img_meta['flip'] + proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape, + scale_factor, flip) + # TODO: remove this restriction + return proposal_list[0].cpu().numpy() + + def show_result(self, data, result, img_norm_cfg, dataset=None, top_k=20): + """Show RPN proposals on the image. + + Although we assume batch size is 1, this method supports arbitrary + batch size. + """ + img_tensor = data['img'][0] + img_metas = data['img_meta'][0].data[0] + imgs = tensor2imgs(img_tensor, **img_norm_cfg) + assert len(imgs) == len(img_metas) + for img, img_meta in zip(imgs, img_metas): + h, w, _ = img_meta['img_shape'] + img_show = img[:h, :w, :] + mmcv.imshow_bboxes(img_show, result, top_k=top_k) diff --git a/CDARTS_detection/mmdet/models/detectors/single_stage.py b/CDARTS_detection/mmdet/models/detectors/single_stage.py new file mode 100644 index 0000000..c2f542f --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/single_stage.py @@ -0,0 +1,95 @@ +import torch.nn as nn + +from .base import BaseDetector +from .. 
import builder
+from ..registry import DETECTORS
+from mmdet.core import bbox2result
+
+
+@DETECTORS.register_module
+class SingleStageDetector(BaseDetector):
+
+    def __init__(self,
+                 backbone,
+                 neck=None,
+                 bbox_head=None,
+                 train_cfg=None,
+                 test_cfg=None,
+                 pretrained=None):
+        super(SingleStageDetector, self).__init__()
+        self.backbone = builder.build_backbone(backbone)
+        if neck is not None:
+            self.neck = builder.build_neck(neck)
+        self.bbox_head = builder.build_head(bbox_head)
+        self.train_cfg = train_cfg
+        self.test_cfg = test_cfg
+        self.init_weights(pretrained=pretrained)
+
+    def init_weights(self, pretrained=None):
+        super(SingleStageDetector, self).init_weights(pretrained)
+        self.backbone.init_weights(pretrained=pretrained)
+        if self.with_neck:
+            if isinstance(self.neck, nn.Sequential):
+                for m in self.neck:
+                    m.init_weights()
+            else:
+                self.neck.init_weights()
+        self.bbox_head.init_weights()
+
+    def extract_feat(self, img):
+        x = self.backbone(img)
+        if self.with_neck:
+            x = self.neck(x)
+        return x
+
+    def forward_dummy(self, img):
+        """Used for computing network flops.
+
+        See `mmdetection/tools/get_flops.py`
+        """
+        x = self.extract_feat(img)
+        outs = self.bbox_head(x)
+        return outs
+
+    def forward_train(self,
+                      img,
+                      img_metas,
+                      gt_bboxes,
+                      gt_labels,
+                      gt_bboxes_ignore=None):
+        out = self.extract_feat(img)
+
+        if len(out) >= 4:
+            x = out
+            loss_latency = None
+        else:
+            x = out[0]
+            loss_latency = out[1]
+
+        outs = self.bbox_head(x)
+        # return outs
+        loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
+        losses = self.bbox_head.loss(
+            *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
+
+        return losses, loss_latency
+
+    def simple_test(self, img, img_meta, rescale=False):
+        out = self.extract_feat(img)
+
+        if len(out) >= 4:
+            x = out
+        else:
+            x = out[0]
+
+        outs = self.bbox_head(x)
+        bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
+        bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
+        bbox_results = [
+            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
+            for det_bboxes, det_labels in bbox_list
+        ]
+        return bbox_results[0]
+
+    def aug_test(self, imgs, img_metas, rescale=False):
+        raise NotImplementedError
diff --git a/CDARTS_detection/mmdet/models/detectors/test_mixins.py b/CDARTS_detection/mmdet/models/detectors/test_mixins.py
new file mode 100644
index 0000000..975d475
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/detectors/test_mixins.py
@@ -0,0 +1,163 @@
+from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_proposals,
+                        merge_aug_bboxes, merge_aug_masks, multiclass_nms)
+
+
+class RPNTestMixin(object):
+
+    def simple_test_rpn(self, x, img_meta, rpn_test_cfg):
+        rpn_outs = self.rpn_head(x)
+        proposal_inputs = rpn_outs + (img_meta, rpn_test_cfg)
+        proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
+        return proposal_list
+
+    def aug_test_rpn(self, feats, img_metas, rpn_test_cfg):
+        imgs_per_gpu = len(img_metas[0])
+        aug_proposals = [[] for _ in range(imgs_per_gpu)]
+        for x, img_meta in zip(feats, img_metas):
+            proposal_list = self.simple_test_rpn(x, img_meta, rpn_test_cfg)
+            for i, proposals in enumerate(proposal_list):
+                aug_proposals[i].append(proposals)
+        # reorganize the order of 'img_metas' to match the dimensions
+        # of 'aug_proposals'
+        aug_img_metas = []
+        for i in range(imgs_per_gpu):
+            aug_img_meta = []
+            for j in range(len(img_metas)):
+                aug_img_meta.append(img_metas[j][i])
+            aug_img_metas.append(aug_img_meta)
+        # after merging, proposals will be rescaled to the original image size
+        merged_proposals = [
+ merge_aug_proposals(proposals, aug_img_meta, rpn_test_cfg) + for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas) + ] + return merged_proposals + + +class BBoxTestMixin(object): + + def simple_test_bboxes(self, + x, + img_meta, + proposals, + rcnn_test_cfg, + rescale=False): + """Test only det bboxes without augmentation.""" + rois = bbox2roi(proposals) + roi_feats = self.bbox_roi_extractor( + x[:len(self.bbox_roi_extractor.featmap_strides)], rois) + if self.with_shared_head: + roi_feats = self.shared_head(roi_feats) + out_list = self.bbox_head(roi_feats) + cls_score = out_list[0] + bbox_pred = out_list[1] + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + det_bboxes, det_labels = self.bbox_head.get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=rescale, + cfg=rcnn_test_cfg) + return det_bboxes, det_labels + + def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): + aug_bboxes = [] + aug_scores = [] + for x, img_meta in zip(feats, img_metas): + # only one image in the batch + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + # TODO more flexible + proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, + scale_factor, flip) + rois = bbox2roi([proposals]) + # recompute feature maps to save GPU memory + roi_feats = self.bbox_roi_extractor( + x[:len(self.bbox_roi_extractor.featmap_strides)], rois) + if self.with_shared_head: + roi_feats = self.shared_head(roi_feats) + cls_score, bbox_pred = self.bbox_head(roi_feats) + bboxes, scores = self.bbox_head.get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=False, + cfg=None) + aug_bboxes.append(bboxes) + aug_scores.append(scores) + # after merging, bboxes will be rescaled to the original image size + merged_bboxes, merged_scores = merge_aug_bboxes( + aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) + det_bboxes, det_labels = multiclass_nms( + merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, + rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) + return det_bboxes, det_labels + + +class MaskTestMixin(object): + + def simple_test_mask(self, + x, + img_meta, + det_bboxes, + det_labels, + rescale=False): + # image shape of the first image in the batch (only one) + ori_shape = img_meta[0]['ori_shape'] + scale_factor = img_meta[0]['scale_factor'] + if det_bboxes.shape[0] == 0: + segm_result = [[] for _ in range(self.mask_head.num_classes - 1)] + else: + # if det_bboxes is rescaled to the original image size, we need to + # rescale it back to the testing scale to obtain RoIs. 
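+            # (scale_factor is the resized / original ratio stored in
+            # img_meta; RPN.simple_test above divides by it to map proposals
+            # the other way, so multiplying here returns the boxes to the
+            # network input scale before RoI pooling.)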
+ _bboxes = ( + det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) + mask_rois = bbox2roi([_bboxes]) + mask_feats = self.mask_roi_extractor( + x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + mask_pred = self.mask_head(mask_feats) + segm_result = self.mask_head.get_seg_masks( + mask_pred, _bboxes, det_labels, self.test_cfg.rcnn, ori_shape, + scale_factor, rescale) + return segm_result + + def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): + if det_bboxes.shape[0] == 0: + segm_result = [[] for _ in range(self.mask_head.num_classes - 1)] + else: + aug_masks = [] + for x, img_meta in zip(feats, img_metas): + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + flip = img_meta[0]['flip'] + _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, + scale_factor, flip) + mask_rois = bbox2roi([_bboxes]) + mask_feats = self.mask_roi_extractor( + x[:len(self.mask_roi_extractor.featmap_strides)], + mask_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + mask_pred = self.mask_head(mask_feats) + # convert to numpy array to save memory + aug_masks.append(mask_pred.sigmoid().cpu().numpy()) + merged_masks = merge_aug_masks(aug_masks, img_metas, + self.test_cfg.rcnn) + + ori_shape = img_metas[0][0]['ori_shape'] + segm_result = self.mask_head.get_seg_masks( + merged_masks, + det_bboxes, + det_labels, + self.test_cfg.rcnn, + ori_shape, + scale_factor=1.0, + rescale=False) + return segm_result diff --git a/CDARTS_detection/mmdet/models/detectors/two_stage.py b/CDARTS_detection/mmdet/models/detectors/two_stage.py new file mode 100644 index 0000000..1b27197 --- /dev/null +++ b/CDARTS_detection/mmdet/models/detectors/two_stage.py @@ -0,0 +1,347 @@ +import torch +import torch.nn as nn + +from .base import BaseDetector +from .test_mixins import RPNTestMixin, BBoxTestMixin, MaskTestMixin +from .. 
import builder +from ..registry import DETECTORS +from mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler + + +@DETECTORS.register_module +class TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin, + MaskTestMixin): + + def __init__(self, + backbone, + neck=None, + shared_head=None, + rpn_head=None, + bbox_roi_extractor=None, + bbox_head=None, + mask_roi_extractor=None, + mask_head=None, + train_cfg=None, + test_cfg=None, + cls_roi_scale_factor=None, + reg_roi_scale_factor=None, + pretrained=None): + super(TwoStageDetector, self).__init__() + self.backbone = builder.build_backbone(backbone) + self.cls_roi_scale_factor = cls_roi_scale_factor + self.reg_roi_scale_factor = reg_roi_scale_factor + + if neck is not None: + self.neck = builder.build_neck(neck) + + if shared_head is not None: + self.shared_head = builder.build_shared_head(shared_head) + + if rpn_head is not None: + self.rpn_head = builder.build_head(rpn_head) + + if bbox_head is not None: + self.bbox_roi_extractor = builder.build_roi_extractor( + bbox_roi_extractor) + self.bbox_head = builder.build_head(bbox_head) + + if mask_head is not None: + if mask_roi_extractor is not None: + self.mask_roi_extractor = builder.build_roi_extractor( + mask_roi_extractor) + self.share_roi_extractor = False + else: + self.share_roi_extractor = True + self.mask_roi_extractor = self.bbox_roi_extractor + self.mask_head = builder.build_head(mask_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + self.init_weights(pretrained=pretrained) + + @property + def with_rpn(self): + return hasattr(self, 'rpn_head') and self.rpn_head is not None + + def init_weights(self, pretrained=None): + super(TwoStageDetector, self).init_weights(pretrained) + self.backbone.init_weights(pretrained=pretrained) + if self.with_neck: + if isinstance(self.neck, nn.Sequential): + for m in self.neck: + m.init_weights() + else: + self.neck.init_weights() + if self.with_shared_head: + self.shared_head.init_weights(pretrained=pretrained) + if self.with_rpn: + self.rpn_head.init_weights() + if self.with_bbox: + self.bbox_roi_extractor.init_weights() + self.bbox_head.init_weights() + if self.with_mask: + self.mask_head.init_weights() + if not self.share_roi_extractor: + self.mask_roi_extractor.init_weights() + + def extract_feat(self, img): + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + if len(x) >= 2: + if x[1] is not None: + x = x + else: + x = x[0] + return x + + def forward_dummy(self, img): + """Used for computing network flops. 
+
+        See `mmdetection/tools/get_flops.py`
+        """
+        outs = ()
+        # backbone
+        x = self.extract_feat(img)
+        # rpn
+        if self.with_rpn:
+            rpn_outs = self.rpn_head(x)
+            outs = outs + (rpn_outs, )
+        proposals = torch.randn(1000, 4).cuda()
+        # bbox head
+        rois = bbox2roi([proposals])
+        if self.with_bbox:
+            bbox_feats = self.bbox_roi_extractor(
+                x[:self.bbox_roi_extractor.num_inputs], rois)
+            if self.with_shared_head:
+                bbox_feats = self.shared_head(bbox_feats)
+            cls_score, bbox_pred = self.bbox_head(bbox_feats)
+            outs = outs + (cls_score, bbox_pred)
+        # mask head
+        if self.with_mask:
+            mask_rois = rois[:100]
+            mask_feats = self.mask_roi_extractor(
+                x[:self.mask_roi_extractor.num_inputs], mask_rois)
+            if self.with_shared_head:
+                mask_feats = self.shared_head(mask_feats)
+            mask_pred = self.mask_head(mask_feats)
+            outs = outs + (mask_pred, )
+        return outs
+
+    def forward_train(self,
+                      img,
+                      img_meta,
+                      gt_bboxes,
+                      gt_labels,
+                      gt_bboxes_ignore=None,
+                      gt_masks=None,
+                      proposals=None):
+        out = self.extract_feat(img)
+        if len(out) >= 4:
+            x = out
+            loss_latency = None
+        else:
+            x = out[0]
+            loss_latency = out[1]
+
+        losses = dict()
+
+        # RPN forward and loss
+        if self.with_rpn:
+            rpn_outs = self.rpn_head(x)
+            # return rpn_outs
+            rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
+                                          self.train_cfg.rpn)
+            rpn_losses = self.rpn_head.loss(
+                *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
+            losses.update(rpn_losses)
+
+            proposal_cfg = self.train_cfg.get('rpn_proposal',
+                                              self.test_cfg.rpn)
+            proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
+            proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
+        else:
+            proposal_list = proposals
+
+        # assign gts and sample proposals
+        if self.with_bbox or self.with_mask:
+            bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
+            bbox_sampler = build_sampler(
+                self.train_cfg.rcnn.sampler, context=self)
+            num_imgs = img.size(0)
+            if gt_bboxes_ignore is None:
+                gt_bboxes_ignore = [None for _ in range(num_imgs)]
+            sampling_results = []
+            for i in range(num_imgs):
+                assign_result = bbox_assigner.assign(
+                    proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
+                    gt_labels[i])
+                sampling_result = bbox_sampler.sample(
+                    assign_result,
+                    proposal_list[i],
+                    gt_bboxes[i],
+                    gt_labels[i],
+                    feats=[lvl_feat[i][None] for lvl_feat in x])
+                sampling_results.append(sampling_result)
+
+        # bbox head forward and loss
+        if self.with_bbox:
+            rois = bbox2roi([res.bboxes for res in sampling_results])
+            # TODO: a more flexible way to decide which feature maps to use
+            bbox_feats = self.bbox_roi_extractor(
+                x[:self.bbox_roi_extractor.num_inputs], rois)
+            '''
+            bbox_feats_cls = self.bbox_roi_extractor(
+                x[:self.bbox_roi_extractor.num_inputs],
+                rois,
+                roi_scale_factor=self.cls_roi_scale_factor)
+            bbox_feats_reg = self.bbox_roi_extractor(
+                x[:self.bbox_roi_extractor.num_inputs],
+                rois,
+                roi_scale_factor=self.reg_roi_scale_factor)
+            '''
+            if self.with_shared_head:
+                bbox_feats = self.shared_head(bbox_feats)
+
+            cls_score, bbox_pred, loss_latency_head = self.bbox_head(bbox_feats)
+            if loss_latency_head is not None:
+                if loss_latency is not None:
+                    loss_latency = loss_latency + loss_latency_head
+                else:
+                    loss_latency = loss_latency_head
+            # cls_score, bbox_pred = self.bbox_head((bbox_feats_cls, bbox_feats_reg))
+
+            bbox_targets = self.bbox_head.get_target(
+                sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn)
+            loss_bbox = self.bbox_head.loss(cls_score, bbox_pred,
+                                            *bbox_targets)
+            losses.update(loss_bbox)
+
+        # mask head forward and loss
+        if self.with_mask:
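+            # When no separate mask RoI extractor was configured,
+            # share_roi_extractor is True (see __init__ above) and the mask
+            # branch reuses bbox_feats directly: the rows belonging to
+            # positive proposals are sliced out instead of re-pooling
+            # features for pos_rois.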
if not self.share_roi_extractor: + pos_rois = bbox2roi( + [res.pos_bboxes for res in sampling_results]) + mask_feats = self.mask_roi_extractor( + x[:self.mask_roi_extractor.num_inputs], pos_rois) + if self.with_shared_head: + mask_feats = self.shared_head(mask_feats) + else: + pos_inds = [] + device = bbox_feats.device + for res in sampling_results: + pos_inds.append( + torch.ones( + res.pos_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds.append( + torch.zeros( + res.neg_bboxes.shape[0], + device=device, + dtype=torch.uint8)) + pos_inds = torch.cat(pos_inds) + mask_feats = bbox_feats[pos_inds] + mask_pred = self.mask_head(mask_feats) + + mask_targets = self.mask_head.get_target( + sampling_results, gt_masks, self.train_cfg.rcnn) + pos_labels = torch.cat( + [res.pos_gt_labels for res in sampling_results]) + loss_mask = self.mask_head.loss(mask_pred, mask_targets, + pos_labels) + losses.update(loss_mask) + + return losses, loss_latency + + # Noted by Jianyuan, 2019/12/30 + # For two-stage reg cls roi scale test + ''' + def simple_test_bboxes(self, + x, + img_meta, + proposals, + rcnn_test_cfg, + rescale=False): + rois = bbox2roi(proposals) + bbox_cls_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], + rois, + roi_scale_factor=self.cls_roi_scale_factor) + bbox_reg_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], + rois, + roi_scale_factor=self.reg_roi_scale_factor) + if self.with_shared_head: + bbox_cls_feats = self.shared_head(bbox_cls_feats) + bbox_reg_feats = self.shared_head(bbox_reg_feats) + cls_score, bbox_pred = self.bbox_head((bbox_cls_feats, bbox_reg_feats)) + img_shape = img_meta[0]['img_shape'] + scale_factor = img_meta[0]['scale_factor'] + det_bboxes, det_labels = self.bbox_head.get_det_bboxes( + rois, + cls_score, + bbox_pred, + img_shape, + scale_factor, + rescale=rescale, + cfg=rcnn_test_cfg) + return det_bboxes, det_labels + ''' + # END + + def simple_test(self, img, img_meta, proposals=None, rescale=False): + """Test without augmentation.""" + assert self.with_bbox, "Bbox head must be implemented." + + out = self.extract_feat(img) + + if len(out) >= 4: + x = out + else: + x = out[0] + + proposal_list = self.simple_test_rpn( + x, img_meta, self.test_cfg.rpn) if proposals is None else proposals + + det_bboxes, det_labels = self.simple_test_bboxes( + x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale) + bbox_results = bbox2result(det_bboxes, det_labels, + self.bbox_head.num_classes) + + if not self.with_mask: + return bbox_results + else: + segm_results = self.simple_test_mask( + x, img_meta, det_bboxes, det_labels, rescale=rescale) + return bbox_results, segm_results + + def aug_test(self, imgs, img_metas, rescale=False): + """Test with augmentations. + + If rescale is False, then returned bboxes and masks will fit the scale + of imgs[0]. 
+ """ + # recompute feats to save memory + proposal_list = self.aug_test_rpn( + self.extract_feats(imgs), img_metas, self.test_cfg.rpn) + det_bboxes, det_labels = self.aug_test_bboxes( + self.extract_feats(imgs), img_metas, proposal_list, + self.test_cfg.rcnn) + + if rescale: + _det_bboxes = det_bboxes + else: + _det_bboxes = det_bboxes.clone() + _det_bboxes[:, :4] *= img_metas[0][0]['scale_factor'] + bbox_results = bbox2result(_det_bboxes, det_labels, + self.bbox_head.num_classes) + + # det_bboxes always keep the original scale + if self.with_mask: + segm_results = self.aug_test_mask( + self.extract_feats(imgs), img_metas, det_bboxes, det_labels) + return bbox_results, segm_results + else: + return bbox_results diff --git a/CDARTS_detection/mmdet/models/losses/__init__.py b/CDARTS_detection/mmdet/models/losses/__init__.py new file mode 100644 index 0000000..531e5f1 --- /dev/null +++ b/CDARTS_detection/mmdet/models/losses/__init__.py @@ -0,0 +1,19 @@ +from .accuracy import accuracy, Accuracy +from .cross_entropy_loss import (cross_entropy, binary_cross_entropy, + mask_cross_entropy, CrossEntropyLoss) +from .focal_loss import sigmoid_focal_loss, FocalLoss +from .smooth_l1_loss import smooth_l1_loss, SmoothL1Loss +from .ghm_loss import GHMC, GHMR +from .balanced_l1_loss import balanced_l1_loss, BalancedL1Loss +from .mse_loss import mse_loss, MSELoss +from .iou_loss import iou_loss, bounded_iou_loss, IoULoss, BoundedIoULoss +from .utils import reduce_loss, weight_reduce_loss, weighted_loss + +__all__ = [ + 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy', + 'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss', + 'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss', + 'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss', + 'IoULoss', 'BoundedIoULoss', 'GHMC', 'GHMR', 'reduce_loss', + 'weight_reduce_loss', 'weighted_loss' +] diff --git a/CDARTS_detection/mmdet/models/losses/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/models/losses/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69a2d63f193cd9b64cc00fee8408af5fe753e072 GIT binary patch literal 1025 zcma))OK+P%5XZsBHpT{H{7m9LZ@v_zy;glBO4>?E54C#P%Zf16M%{sk1y%k?{Yo6$ zQ@=t^-2pc*`;hHnY)TPUW}700(SdW}#yDXUoU&=9cf}0nY>s|DjnWRYR7xHt zd6r&o%kf2=1lg9~RXq&k_j{UqOo=CT-kCDh5uS|Kn8v`rrC}hrP-@vdC40BQk(j3W zhKs{G*k4ut+2Jm3eE4{NToS*ia_&VSf+URKdcSkDQ?4^ilu=!)bG|yM${3{?r$sHg z7ip1zs&K7zKBZr2Tj?Ctp`E{Mmc1;3BHWepenjzllk?k}U2D{ks*p9*kImFvX_-N2 z5!!?f!6bAE7NJLQ2m^vk7!o|fh%hEh2z`Q0m=b7Kv$?|O<>Ka4=H0oax~B&^DXp99 zc&IC8t~Xd`-<qpv=+fM0+Shdk0eDb30RL8wRDJ4pPdTP;eL%< zq@NG3|3#7k>d6~6E>zJdoi(|zUeRb9=)S9j-G8E#kh*A_@1$&{_3G^0`nv5-Eoe){ zne~4!#4|e==UvutmMI&osy5TsQ+;j2tm{O>tzY*Qv%V@WMY3o8P6}>AoyoP((lsGn zNTGY#FwA0&VSNmP5m*{KQ&wfaiOSCMR!WhhYblh4({#Q)V5cvMd=K5cI$EBzwfF*M zv%cuFW|?2`Of6N{s%2esq47Cl+33r<<)Tu{szr=yndNyev;5+qyRgx2y8?)m;>t;Do?PfEX}@QI5S&;5hMYp!!Zo8!=Wig=TCj zZbGs0qFQY>VCRhGRi+ePJ7n^GCmy1B2U$(2Be2Uici9~5yxsg8mx;5R{>?rr_)EZ0 zoKG0NO)hjQ$|j^IKSCS%mJ@F~F@?Q}ZD*@iKFcI`WmFStxV(K6eS7&5wo@%XL9--u zAe*cfjMf$?!E?|AULZ+T`}w{lm#|?8SHg1C&NA%K!iX literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/models/losses/__pycache__/balanced_l1_loss.cpython-36.pyc b/CDARTS_detection/mmdet/models/losses/__pycache__/balanced_l1_loss.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..059becfa9ee5fcf6089398a3f1d90b62e943166f GIT binary patch literal 1824 zcmZuxOK&4Z5bmD0C$@vKVH550Fo;J6tvq%>Lc2l)SgnvC7_^ZPvXDk&cl(ujnC@}1 
diff --git a/CDARTS_detection/mmdet/models/losses/accuracy.py b/CDARTS_detection/mmdet/models/losses/accuracy.py
new file mode 100644
index 0000000..20d0ad8
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/losses/accuracy.py
@@ -0,0 +1,31 @@
+import torch.nn as nn
+
+
+def accuracy(pred, target, topk=1):
+    assert isinstance(topk, (int, tuple))
+    if isinstance(topk, int):
+        topk =
(topk, ) + return_single = True + else: + return_single = False + + maxk = max(topk) + _, pred_label = pred.topk(maxk, dim=1) + pred_label = pred_label.t() + correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / pred.size(0))) + return res[0] if return_single else res + + +class Accuracy(nn.Module): + + def __init__(self, topk=(1, )): + super().__init__() + self.topk = topk + + def forward(self, pred, target): + return accuracy(pred, target, self.topk) diff --git a/CDARTS_detection/mmdet/models/losses/balanced_l1_loss.py b/CDARTS_detection/mmdet/models/losses/balanced_l1_loss.py new file mode 100644 index 0000000..8593396 --- /dev/null +++ b/CDARTS_detection/mmdet/models/losses/balanced_l1_loss.py @@ -0,0 +1,69 @@ +import numpy as np +import torch +import torch.nn as nn + +from .utils import weighted_loss +from ..registry import LOSSES + + +@weighted_loss +def balanced_l1_loss(pred, + target, + beta=1.0, + alpha=0.5, + gamma=1.5, + reduction='mean'): + assert beta > 0 + assert pred.size() == target.size() and target.numel() > 0 + + diff = torch.abs(pred - target) + b = np.e**(gamma / alpha) - 1 + loss = torch.where( + diff < beta, alpha / b * + (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff, + gamma * diff + gamma / b - alpha * beta) + + return loss + + +@LOSSES.register_module +class BalancedL1Loss(nn.Module): + """Balanced L1 Loss + + arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) + """ + + def __init__(self, + alpha=0.5, + gamma=1.5, + beta=1.0, + reduction='mean', + loss_weight=1.0): + super(BalancedL1Loss, self).__init__() + self.alpha = alpha + self.gamma = gamma + self.beta = beta + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_bbox = self.loss_weight * balanced_l1_loss( + pred, + target, + weight, + alpha=self.alpha, + gamma=self.gamma, + beta=self.beta, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_bbox diff --git a/CDARTS_detection/mmdet/models/losses/cross_entropy_loss.py b/CDARTS_detection/mmdet/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000..fe10b86 --- /dev/null +++ b/CDARTS_detection/mmdet/models/losses/cross_entropy_loss.py @@ -0,0 +1,103 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .utils import weight_reduce_loss +from ..registry import LOSSES + + +def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None): + # element-wise losses + loss = F.cross_entropy(pred, label, reduction='none') + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def _expand_binary_labels(labels, label_weights, label_channels): + bin_labels = labels.new_full((labels.size(0), label_channels), 0) + inds = torch.nonzero(labels >= 1).squeeze() + if inds.numel() > 0: + bin_labels[inds, labels[inds] - 1] = 1 + if label_weights is None: + bin_label_weights = None + else: + bin_label_weights = label_weights.view(-1, 1).expand( + label_weights.size(0), label_channels) + return bin_labels, bin_label_weights + + 
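+# _expand_binary_labels converts index labels of shape (N,) into one-hot
+# targets of shape (N, C): label 0 (background) maps to an all-zero row and
+# label k (k >= 1) sets column k - 1.  binary_cross_entropy below relies on
+# this whenever it is called with index labels rather than one-hot targets.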
+def binary_cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None): + if pred.dim() != label.dim(): + label, weight = _expand_binary_labels(label, weight, pred.size(-1)) + + # weighted element-wise losses + if weight is not None: + weight = weight.float() + loss = F.binary_cross_entropy_with_logits( + pred, label.float(), weight, reduction='none') + # do the reduction for the weighted loss + loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None): + # TODO: handle these two reserved arguments + assert reduction == 'mean' and avg_factor is None + num_rois = pred.size()[0] + inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) + pred_slice = pred[inds, label].squeeze(1) + return F.binary_cross_entropy_with_logits( + pred_slice, target, reduction='mean')[None] + + +@LOSSES.register_module +class CrossEntropyLoss(nn.Module): + + def __init__(self, + use_sigmoid=False, + use_mask=False, + reduction='mean', + loss_weight=1.0): + super(CrossEntropyLoss, self).__init__() + assert (use_sigmoid is False) or (use_mask is False) + self.use_sigmoid = use_sigmoid + self.use_mask = use_mask + self.reduction = reduction + self.loss_weight = loss_weight + + if self.use_sigmoid: + self.cls_criterion = binary_cross_entropy + elif self.use_mask: + self.cls_criterion = mask_cross_entropy + else: + self.cls_criterion = cross_entropy + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_cls diff --git a/CDARTS_detection/mmdet/models/losses/focal_loss.py b/CDARTS_detection/mmdet/models/losses/focal_loss.py new file mode 100644 index 0000000..7a46356 --- /dev/null +++ b/CDARTS_detection/mmdet/models/losses/focal_loss.py @@ -0,0 +1,82 @@ +import torch.nn as nn +import torch.nn.functional as F + +from mmdet.ops import sigmoid_focal_loss as _sigmoid_focal_loss +from .utils import weight_reduce_loss +from ..registry import LOSSES + + +# This method is only for debugging +def py_sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +def sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + # Function.apply does not accept keyword arguments, so the decorator + # "weighted_loss" is not applicable + loss = _sigmoid_focal_loss(pred, target, gamma, alpha) + # TODO: find a proper way to handle the shape of weight + if weight is not None: + weight = weight.view(-1, 1) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module +class FocalLoss(nn.Module): + + def __init__(self, + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + 
loss_weight=1.0): + super(FocalLoss, self).__init__() + assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' + self.use_sigmoid = use_sigmoid + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.use_sigmoid: + loss_cls = self.loss_weight * sigmoid_focal_loss( + pred, + target, + weight, + gamma=self.gamma, + alpha=self.alpha, + reduction=reduction, + avg_factor=avg_factor) + else: + raise NotImplementedError + return loss_cls diff --git a/CDARTS_detection/mmdet/models/losses/ghm_loss.py b/CDARTS_detection/mmdet/models/losses/ghm_loss.py new file mode 100644 index 0000000..95656a2 --- /dev/null +++ b/CDARTS_detection/mmdet/models/losses/ghm_loss.py @@ -0,0 +1,167 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..registry import LOSSES + + +def _expand_binary_labels(labels, label_weights, label_channels): + bin_labels = labels.new_full((labels.size(0), label_channels), 0) + inds = torch.nonzero(labels >= 1).squeeze() + if inds.numel() > 0: + bin_labels[inds, labels[inds] - 1] = 1 + bin_label_weights = label_weights.view(-1, 1).expand( + label_weights.size(0), label_channels) + return bin_labels, bin_label_weights + + +# TODO: code refactoring to make it consistent with other losses +@LOSSES.register_module +class GHMC(nn.Module): + """GHM Classification Loss. + + Details of the theorem can be viewed in the paper + "Gradient Harmonized Single-stage Detector". + https://arxiv.org/abs/1811.05181 + + Args: + bins (int): Number of the unit regions for distribution calculation. + momentum (float): The parameter for moving average. + use_sigmoid (bool): Can only be true for BCE based loss now. + loss_weight (float): The weight of the total GHM-C loss. + """ + + def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): + super(GHMC, self).__init__() + self.bins = bins + self.momentum = momentum + self.edges = torch.arange(bins + 1).float().cuda() / bins + self.edges[-1] += 1e-6 + if momentum > 0: + self.acc_sum = torch.zeros(bins).cuda() + self.use_sigmoid = use_sigmoid + if not self.use_sigmoid: + raise NotImplementedError + self.loss_weight = loss_weight + + def forward(self, pred, target, label_weight, *args, **kwargs): + """Calculate the GHM-C loss. + + Args: + pred (float tensor of size [batch_num, class_num]): + The direct prediction of classification fc layer. + target (float tensor of size [batch_num, class_num]): + Binary class target for each sample. + label_weight (float tensor of size [batch_num, class_num]): + the value is 1 if the sample is valid and 0 if ignored. + Returns: + The gradient harmonized loss. 
+ """ + # the target should be binary class label + if pred.dim() != target.dim(): + target, label_weight = _expand_binary_labels( + target, label_weight, pred.size(-1)) + target, label_weight = target.float(), label_weight.float() + edges = self.edges + mmt = self.momentum + weights = torch.zeros_like(pred) + + # gradient length + g = torch.abs(pred.sigmoid().detach() - target) + + valid = label_weight > 0 + tot = max(valid.float().sum().item(), 1.0) + n = 0 # n valid bins + for i in range(self.bins): + inds = (g >= edges[i]) & (g < edges[i + 1]) & valid + num_in_bin = inds.sum().item() + if num_in_bin > 0: + if mmt > 0: + self.acc_sum[i] = mmt * self.acc_sum[i] \ + + (1 - mmt) * num_in_bin + weights[inds] = tot / self.acc_sum[i] + else: + weights[inds] = tot / num_in_bin + n += 1 + if n > 0: + weights = weights / n + + loss = F.binary_cross_entropy_with_logits( + pred, target, weights, reduction='sum') / tot + return loss * self.loss_weight + + +# TODO: code refactoring to make it consistent with other losses +@LOSSES.register_module +class GHMR(nn.Module): + """GHM Regression Loss. + + Details of the theorem can be viewed in the paper + "Gradient Harmonized Single-stage Detector" + https://arxiv.org/abs/1811.05181 + + Args: + mu (float): The parameter for the Authentic Smooth L1 loss. + bins (int): Number of the unit regions for distribution calculation. + momentum (float): The parameter for moving average. + loss_weight (float): The weight of the total GHM-R loss. + """ + + def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0): + super(GHMR, self).__init__() + self.mu = mu + self.bins = bins + self.edges = torch.arange(bins + 1).float().cuda() / bins + self.edges[-1] = 1e3 + self.momentum = momentum + if momentum > 0: + self.acc_sum = torch.zeros(bins).cuda() + self.loss_weight = loss_weight + + # TODO: support reduction parameter + def forward(self, pred, target, label_weight, avg_factor=None): + """Calculate the GHM-R loss. + + Args: + pred (float tensor of size [batch_num, 4 (* class_num)]): + The prediction of box regression layer. Channel number can be 4 + or 4 * class_num depending on whether it is class-agnostic. + target (float tensor of size [batch_num, 4 (* class_num)]): + The target regression values with the same size of pred. + label_weight (float tensor of size [batch_num, 4 (* class_num)]): + The weight of each sample, 0 if ignored. + Returns: + The gradient harmonized loss. 
+        """
+        mu = self.mu
+        edges = self.edges
+        mmt = self.momentum
+
+        # ASL1 loss
+        diff = pred - target
+        loss = torch.sqrt(diff * diff + mu * mu) - mu
+
+        # gradient length
+        g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
+        weights = torch.zeros_like(g)
+
+        valid = label_weight > 0
+        tot = max(label_weight.float().sum().item(), 1.0)
+        n = 0  # n: valid bins
+        for i in range(self.bins):
+            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
+            num_in_bin = inds.sum().item()
+            if num_in_bin > 0:
+                n += 1
+                if mmt > 0:
+                    self.acc_sum[i] = mmt * self.acc_sum[i] \
+                        + (1 - mmt) * num_in_bin
+                    weights[inds] = tot / self.acc_sum[i]
+                else:
+                    weights[inds] = tot / num_in_bin
+        if n > 0:
+            weights /= n
+
+        loss = loss * weights
+        loss = loss.sum() / tot
+        return loss * self.loss_weight
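The binning loop above is the core idea shared by GHMC and GHMR: each example is weighted inversely to how crowded its gradient-norm bin is, so the flood of easy examples is down-weighted relative to rarer, harder ones. The following standalone sketch (plain PyTorch, no mmdet imports; logits and targets are invented for illustration) reproduces the GHM-C weighting outside the module:

    import torch

    # Toy logits and binary targets (values invented for illustration).
    pred = torch.tensor([3.0, -2.5, 0.1, 2.8, -3.2, 0.05])
    target = torch.tensor([1.0, 0.0, 1.0, 1.0, 0.0, 0.0])
    valid = torch.ones_like(target, dtype=torch.bool)

    bins = 5
    edges = torch.arange(bins + 1).float() / bins
    edges[-1] += 1e-6

    # Gradient length of sigmoid BCE, as in GHMC.forward: |sigmoid(pred) - target|.
    g = (pred.sigmoid() - target).abs()

    tot = max(valid.float().sum().item(), 1.0)
    weights = torch.zeros_like(pred)
    n = 0
    for i in range(bins):
        inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
        num_in_bin = inds.sum().item()
        if num_in_bin > 0:
            # examples in a crowded gradient-norm bin share a smaller weight
            weights[inds] = tot / num_in_bin
            n += 1
    if n > 0:
        weights = weights / n
    print(weights)
    # tensor([0.7500, 0.7500, 1.5000, 0.7500, 0.7500, 1.5000]):
    # the four easy examples are down-weighted, the two harder ones boosted.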
diff --git a/CDARTS_detection/mmdet/models/losses/iou_loss.py b/CDARTS_detection/mmdet/models/losses/iou_loss.py
new file mode 100644
index 0000000..011ff36
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/losses/iou_loss.py
@@ -0,0 +1,135 @@
+import torch
+import torch.nn as nn
+
+from mmdet.core import bbox_overlaps
+from .utils import weighted_loss
+from ..registry import LOSSES
+
+
+@weighted_loss
+def iou_loss(pred, target, eps=1e-6):
+    """IoU loss.
+
+    Compute the IoU loss between a set of predicted bboxes and target
+    bboxes. The loss is calculated as the negative log of the IoU.
+
+    Args:
+        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+            shape (n, 4).
+        target (Tensor): Corresponding gt bboxes, shape (n, 4).
+        eps (float): Eps to avoid log(0).
+
+    Return:
+        Tensor: Loss tensor.
+    """
+    ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
+    loss = -ious.log()
+    return loss
+
+
+@weighted_loss
+def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
+    """Improving Object Localization with Fitness NMS and Bounded IoU Loss,
+    https://arxiv.org/abs/1711.00164.
+
+    Args:
+        pred (tensor): Predicted bboxes.
+        target (tensor): Target bboxes.
+        beta (float): beta parameter in the smooth L1 formulation.
+        eps (float): eps to avoid NaN.
+    """
+    pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
+    pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
+    pred_w = pred[:, 2] - pred[:, 0] + 1
+    pred_h = pred[:, 3] - pred[:, 1] + 1
+    with torch.no_grad():
+        target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
+        target_ctry = (target[:, 1] + target[:, 3]) * 0.5
+        target_w = target[:, 2] - target[:, 0] + 1
+        target_h = target[:, 3] - target[:, 1] + 1
+
+    dx = target_ctrx - pred_ctrx
+    dy = target_ctry - pred_ctry
+
+    loss_dx = 1 - torch.max(
+        (target_w - 2 * dx.abs()) /
+        (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
+    loss_dy = 1 - torch.max(
+        (target_h - 2 * dy.abs()) /
+        (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
+    loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
+                            (target_w + eps))
+    loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
+                            (target_h + eps))
+    loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
+                            dim=-1).view(loss_dx.size(0), -1)
+
+    loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
+                       loss_comb - 0.5 * beta)
+    return loss
+
+
+@LOSSES.register_module
+class IoULoss(nn.Module):
+
+    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
+        super(IoULoss, self).__init__()
+        self.eps = eps
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(self,
+                pred,
+                target,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None,
+                **kwargs):
+        if weight is not None and not torch.any(weight > 0):
+            return (pred * weight).sum()  # 0
+        assert reduction_override in (None, 'none', 'mean', 'sum')
+        reduction = (
+            reduction_override if reduction_override else self.reduction)
+        loss = self.loss_weight * iou_loss(
+            pred,
+            target,
+            weight,
+            eps=self.eps,
+            reduction=reduction,
+            avg_factor=avg_factor,
+            **kwargs)
+        return loss
+
+
+@LOSSES.register_module
+class BoundedIoULoss(nn.Module):
+
+    def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0):
+        super(BoundedIoULoss, self).__init__()
+        self.beta = beta
+        self.eps = eps
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(self,
+                pred,
+                target,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None,
+                **kwargs):
+        if weight is not None and not torch.any(weight > 0):
+            return (pred * weight).sum()  # 0
+        assert reduction_override in (None, 'none', 'mean', 'sum')
+        reduction = (
+            reduction_override if reduction_override else self.reduction)
+        loss = self.loss_weight * bounded_iou_loss(
+            pred,
+            target,
+            weight,
+            beta=self.beta,
+            eps=self.eps,
+            reduction=reduction,
+            avg_factor=avg_factor,
+            **kwargs)
+        return loss
diff --git a/CDARTS_detection/mmdet/models/losses/mse_loss.py b/CDARTS_detection/mmdet/models/losses/mse_loss.py
new file mode 100644
index 0000000..a50f459
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/losses/mse_loss.py
@@ -0,0 +1,25 @@
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .utils import weighted_loss
+from ..registry import LOSSES
+
+mse_loss = weighted_loss(F.mse_loss)
+
+
+@LOSSES.register_module
+class MSELoss(nn.Module):
+
+    def __init__(self, reduction='mean', loss_weight=1.0):
+        super().__init__()
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(self, pred, target, weight=None, avg_factor=None):
+        loss = self.loss_weight * mse_loss(
+            pred,
+            target,
+            weight,
+            reduction=self.reduction,
+            avg_factor=avg_factor)
+        return loss
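A quick numeric check of the -log(IoU) formulation: a perfect box costs 0, and the loss grows as overlap shrinks. The helper below is a simplified stand-in for mmdet's bbox_overlaps(..., is_aligned=True) (it omits the +1 pixel convention that bounded_iou_loss uses above), and the box coordinates are made up:

    import torch

    def aligned_iou(pred, target):
        # Pairwise-by-index IoU of (x1, y1, x2, y2) boxes; simplified stand-in.
        lt = torch.max(pred[:, :2], target[:, :2])
        rb = torch.min(pred[:, 2:], target[:, 2:])
        wh = (rb - lt).clamp(min=0)
        inter = wh[:, 0] * wh[:, 1]
        area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
        area_t = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
        return inter / (area_p + area_t - inter)

    pred = torch.tensor([[0., 0., 10., 10.], [0., 0., 10., 10.]])
    target = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
    ious = aligned_iou(pred, target).clamp(min=1e-6)
    print(-ious.log())  # tensor([0.0000, 1.9459]): exact overlap -> 0 loss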
diff --git a/CDARTS_detection/mmdet/models/losses/smooth_l1_loss.py b/CDARTS_detection/mmdet/models/losses/smooth_l1_loss.py
new file mode 100644
index 0000000..75d71e8
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/losses/smooth_l1_loss.py
@@ -0,0 +1,45 @@
+import torch
+import torch.nn as nn
+
+from .utils import weighted_loss
+from ..registry import LOSSES
+
+
+@weighted_loss
+def smooth_l1_loss(pred, target, beta=1.0):
+    assert beta > 0
+    assert pred.size() == target.size() and target.numel() > 0
+    diff = torch.abs(pred - target)
+    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
+                       diff - 0.5 * beta)
+    return loss
+
+
+@LOSSES.register_module
+class SmoothL1Loss(nn.Module):
+
+    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
+        super(SmoothL1Loss, self).__init__()
+        self.beta = beta
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(self,
+                pred,
+                target,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None,
+                **kwargs):
+        assert reduction_override in (None, 'none', 'mean', 'sum')
+        reduction = (
+            reduction_override if reduction_override else self.reduction)
+        loss_bbox = self.loss_weight * smooth_l1_loss(
+            pred,
+            target,
+            weight,
+            beta=self.beta,
+            reduction=reduction,
+            avg_factor=avg_factor,
+            **kwargs)
+        return loss_bbox
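At diff == beta the two branches of smooth_l1_loss agree (0.5 * beta equals beta - 0.5 * beta), so the piecewise curve is continuous, and inside the quadratic zone the gradient shrinks toward zero instead of staying at 1. A standalone check with invented values:

    import torch

    def smooth_l1(diff, beta=1.0):
        # Same piecewise rule as the decorated loss above, applied to |pred - target|.
        diff = diff.abs()
        return torch.where(diff < beta, 0.5 * diff * diff / beta,
                           diff - 0.5 * beta)

    d = torch.tensor([0.0, 0.5, 1.0, 2.0, 5.0])
    print(smooth_l1(d))  # tensor([0.0000, 0.1250, 0.5000, 1.5000, 4.5000])
    # Both branches give 0.5 * beta at the seam, so the curve is continuous:
    print(smooth_l1(torch.tensor([1.0 - 1e-6, 1.0])))  # both ~0.5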
diff --git a/CDARTS_detection/mmdet/models/losses/utils.py b/CDARTS_detection/mmdet/models/losses/utils.py
new file mode 100644
index 0000000..5c16e06
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/losses/utils.py
@@ -0,0 +1,97 @@
+import functools
+
+import torch.nn.functional as F
+
+
+def reduce_loss(loss, reduction):
+    """Reduce loss as specified.
+
+    Args:
+        loss (Tensor): Elementwise loss tensor.
+        reduction (str): Options are "none", "mean" and "sum".
+
+    Return:
+        Tensor: Reduced loss tensor.
+    """
+    reduction_enum = F._Reduction.get_enum(reduction)
+    # none: 0, elementwise_mean: 1, sum: 2
+    if reduction_enum == 0:
+        return loss
+    elif reduction_enum == 1:
+        return loss.mean()
+    elif reduction_enum == 2:
+        return loss.sum()
+
+
+def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
+    """Apply element-wise weight and reduce loss.
+
+    Args:
+        loss (Tensor): Element-wise loss.
+        weight (Tensor): Element-wise weights.
+        reduction (str): Same as built-in losses of PyTorch.
+        avg_factor (float): Average factor when computing the mean of losses.
+
+    Returns:
+        Tensor: Processed loss values.
+    """
+    # if weight is specified, apply element-wise weight
+    if weight is not None:
+        loss = loss * weight
+
+    # if avg_factor is not specified, just reduce the loss
+    if avg_factor is None:
+        loss = reduce_loss(loss, reduction)
+    else:
+        # if reduction is mean, then average the loss by avg_factor
+        if reduction == 'mean':
+            loss = loss.sum() / avg_factor
+        # if reduction is 'none', then do nothing, otherwise raise an error
+        elif reduction != 'none':
+            raise ValueError('avg_factor cannot be used with reduction="sum"')
+    return loss
+
+
+def weighted_loss(loss_func):
+    """Create a weighted version of a given loss function.
+
+    To use this decorator, the loss function must have the signature like
+    `loss_func(pred, target, **kwargs)`. The function only needs to compute
+    element-wise loss without any reduction. This decorator will add weight
+    and reduction arguments to the function. The decorated function will have
+    the signature like `loss_func(pred, target, weight=None, reduction='mean',
+    avg_factor=None, **kwargs)`.
+
+    :Example:
+
+    >>> @weighted_loss
+    >>> def l1_loss(pred, target):
+    >>>     return (pred - target).abs()
+
+    >>> pred = torch.Tensor([0, 2, 3])
+    >>> target = torch.Tensor([1, 1, 1])
+    >>> weight = torch.Tensor([1, 0, 1])
+
+    >>> l1_loss(pred, target)
+    tensor(1.3333)
+    >>> l1_loss(pred, target, weight)
+    tensor(1.)
+    >>> l1_loss(pred, target, reduction='none')
+    tensor([1., 1., 2.])
+    >>> l1_loss(pred, target, weight, avg_factor=2)
+    tensor(1.5000)
+    """
+
+    @functools.wraps(loss_func)
+    def wrapper(pred,
+                target,
+                weight=None,
+                reduction='mean',
+                avg_factor=None,
+                **kwargs):
+        # get element-wise loss
+        loss = loss_func(pred, target, **kwargs)
+        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+        return loss
+
+    return wrapper
diff --git a/CDARTS_detection/mmdet/models/mask_heads/__init__.py b/CDARTS_detection/mmdet/models/mask_heads/__init__.py
new file mode 100644
index 0000000..ed71edc
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/mask_heads/__init__.py
@@ -0,0 +1,10 @@
+from .fcn_mask_head import FCNMaskHead
+from .fused_semantic_head import FusedSemanticHead
+from .grid_head import GridHead
+from .htc_mask_head import HTCMaskHead
+from .maskiou_head import MaskIoUHead
+
+__all__ = [
+    'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
+    'MaskIoUHead'
+]
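All of these modules rely on the same registry pattern: a class registers itself under LOSSES or HEADS, and config dicts name it by string so builders like build_loss can instantiate it. A stripped-down sketch of that mechanism (simplified for illustration; the real Registry in mmdet carries more validation, and DummyLoss is a made-up class):

    class Registry:
        """Minimal stand-in for mmdet's Registry."""

        def __init__(self, name):
            self.name = name
            self._module_dict = {}

        def register_module(self, cls):
            # used as a bare decorator, like @LOSSES.register_module above
            self._module_dict[cls.__name__] = cls
            return cls

        def build(self, cfg):
            # cfg mirrors config entries such as dict(type='SmoothL1Loss', beta=1.0)
            args = dict(cfg)
            cls = self._module_dict[args.pop('type')]
            return cls(**args)

    LOSSES = Registry('loss')

    @LOSSES.register_module
    class DummyLoss:
        def __init__(self, loss_weight=1.0):
            self.loss_weight = loss_weight

    loss = LOSSES.build(dict(type='DummyLoss', loss_weight=0.5))
    print(type(loss).__name__, loss.loss_weight)  # DummyLoss 0.5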
diff --git a/CDARTS_detection/mmdet/models/mask_heads/fcn_mask_head.py b/CDARTS_detection/mmdet/models/mask_heads/fcn_mask_head.py
new file mode 100644
index 0000000..af5cee8
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/mask_heads/fcn_mask_head.py
@@ -0,0 +1,179 @@
+import mmcv
+import numpy as np
+import pycocotools.mask as mask_util
+import torch
+import torch.nn as nn
+
+from ..builder import build_loss
+from ..registry import HEADS
+from ..utils import ConvModule
+from mmdet.core import mask_target, force_fp32, auto_fp16
+
+
+@HEADS.register_module
+class FCNMaskHead(nn.Module):
+
+    def __init__(self,
+                 num_convs=4,
+                 roi_feat_size=14,
+                 in_channels=256,
+                 conv_kernel_size=3,
+                 conv_out_channels=256,
+                 upsample_method='deconv',
+                 upsample_ratio=2,
+                 num_classes=81,
+                 class_agnostic=False,
+                 conv_cfg=None,
+                 norm_cfg=None,
+                 loss_mask=dict(
+                     type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
+        super(FCNMaskHead, self).__init__()
+        if upsample_method not in [None, 'deconv', 'nearest', 'bilinear']:
+            raise ValueError(
+                'Invalid upsample method {}, accepted methods '
+                'are "deconv", "nearest", "bilinear"'.format(upsample_method))
+        self.num_convs = num_convs
+        self.roi_feat_size = roi_feat_size  # WARN: not used and reserved
+        self.in_channels = in_channels
+        self.conv_kernel_size = conv_kernel_size
+        self.conv_out_channels = conv_out_channels
+        self.upsample_method = upsample_method
+        self.upsample_ratio = upsample_ratio
+        self.num_classes = num_classes
+        self.class_agnostic = class_agnostic
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+        self.fp16_enabled = False
+        self.loss_mask = build_loss(loss_mask)
+
+        self.convs = nn.ModuleList()
+        for i in range(self.num_convs):
+            in_channels = (
+                self.in_channels if i == 0 else self.conv_out_channels)
+            padding = (self.conv_kernel_size - 1) // 2
+            self.convs.append(
+                ConvModule(
+                    in_channels,
+                    self.conv_out_channels,
+                    self.conv_kernel_size,
+                    padding=padding,
+                    conv_cfg=conv_cfg,
+                    norm_cfg=norm_cfg))
+        upsample_in_channels = (
+            self.conv_out_channels if self.num_convs > 0 else in_channels)
+        if self.upsample_method is None:
+            self.upsample = None
+        elif self.upsample_method == 'deconv':
+            self.upsample = nn.ConvTranspose2d(
+                upsample_in_channels,
+                self.conv_out_channels,
+
self.upsample_ratio, + stride=self.upsample_ratio) + else: + self.upsample = nn.Upsample( + scale_factor=self.upsample_ratio, mode=self.upsample_method) + + out_channels = 1 if self.class_agnostic else self.num_classes + logits_in_channel = ( + self.conv_out_channels + if self.upsample_method == 'deconv' else upsample_in_channels) + self.conv_logits = nn.Conv2d(logits_in_channel, out_channels, 1) + self.relu = nn.ReLU(inplace=True) + self.debug_imgs = None + + def init_weights(self): + for m in [self.upsample, self.conv_logits]: + if m is None: + continue + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + nn.init.constant_(m.bias, 0) + + @auto_fp16() + def forward(self, x): + for conv in self.convs: + x = conv(x) + if self.upsample is not None: + x = self.upsample(x) + if self.upsample_method == 'deconv': + x = self.relu(x) + mask_pred = self.conv_logits(x) + return mask_pred + + def get_target(self, sampling_results, gt_masks, rcnn_train_cfg): + pos_proposals = [res.pos_bboxes for res in sampling_results] + pos_assigned_gt_inds = [ + res.pos_assigned_gt_inds for res in sampling_results + ] + mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds, + gt_masks, rcnn_train_cfg) + return mask_targets + + @force_fp32(apply_to=('mask_pred', )) + def loss(self, mask_pred, mask_targets, labels): + loss = dict() + if self.class_agnostic: + loss_mask = self.loss_mask(mask_pred, mask_targets, + torch.zeros_like(labels)) + else: + loss_mask = self.loss_mask(mask_pred, mask_targets, labels) + loss['loss_mask'] = loss_mask + return loss + + def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, + ori_shape, scale_factor, rescale): + """Get segmentation masks from mask_pred and bboxes. + + Args: + mask_pred (Tensor or ndarray): shape (n, #class+1, h, w). + For single-scale testing, mask_pred is the direct output of + model, whose type is Tensor, while for multi-scale testing, + it will be converted to numpy array outside of this method. 
+            det_bboxes (Tensor): shape (n, 4/5)
+            det_labels (Tensor): shape (n, )
+            rcnn_test_cfg (dict): rcnn testing config
+            ori_shape: original image size
+            scale_factor (float): image rescale factor used in testing
+            rescale (bool): whether masks are mapped back to the original
+                image shape
+
+        Returns:
+            list[list]: encoded masks
+        """
+        if isinstance(mask_pred, torch.Tensor):
+            mask_pred = mask_pred.sigmoid().cpu().numpy()
+        assert isinstance(mask_pred, np.ndarray)
+        # when enabling mixed precision training, mask_pred may be float16
+        # numpy array
+        mask_pred = mask_pred.astype(np.float32)
+
+        cls_segms = [[] for _ in range(self.num_classes - 1)]
+        bboxes = det_bboxes.cpu().numpy()[:, :4]
+        labels = det_labels.cpu().numpy() + 1
+
+        if rescale:
+            img_h, img_w = ori_shape[:2]
+        else:
+            img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
+            img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
+            scale_factor = 1.0
+
+        for i in range(bboxes.shape[0]):
+            bbox = (bboxes[i, :] / scale_factor).astype(np.int32)
+            label = labels[i]
+            w = max(bbox[2] - bbox[0] + 1, 1)
+            h = max(bbox[3] - bbox[1] + 1, 1)
+
+            if not self.class_agnostic:
+                mask_pred_ = mask_pred[i, label, :, :]
+            else:
+                mask_pred_ = mask_pred[i, 0, :, :]
+            im_mask = np.zeros((img_h, img_w), dtype=np.uint8)
+
+            bbox_mask = mmcv.imresize(mask_pred_, (w, h))
+            bbox_mask = (bbox_mask > rcnn_test_cfg.mask_thr_binary).astype(
+                np.uint8)
+            im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = bbox_mask
+            rle = mask_util.encode(
+                np.array(im_mask[:, :, np.newaxis], order='F'))[0]
+            cls_segms[label - 1].append(rle)
+
+        return cls_segms
diff --git a/CDARTS_detection/mmdet/models/mask_heads/fused_semantic_head.py b/CDARTS_detection/mmdet/models/mask_heads/fused_semantic_head.py
new file mode 100644
index 0000000..550e08e
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/mask_heads/fused_semantic_head.py
@@ -0,0 +1,106 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import kaiming_init
+
+from mmdet.core import auto_fp16, force_fp32
+from ..registry import HEADS
+from ..utils import ConvModule
+
+
+@HEADS.register_module
+class FusedSemanticHead(nn.Module):
+    """Multi-level fused semantic segmentation head.
+ + in_1 -> 1x1 conv --- + | + in_2 -> 1x1 conv -- | + || + in_3 -> 1x1 conv - || + ||| /-> 1x1 conv (mask prediction) + in_4 -> 1x1 conv -----> 3x3 convs (*4) + | \-> 1x1 conv (feature) + in_5 -> 1x1 conv --- + """ # noqa: W605 + + def __init__(self, + num_ins, + fusion_level, + num_convs=4, + in_channels=256, + conv_out_channels=256, + num_classes=183, + ignore_label=255, + loss_weight=0.2, + conv_cfg=None, + norm_cfg=None): + super(FusedSemanticHead, self).__init__() + self.num_ins = num_ins + self.fusion_level = fusion_level + self.num_convs = num_convs + self.in_channels = in_channels + self.conv_out_channels = conv_out_channels + self.num_classes = num_classes + self.ignore_label = ignore_label + self.loss_weight = loss_weight + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.fp16_enabled = False + + self.lateral_convs = nn.ModuleList() + for i in range(self.num_ins): + self.lateral_convs.append( + ConvModule( + self.in_channels, + self.in_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=False)) + + self.convs = nn.ModuleList() + for i in range(self.num_convs): + in_channels = self.in_channels if i == 0 else conv_out_channels + self.convs.append( + ConvModule( + in_channels, + conv_out_channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg)) + self.conv_embedding = ConvModule( + conv_out_channels, + conv_out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1) + + self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label) + + def init_weights(self): + kaiming_init(self.conv_logits) + + @auto_fp16() + def forward(self, feats): + x = self.lateral_convs[self.fusion_level](feats[self.fusion_level]) + fused_size = tuple(x.shape[-2:]) + for i, feat in enumerate(feats): + if i != self.fusion_level: + feat = F.interpolate( + feat, size=fused_size, mode='bilinear', align_corners=True) + x += self.lateral_convs[i](feat) + + for i in range(self.num_convs): + x = self.convs[i](x) + + mask_pred = self.conv_logits(x) + x = self.conv_embedding(x) + return mask_pred, x + + @force_fp32(apply_to=('mask_pred',)) + def loss(self, mask_pred, labels): + labels = labels.squeeze(1).long() + loss_semantic_seg = self.criterion(mask_pred, labels) + loss_semantic_seg *= self.loss_weight + return loss_semantic_seg diff --git a/CDARTS_detection/mmdet/models/mask_heads/grid_head.py b/CDARTS_detection/mmdet/models/mask_heads/grid_head.py new file mode 100644 index 0000000..39e69b3 --- /dev/null +++ b/CDARTS_detection/mmdet/models/mask_heads/grid_head.py @@ -0,0 +1,359 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import kaiming_init, normal_init + +from ..builder import build_loss +from ..registry import HEADS +from ..utils import ConvModule + + +@HEADS.register_module +class GridHead(nn.Module): + + def __init__(self, + grid_points=9, + num_convs=8, + roi_feat_size=14, + in_channels=256, + conv_kernel_size=3, + point_feat_channels=64, + deconv_kernel_size=4, + class_agnostic=False, + loss_grid=dict( + type='CrossEntropyLoss', use_sigmoid=True, + loss_weight=15), + conv_cfg=None, + norm_cfg=dict(type='GN', num_groups=36)): + super(GridHead, self).__init__() + self.grid_points = grid_points + self.num_convs = num_convs + self.roi_feat_size = roi_feat_size + self.in_channels = in_channels + self.conv_kernel_size = conv_kernel_size + self.point_feat_channels = point_feat_channels + self.conv_out_channels = 
self.point_feat_channels * self.grid_points + self.class_agnostic = class_agnostic + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN': + assert self.conv_out_channels % norm_cfg['num_groups'] == 0 + + assert self.grid_points >= 4 + self.grid_size = int(np.sqrt(self.grid_points)) + if self.grid_size * self.grid_size != self.grid_points: + raise ValueError('grid_points must be a square number') + + # the predicted heatmap is half of whole_map_size + self.whole_map_size = self.roi_feat_size * 4 + + # compute point-wise sub-regions + self.sub_regions = self.calc_sub_regions() + + self.convs = [] + for i in range(self.num_convs): + in_channels = ( + self.in_channels if i == 0 else self.conv_out_channels) + stride = 2 if i == 0 else 1 + padding = (self.conv_kernel_size - 1) // 2 + self.convs.append( + ConvModule( + in_channels, + self.conv_out_channels, + self.conv_kernel_size, + stride=stride, + padding=padding, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + bias=True)) + self.convs = nn.Sequential(*self.convs) + + self.deconv1 = nn.ConvTranspose2d( + self.conv_out_channels, + self.conv_out_channels, + kernel_size=deconv_kernel_size, + stride=2, + padding=(deconv_kernel_size - 2) // 2, + groups=grid_points) + self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels) + self.deconv2 = nn.ConvTranspose2d( + self.conv_out_channels, + grid_points, + kernel_size=deconv_kernel_size, + stride=2, + padding=(deconv_kernel_size - 2) // 2, + groups=grid_points) + + # find the 4-neighbor of each grid point + self.neighbor_points = [] + grid_size = self.grid_size + for i in range(grid_size): # i-th column + for j in range(grid_size): # j-th row + neighbors = [] + if i > 0: # left: (i - 1, j) + neighbors.append((i - 1) * grid_size + j) + if j > 0: # up: (i, j - 1) + neighbors.append(i * grid_size + j - 1) + if j < grid_size - 1: # down: (i, j + 1) + neighbors.append(i * grid_size + j + 1) + if i < grid_size - 1: # right: (i + 1, j) + neighbors.append((i + 1) * grid_size + j) + self.neighbor_points.append(tuple(neighbors)) + # total edges in the grid + self.num_edges = sum([len(p) for p in self.neighbor_points]) + + self.forder_trans = nn.ModuleList() # first-order feature transition + self.sorder_trans = nn.ModuleList() # second-order feature transition + for neighbors in self.neighbor_points: + fo_trans = nn.ModuleList() + so_trans = nn.ModuleList() + for _ in range(len(neighbors)): + # each transition module consists of a 5x5 depth-wise conv and + # 1x1 conv. 
+ fo_trans.append( + nn.Sequential( + nn.Conv2d( + self.point_feat_channels, + self.point_feat_channels, + 5, + stride=1, + padding=2, + groups=self.point_feat_channels), + nn.Conv2d(self.point_feat_channels, + self.point_feat_channels, 1))) + so_trans.append( + nn.Sequential( + nn.Conv2d( + self.point_feat_channels, + self.point_feat_channels, + 5, + 1, + 2, + groups=self.point_feat_channels), + nn.Conv2d(self.point_feat_channels, + self.point_feat_channels, 1))) + self.forder_trans.append(fo_trans) + self.sorder_trans.append(so_trans) + + self.loss_grid = build_loss(loss_grid) + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + # TODO: compare mode = "fan_in" or "fan_out" + kaiming_init(m) + for m in self.modules(): + if isinstance(m, nn.ConvTranspose2d): + normal_init(m, std=0.001) + nn.init.constant_(self.deconv2.bias, -np.log(0.99 / 0.01)) + + def forward(self, x): + assert x.shape[-1] == x.shape[-2] == self.roi_feat_size + # RoI feature transformation, downsample 2x + x = self.convs(x) + + c = self.point_feat_channels + # first-order fusion + x_fo = [None for _ in range(self.grid_points)] + for i, points in enumerate(self.neighbor_points): + x_fo[i] = x[:, i * c:(i + 1) * c] + for j, point_idx in enumerate(points): + x_fo[i] = x_fo[i] + self.forder_trans[i][j]( + x[:, point_idx * c:(point_idx + 1) * c]) + + # second-order fusion + x_so = [None for _ in range(self.grid_points)] + for i, points in enumerate(self.neighbor_points): + x_so[i] = x[:, i * c:(i + 1) * c] + for j, point_idx in enumerate(points): + x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx]) + + # predicted heatmap with fused features + x2 = torch.cat(x_so, dim=1) + x2 = self.deconv1(x2) + x2 = F.relu(self.norm1(x2), inplace=True) + heatmap = self.deconv2(x2) + + # predicted heatmap with original features (applicable during training) + if self.training: + x1 = x + x1 = self.deconv1(x1) + x1 = F.relu(self.norm1(x1), inplace=True) + heatmap_unfused = self.deconv2(x1) + else: + heatmap_unfused = heatmap + + return dict(fused=heatmap, unfused=heatmap_unfused) + + def calc_sub_regions(self): + """Compute point specific representation regions. + + See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details. + """ + # to make it consistent with the original implementation, half_size + # is computed as 2 * quarter_size, which is smaller + half_size = self.whole_map_size // 4 * 2 + sub_regions = [] + for i in range(self.grid_points): + x_idx = i // self.grid_size + y_idx = i % self.grid_size + if x_idx == 0: + sub_x1 = 0 + elif x_idx == self.grid_size - 1: + sub_x1 = half_size + else: + ratio = x_idx / (self.grid_size - 1) - 0.25 + sub_x1 = max(int(ratio * self.whole_map_size), 0) + + if y_idx == 0: + sub_y1 = 0 + elif y_idx == self.grid_size - 1: + sub_y1 = half_size + else: + ratio = y_idx / (self.grid_size - 1) - 0.25 + sub_y1 = max(int(ratio * self.whole_map_size), 0) + sub_regions.append( + (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size)) + return sub_regions + + def get_target(self, sampling_results, rcnn_train_cfg): + # mix all samples (across images) together. 
+ pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results], + dim=0).cpu() + pos_gt_bboxes = torch.cat( + [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu() + assert pos_bboxes.shape == pos_gt_bboxes.shape + + # expand pos_bboxes to 2x of original size + x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 + y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 + x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2 + y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2 + pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1) + pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1) + pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1) + + num_rois = pos_bboxes.shape[0] + map_size = self.whole_map_size + # this is not the final target shape + targets = torch.zeros((num_rois, self.grid_points, map_size, map_size), + dtype=torch.float) + + # pre-compute interpolation factors for all grid points. + # the first item is the factor of x-dim, and the second is y-dim. + # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1) + factors = [] + for j in range(self.grid_points): + x_idx = j // self.grid_size + y_idx = j % self.grid_size + factors.append((1 - x_idx / (self.grid_size - 1), + 1 - y_idx / (self.grid_size - 1))) + + radius = rcnn_train_cfg.pos_radius + radius2 = radius**2 + for i in range(num_rois): + # ignore small bboxes + if (pos_bbox_ws[i] <= self.grid_size + or pos_bbox_hs[i] <= self.grid_size): + continue + # for each grid point, mark a small circle as positive + for j in range(self.grid_points): + factor_x, factor_y = factors[j] + gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + ( + 1 - factor_x) * pos_gt_bboxes[i, 2] + gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + ( + 1 - factor_y) * pos_gt_bboxes[i, 3] + + cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] * + map_size) + cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] * + map_size) + + for x in range(cx - radius, cx + radius + 1): + for y in range(cy - radius, cy + radius + 1): + if x >= 0 and x < map_size and y >= 0 and y < map_size: + if (x - cx)**2 + (y - cy)**2 <= radius2: + targets[i, j, y, x] = 1 + # reduce the target heatmap size by a half + # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688). 
+ sub_targets = [] + for i in range(self.grid_points): + sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i] + sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2]) + sub_targets = torch.cat(sub_targets, dim=1) + sub_targets = sub_targets.cuda() + return sub_targets + + def loss(self, grid_pred, grid_targets): + loss_fused = self.loss_grid(grid_pred['fused'], grid_targets) + loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets) + loss_grid = loss_fused + loss_unfused + return dict(loss_grid=loss_grid) + + def get_bboxes(self, det_bboxes, grid_pred, img_meta): + # TODO: refactoring + assert det_bboxes.shape[0] == grid_pred.shape[0] + det_bboxes = det_bboxes.cpu() + cls_scores = det_bboxes[:, [4]] + det_bboxes = det_bboxes[:, :4] + grid_pred = grid_pred.sigmoid().cpu() + + R, c, h, w = grid_pred.shape + half_size = self.whole_map_size // 4 * 2 + assert h == w == half_size + assert c == self.grid_points + + # find the point with max scores in the half-sized heatmap + grid_pred = grid_pred.view(R * c, h * w) + pred_scores, pred_position = grid_pred.max(dim=1) + xs = pred_position % w + ys = pred_position // w + + # get the position in the whole heatmap instead of half-sized heatmap + for i in range(self.grid_points): + xs[i::self.grid_points] += self.sub_regions[i][0] + ys[i::self.grid_points] += self.sub_regions[i][1] + + # reshape to (num_rois, grid_points) + pred_scores, xs, ys = tuple( + map(lambda x: x.view(R, c), [pred_scores, xs, ys])) + + # get expanded pos_bboxes + widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1) + heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1) + x1 = (det_bboxes[:, 0, None] - widths / 2) + y1 = (det_bboxes[:, 1, None] - heights / 2) + # map the grid point to the absolute coordinates + abs_xs = (xs.float() + 0.5) / w * widths + x1 + abs_ys = (ys.float() + 0.5) / h * heights + y1 + + # get the grid points indices that fall on the bbox boundaries + x1_inds = [i for i in range(self.grid_size)] + y1_inds = [i * self.grid_size for i in range(self.grid_size)] + x2_inds = [ + self.grid_points - self.grid_size + i + for i in range(self.grid_size) + ] + y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)] + + # voting of all grid points on some boundary + bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum( + dim=1, keepdim=True) / ( + pred_scores[:, x1_inds].sum(dim=1, keepdim=True)) + bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum( + dim=1, keepdim=True) / ( + pred_scores[:, y1_inds].sum(dim=1, keepdim=True)) + bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum( + dim=1, keepdim=True) / ( + pred_scores[:, x2_inds].sum(dim=1, keepdim=True)) + bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum( + dim=1, keepdim=True) / ( + pred_scores[:, y2_inds].sum(dim=1, keepdim=True)) + + bbox_res = torch.cat( + [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1) + bbox_res[:, [0, 2]].clamp_(min=0, max=img_meta[0]['img_shape'][1] - 1) + bbox_res[:, [1, 3]].clamp_(min=0, max=img_meta[0]['img_shape'][0] - 1) + + return bbox_res diff --git a/CDARTS_detection/mmdet/models/mask_heads/htc_mask_head.py b/CDARTS_detection/mmdet/models/mask_heads/htc_mask_head.py new file mode 100644 index 0000000..9ba3ed7 --- /dev/null +++ b/CDARTS_detection/mmdet/models/mask_heads/htc_mask_head.py @@ -0,0 +1,38 @@ +from .fcn_mask_head import FCNMaskHead +from ..registry import HEADS +from ..utils import ConvModule + + +@HEADS.register_module +class HTCMaskHead(FCNMaskHead): + + 
def __init__(self, *args, **kwargs): + super(HTCMaskHead, self).__init__(*args, **kwargs) + self.conv_res = ConvModule( + self.conv_out_channels, + self.conv_out_channels, + 1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + + def init_weights(self): + super(HTCMaskHead, self).init_weights() + self.conv_res.init_weights() + + def forward(self, x, res_feat=None, return_logits=True, return_feat=True): + if res_feat is not None: + res_feat = self.conv_res(res_feat) + x = x + res_feat + for conv in self.convs: + x = conv(x) + res_feat = x + outs = [] + if return_logits: + x = self.upsample(x) + if self.upsample_method == 'deconv': + x = self.relu(x) + mask_pred = self.conv_logits(x) + outs.append(mask_pred) + if return_feat: + outs.append(res_feat) + return outs if len(outs) > 1 else outs[0] diff --git a/CDARTS_detection/mmdet/models/mask_heads/maskiou_head.py b/CDARTS_detection/mmdet/models/mask_heads/maskiou_head.py new file mode 100644 index 0000000..bfa1764 --- /dev/null +++ b/CDARTS_detection/mmdet/models/mask_heads/maskiou_head.py @@ -0,0 +1,181 @@ +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import kaiming_init, normal_init + +from ..builder import build_loss +from ..registry import HEADS + + +@HEADS.register_module +class MaskIoUHead(nn.Module): + """Mask IoU Head. + + This head predicts the IoU of predicted masks and corresponding gt masks. + """ + + def __init__(self, + num_convs=4, + num_fcs=2, + roi_feat_size=14, + in_channels=256, + conv_out_channels=256, + fc_out_channels=1024, + num_classes=81, + loss_iou=dict(type='MSELoss', loss_weight=0.5)): + super(MaskIoUHead, self).__init__() + self.in_channels = in_channels + self.conv_out_channels = conv_out_channels + self.fc_out_channels = fc_out_channels + self.num_classes = num_classes + + self.convs = nn.ModuleList() + for i in range(num_convs): + if i == 0: + # concatenation of mask feature and mask prediction + in_channels = self.in_channels + 1 + else: + in_channels = self.conv_out_channels + stride = 2 if i == num_convs - 1 else 1 + self.convs.append( + nn.Conv2d( + in_channels, + self.conv_out_channels, + 3, + stride=stride, + padding=1)) + + self.fcs = nn.ModuleList() + for i in range(num_fcs): + in_channels = self.conv_out_channels * ( + roi_feat_size // 2)**2 if i == 0 else self.fc_out_channels + self.fcs.append(nn.Linear(in_channels, self.fc_out_channels)) + + self.fc_mask_iou = nn.Linear(self.fc_out_channels, self.num_classes) + self.relu = nn.ReLU() + self.max_pool = nn.MaxPool2d(2, 2) + self.loss_iou = build_loss(loss_iou) + + def init_weights(self): + for conv in self.convs: + kaiming_init(conv) + for fc in self.fcs: + kaiming_init( + fc, + a=1, + mode='fan_in', + nonlinearity='leaky_relu', + distribution='uniform') + normal_init(self.fc_mask_iou, std=0.01) + + def forward(self, mask_feat, mask_pred): + mask_pred = mask_pred.sigmoid() + mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1)) + + x = torch.cat((mask_feat, mask_pred_pooled), 1) + + for conv in self.convs: + x = self.relu(conv(x)) + x = x.view(x.size(0), -1) + for fc in self.fcs: + x = self.relu(fc(x)) + mask_iou = self.fc_mask_iou(x) + return mask_iou + + def loss(self, mask_iou_pred, mask_iou_targets): + pos_inds = mask_iou_targets > 0 + if pos_inds.sum() > 0: + loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds], + mask_iou_targets[pos_inds]) + else: + loss_mask_iou = mask_iou_pred * 0 + return dict(loss_mask_iou=loss_mask_iou) + + def get_target(self, sampling_results, gt_masks, mask_pred, mask_targets, + 
                   rcnn_train_cfg):
+        """Compute target of mask IoU.
+
+        The mask IoU target is the IoU between the predicted mask (inside a
+        bbox) and the gt mask of the corresponding instance (the whole
+        instance). The intersection area is computed inside the bbox; the gt
+        mask area is computed in two steps: first compute the gt area inside
+        the bbox, then divide it by the ratio of the gt area inside the bbox
+        to the gt area of the whole instance.
+
+        Args:
+            sampling_results (list[:obj:`SamplingResult`]): sampling results.
+            gt_masks (list[ndarray]): Gt masks (the whole instance) of each
+                image, binary maps with the same shape of the input image.
+            mask_pred (Tensor): Predicted masks of each positive proposal,
+                shape (num_pos, h, w).
+            mask_targets (Tensor): Gt mask of each positive proposal,
+                binary map of the shape (num_pos, h, w).
+            rcnn_train_cfg (dict): Training config for R-CNN part.
+
+        Returns:
+            Tensor: mask iou target (length == num positive).
+        """
+        pos_proposals = [res.pos_bboxes for res in sampling_results]
+        pos_assigned_gt_inds = [
+            res.pos_assigned_gt_inds for res in sampling_results
+        ]
+
+        # compute the area ratio of gt areas inside the proposals and
+        # the whole instance
+        area_ratios = map(self._get_area_ratio, pos_proposals,
+                          pos_assigned_gt_inds, gt_masks)
+        area_ratios = torch.cat(list(area_ratios))
+        assert mask_targets.size(0) == area_ratios.size(0)
+
+        mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float()
+        mask_pred_areas = mask_pred.sum((-1, -2))
+
+        # mask_pred and mask_targets are binary maps
+        overlap_areas = (mask_pred * mask_targets).sum((-1, -2))
+
+        # compute the mask area of the whole instance
+        gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)
+
+        mask_iou_targets = overlap_areas / (
+            mask_pred_areas + gt_full_areas - overlap_areas)
+        return mask_iou_targets
+
+    def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
+        """Compute the area ratio between the gt mask inside the proposal
+        and the gt mask of the corresponding instance."""
+        num_pos = pos_proposals.size(0)
+        if num_pos > 0:
+            area_ratios = []
+            proposals_np = pos_proposals.cpu().numpy()
+            pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
+            # compute mask areas of gt instances (batch processing for speedup)
+            gt_instance_mask_area = gt_masks.sum((-1, -2))
+            for i in range(num_pos):
+                gt_mask = gt_masks[pos_assigned_gt_inds[i]]
+
+                # crop the gt mask inside the proposal
+                x1, y1, x2, y2 = proposals_np[i, :].astype(np.int32)
+                gt_mask_in_proposal = gt_mask[y1:y2 + 1, x1:x2 + 1]
+
+                ratio = gt_mask_in_proposal.sum() / (
+                    gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
+                area_ratios.append(ratio)
+            area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
+                pos_proposals.device)
+        else:
+            area_ratios = pos_proposals.new_zeros((0, ))
+        return area_ratios
+
+    def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):
+        """Get the mask scores.
+
+        mask_score = bbox_score * mask_iou
+        """
+        inds = range(det_labels.size(0))
+        mask_scores = mask_iou_pred[inds, det_labels +
+                                    1] * det_bboxes[inds, -1]
+        mask_scores = mask_scores.cpu().numpy()
+        det_labels = det_labels.cpu().numpy()
+        return [
+            mask_scores[det_labels == i] for i in range(self.num_classes - 1)
+        ]
diff --git a/CDARTS_detection/mmdet/models/necks/__init__.py b/CDARTS_detection/mmdet/models/necks/__init__.py
new file mode 100644
index 0000000..b6303a4
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/necks/__init__.py
@@ -0,0 +1,9 @@
+from .fpn import FPN
+from .fpn_panet import PAFPN
+from .bfp import BFP
+from .hrfpn import HRFPN
+from .nas_fpn import NASFPN
+from .search_pafpn import SearchPAFPN
+
+__all__ = ['FPN', 'BFP', 'HRFPN', 'NASFPN',
+           'PAFPN', 'SearchPAFPN']
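The necks exported here all share one interface: take a tuple of multi-scale backbone features and return a tuple of pyramid features with a unified channel count. A minimal sketch of that contract with invented channel sizes (TinyNeck is a toy stand-in; the real FPN in fpn.py additionally fuses levels top-down and adds output convs):

    import torch
    import torch.nn as nn

    class TinyNeck(nn.Module):
        """Toy FPN-style neck: unify per-level channels with 1x1 convs."""

        def __init__(self, in_channels, out_channels):
            super().__init__()
            self.laterals = nn.ModuleList(
                nn.Conv2d(c, out_channels, 1) for c in in_channels)

        def forward(self, feats):
            assert len(feats) == len(self.laterals)
            return tuple(l(x) for l, x in zip(self.laterals, feats))

    # Stride 8/16/32 feature maps with made-up channel counts.
    feats = (torch.randn(1, 256, 64, 64),
             torch.randn(1, 512, 32, 32),
             torch.randn(1, 1024, 16, 16))
    neck = TinyNeck([256, 512, 1024], out_channels=256)
    outs = neck(feats)
    print([tuple(o.shape) for o in outs])
    # [(1, 256, 64, 64), (1, 256, 32, 32), (1, 256, 16, 16)]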
diff --git a/CDARTS_detection/mmdet/models/necks/auto_neck/__init__.py b/CDARTS_detection/mmdet/models/necks/auto_neck/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/CDARTS_detection/mmdet/models/necks/auto_neck/build_neck.py b/CDARTS_detection/mmdet/models/necks/auto_neck/build_neck.py
new file mode 100644
index 0000000..152bd14
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/necks/auto_neck/build_neck.py
@@ -0,0 +1,23 @@
+# --------------------------------------------------------
+# Copyright (c) 2019 Jianyuan Guo (guojianyuan1@huawei.com)
+# --------------------------------------------------------
+
+# from .darts_neck_search import DartsNeck
+from .hit_neck_search import HitNeck
+
+
+def build_search_neck(cfg):
+    """Build a neck model from a config dict."""
+    if cfg is not None:
+        cfg_ = cfg.copy()
+        neck_type = cfg_.pop('type')
+        if neck_type == 'DARTS':
+            raise NotImplementedError
+            # return DartsNeck(**cfg_)
+        elif neck_type == 'HitDet':
+            return HitNeck(**cfg_)
+        else:
+            raise KeyError('Invalid neck type {}'.format(neck_type))
+    else:
+        return None
\ No newline at end of file
+ """ + if cfg is not None: + cfg_ = cfg.copy() + neck_type = cfg_.pop('type') + if neck_type == 'DARTS': + raise NotImplementedError + # return DartsNeck(**cfg_) + elif neck_type == 'HitDet': + return HitNeck(**cfg_) + else: + raise KeyError('Invalid neck type {}'.fromat(neck_type)) + else: + return None \ No newline at end of file diff --git a/CDARTS_detection/mmdet/models/necks/auto_neck/hit_neck_search.py b/CDARTS_detection/mmdet/models/necks/auto_neck/hit_neck_search.py new file mode 100644 index 0000000..f5f0d1b --- /dev/null +++ b/CDARTS_detection/mmdet/models/necks/auto_neck/hit_neck_search.py @@ -0,0 +1,57 @@ +# -------------------------------------------------------- +# Copyright (c) 2019 Jianyuan Guo (guojianyuan1@huawei.com) +# -------------------------------------------------------- + +import torch +import torch.nn as nn +import torch.nn.functional as F +from .hit_ops import OPS + +PRIMITIVES = [ + 'conv_1x1', + 'ir_k3_e6_d3', + 'ir_k5_e6', + 'ir_k5_e6_d3', + 'sd_k3_d1', + 'sd_k3_d3', + 'sd_k5_d2', + 'sd_k5_d3', +] + + +class HitNeck(nn.Module): + def __init__(self, num_fm=4, in_channel=[256], out_channel=256, + latency=None, gamma=0.02, genotype=None, **kwargs): + super(HitNeck, self).__init__() + self.num_fm = num_fm + self.in_channel = in_channel + self.out_channel = out_channel + self.genotype = genotype + bn_type = kwargs.get('bn_type', 'BN') + + self.cells = nn.ModuleList() + input_size = [160, 80, 40, 20] # 1/4, 1/8, 1/16, 1/32 + + for i, ops in enumerate(genotype): + if i < self.num_fm: + cell = OPS[PRIMITIVES[ops]](input_size[i%self.num_fm], + in_channel[i%self.num_fm], out_channel, 1, bn=bn_type) + else: + cell = OPS[PRIMITIVES[ops]](input_size[i%self.num_fm], + out_channel, out_channel, 1, bn=bn_type) + self.cells.append(cell) + + + for m in self.modules(): + if isinstance(m, nn.SyncBatchNorm): + m._specify_ddp_gpu_num(1) + + def forward(self, x, step): + assert(step in [1, 2]) + _step = step - 1 + out = [] + + for i in range(_step*self.num_fm, step*self.num_fm): + out.append(self.cells[i](x[i%self.num_fm])) + + return out diff --git a/CDARTS_detection/mmdet/models/necks/auto_neck/hit_ops.py b/CDARTS_detection/mmdet/models/necks/auto_neck/hit_ops.py new file mode 100644 index 0000000..657e2e8 --- /dev/null +++ b/CDARTS_detection/mmdet/models/necks/auto_neck/hit_ops.py @@ -0,0 +1,222 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +norm_cfg_ = { + 'BN': nn.BatchNorm2d, + 'SyncBN': nn.SyncBatchNorm, + 'GN': nn.GroupNorm +} + +OPS = { + 'skip': + lambda input_size, in_channels, out_channels, stride, bn='BN': + Identity(input_size, in_channels, out_channels, stride), + 'ir_k3_e1': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 1, stride, 3, bn=bn), + 'ir_k3_e1_d2': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 1, stride, 3, dilation=2, bn=bn), + 'ir_k3_e3': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 3, stride, 3, bn=bn), + 'ir_k3_e6': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 6, stride, 3, bn=bn), + 'ir_k3_e6_d3': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 6, stride, 3, dilation=3, bn=bn), + 'ir_k3_s2': + lambda input_size, in_channels, out_channels, stride, bn='BN': + 
MBBlock(input_size, in_channels, out_channels, 1, stride, 3, 2, bn=bn), + 'ir_k5_e1': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 1, stride, 5, bn=bn), + 'ir_k5_e1_d2': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 1, stride, 5, dilation=2, bn=bn), + 'ir_k5_e3': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 3, stride, 5, bn=bn), + 'ir_k5_e6': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 6, stride, 5, bn=bn), + 'ir_k5_e6_d2': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 6, stride, 5, dilation=2, bn=bn), + 'ir_k5_e6_d3': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 6, stride, 5, dilation=3, bn=bn), + 'ir_k5_s2': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 1, stride, 5, 2, bn=bn), + 'ir_k7_e3': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 3, stride, 7, bn=bn), + 'ir_k7_e6': + lambda input_size, in_channels, out_channels, stride, bn='BN': + MBBlock(input_size, in_channels, out_channels, 6, stride, 7, bn=bn), + 'sd_k3_d1' : + lambda input_size, in_channels, out_channels, stride, bn='BN': + SepBlock(input_size, in_channels, out_channels, 1, stride, 3, 1, bn=bn), + 'sd_k3_d3' : + lambda input_size, in_channels, out_channels, stride, bn='BN': + SepBlock(input_size, in_channels, out_channels, 1, stride, 3, 3, bn=bn), + 'sd_k5_d2' : + lambda input_size, in_channels, out_channels, stride, bn='BN': + SepBlock(input_size, in_channels, out_channels, 1, stride, 5, 2, bn=bn), + 'sd_k5_d3' : + lambda input_size, in_channels, out_channels, stride, bn='BN': + SepBlock(input_size, in_channels, out_channels, 1, stride, 5, 3, bn=bn), + 'conv_1x1': + lambda input_size, in_channels, out_channels, stride, bn='BN': + ConvBlock(input_size, in_channels, out_channels, 1, stride, 1, 1, bn=bn), + } + + +class ChannelShuffle(nn.Module): + def __init__(self, groups=1): + super(ChannelShuffle, self).__init__() + self.groups = groups + + def forward(self, x): + if self.groups == 1: + return x + N, C, H, W = x.size() + cpg = C // self.groups # channels per group + out = x.view(N, self.groups, cpg, H, W) + out = out.permute(0, 2, 1, 3, 4).contiguous() + out = out.view(N, C, H, W) + return out + + +class ConvBNReLU(nn.Module): + def __init__(self, input_size, in_channels, out_channels, kernel_size, + stride, bias, relu_type, bn_type, groups=1, dilation=1): + super(ConvBNReLU, self).__init__() + assert(relu_type in ['relu', 'none']) + padding = (kernel_size - 1) * dilation // 2 + + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, + padding=padding, dilation=dilation, bias=bias, groups=groups) + nn.init.kaiming_normal_(self.conv.weight, mode="fan_out", nonlinearity="relu") + if self.conv.bias is not None: + nn.init.constant_(self.conv.bias, 0.0) + + if bn_type == 'none' : + self.bn = nn.Sequential() + elif bn_type == 'GN': + norm_layer = norm_cfg_[bn_type] + self.bn = norm_layer(num_channels=out_channels, num_groups=32) + else: + norm_layer = norm_cfg_[bn_type] + self.bn = norm_layer(out_channels) + self.relu = nn.ReLU(inplace=True) if relu_type == 'relu' else 
nn.Sequential()
+
+    def forward(self, x):
+        out = self.conv(x)
+        out = self.relu(self.bn(out))
+        return out
+
+
+class SE(nn.Module):
+    def __init__(self, input_size, in_channels, se_ratio):
+        super(SE, self).__init__()
+        self.in_channels, self.se_ratio = in_channels, se_ratio
+        self.pooling = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc1 = nn.Conv2d(in_channels, max(1, int(in_channels * se_ratio)), 1, bias=False)
+        self.fc2 = nn.Conv2d(max(1, int(in_channels * se_ratio)), in_channels, 1, bias=False)
+
+    def forward(self, x):
+        # returns the channel-wise gate; the caller multiplies it in
+        out = self.pooling(x)
+        out = self.fc1(out)
+        out = F.relu(out)
+        out = self.fc2(out)
+        out = torch.sigmoid(out)
+        return out
+
+
+class Identity(nn.Module):
+    def __init__(self, input_size, in_channels, out_channels, stride):
+        super(Identity, self).__init__()
+        if in_channels != out_channels or stride != 1:
+            # ConvBNReLU derives padding from the kernel size itself
+            self.conv = ConvBNReLU(input_size, in_channels, out_channels, kernel_size=1,
+                                   stride=stride, bias=False, relu_type='relu', bn_type='BN')
+        else:
+            self.conv = nn.Sequential()
+
+    def forward(self, x):
+        return self.conv(x)
+
+
+class ConvBlock(nn.Module):
+    def __init__(self, input_size, in_channels, out_channels,
+                 expansion, stride, kernel_size, dilation, groups=1, bn='none'):
+        super(ConvBlock, self).__init__()
+        self.conv = ConvBNReLU(input_size, in_channels, out_channels, kernel_size=kernel_size,
+                               stride=stride, bias=True, relu_type='none', bn_type=bn,
+                               groups=groups, dilation=dilation)
+
+    def forward(self, x):
+        out = self.conv(x)
+        return out
+
+
+class SepBlock(nn.Module):
+    def __init__(self, input_size, in_channels, out_channels,
+                 expansion, stride, kernel_size, dilation, groups=1, bn='BN'):
+        super(SepBlock, self).__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.stride = stride
+        self.conv1 = ConvBNReLU(input_size, in_channels, in_channels, kernel_size=kernel_size,
+                                stride=stride, bias=False, relu_type='relu', bn_type=bn,
+                                groups=in_channels, dilation=dilation)
+        self.conv2 = ConvBNReLU(input_size//stride, in_channels, out_channels, kernel_size=1,
+                                stride=1, bias=False, relu_type='none', bn_type=bn, groups=groups)
+
+    def forward(self, x):
+        out = self.conv1(x)
+        out = self.conv2(out)
+        if self.in_channels == self.out_channels and self.stride == 1:
+            out = out + x
+        return out
+
+
+class MBBlock(nn.Module):
+    def __init__(self, input_size, in_channels, out_channels, expansion, stride, kernel_size,
+                 dilation=1, groups=1, has_se=False, bn='BN'):
+        super(MBBlock, self).__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.has_se = has_se
+        self.stride = stride
+        self.groups = groups
+        mid_channels = in_channels * expansion
+
+        self.conv1 = ConvBNReLU(input_size, in_channels, mid_channels, kernel_size=1, stride=1, dilation=1,
+                                bias=False, relu_type='relu', bn_type=bn, groups=groups)
+        self.conv2 = ConvBNReLU(input_size, mid_channels, mid_channels, kernel_size=kernel_size, stride=stride, dilation=dilation,
+                                bias=False, relu_type='relu', bn_type=bn, groups=mid_channels)
+        self.conv3 = ConvBNReLU(input_size//self.stride, mid_channels, out_channels, kernel_size=1, stride=1, dilation=1,
+                                bias=False, relu_type='none', bn_type=bn, groups=groups)
+
+        if has_se:
+            self.se = SE(input_size, mid_channels, se_ratio=0.05)
+
+        if groups != 1:
+            # ChannelShuffle only takes the group count
+            self.shuffle = ChannelShuffle(groups)
+
+    def forward(self, x):
+        out = self.conv1(x)
+        if self.groups != 1:
+            out = self.shuffle(out)
+        out = self.conv2(out)
+        if self.has_se:
+            out = out * self.se(out)
+        out = self.conv3(out)
+        if self.in_channels == self.out_channels and self.stride == 1:
+            out = out + x
+        return out
diff --git a/CDARTS_detection/mmdet/models/necks/bfp.py b/CDARTS_detection/mmdet/models/necks/bfp.py
new file mode 100644
index 0000000..03aee10
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/necks/bfp.py
@@ -0,0 +1,102 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import xavier_init
+
+from ..plugins import NonLocal2D
+from ..registry import NECKS
+from ..utils import ConvModule
+
+
+@NECKS.register_module
+class BFP(nn.Module):
+    """BFP (Balanced Feature Pyramids)
+
+    BFP takes multi-level features as inputs, gathers them into a single one,
+    then refines the gathered feature and scatters the refined result back
+    to the multi-level features. This module is used in Libra R-CNN
+    (CVPR 2019), see https://arxiv.org/pdf/1904.02701.pdf for details.
+
+    Args:
+        in_channels (int): Number of input channels (feature maps of all
+            levels should have the same channels).
+        num_levels (int): Number of input feature levels.
+        conv_cfg (dict): The config dict for convolution layers.
+        norm_cfg (dict): The config dict for normalization layers.
+        refine_level (int): Index of integration and refine level of BSF in
+            multi-level features from bottom to top.
+        refine_type (str): Type of the refine op, currently support
+            [None, 'conv', 'non_local'].
+    """
+
+    def __init__(self,
+                 in_channels,
+                 num_levels,
+                 refine_level=2,
+                 refine_type=None,
+                 conv_cfg=None,
+                 norm_cfg=None):
+        super(BFP, self).__init__()
+        assert refine_type in [None, 'conv', 'non_local']
+
+        self.in_channels = in_channels
+        self.num_levels = num_levels
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+
+        self.refine_level = refine_level
+        self.refine_type = refine_type
+        assert 0 <= self.refine_level < self.num_levels
+
+        if self.refine_type == 'conv':
+            self.refine = ConvModule(
+                self.in_channels,
+                self.in_channels,
+                3,
+                padding=1,
+                conv_cfg=self.conv_cfg,
+                norm_cfg=self.norm_cfg)
+        elif self.refine_type == 'non_local':
+            self.refine = NonLocal2D(
+                self.in_channels,
+                reduction=1,
+                use_scale=False,
+                conv_cfg=self.conv_cfg,
+                norm_cfg=self.norm_cfg)
+
+    def init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                xavier_init(m, distribution='uniform')
+
+    def forward(self, inputs):
+        assert len(inputs) == self.num_levels
+
+        # step 1: gather multi-level features by resize and average
+        feats = []
+        gather_size = inputs[self.refine_level].size()[2:]
+        for i in range(self.num_levels):
+            if i < self.refine_level:
+                gathered = F.adaptive_max_pool2d(
+                    inputs[i], output_size=gather_size)
+            else:
+                gathered = F.interpolate(
+                    inputs[i], size=gather_size, mode='nearest')
+            feats.append(gathered)
+
+        bsf = sum(feats) / len(feats)
+
+        # step 2: refine gathered features
+        if self.refine_type is not None:
+            bsf = self.refine(bsf)
+
+        # step 3: scatter refined features to multi-levels by a residual path
+        outs = []
+        for i in range(self.num_levels):
+            out_size = inputs[i].size()[2:]
+            if i < self.refine_level:
+                residual = F.interpolate(bsf, size=out_size, mode='nearest')
+            else:
+                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
+            outs.append(residual + inputs[i])
+
+        return tuple(outs)
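BFP's gather/refine/scatter flow is easy to see on toy tensors. A minimal sketch of steps 1 and 3 without the refine op (level count and shapes are illustrative assumptions):

import torch
import torch.nn.functional as F

levels = [torch.rand(1, 256, s, s) for s in (64, 32, 16, 8, 4)]
refine_level = 2
gather_size = levels[refine_level].shape[2:]

# step 1: bring every level to the middle resolution and average
feats = [
    F.adaptive_max_pool2d(x, gather_size) if x.shape[-1] > gather_size[-1]
    else F.interpolate(x, size=gather_size, mode='nearest')
    for x in levels
]
bsf = sum(feats) / len(feats)

# step 3: scatter back residually to every level
outs = [
    x + (F.interpolate(bsf, size=x.shape[2:], mode='nearest')
         if x.shape[-1] > gather_size[-1]
         else F.adaptive_max_pool2d(bsf, x.shape[2:]))
    for x in levels
]
print([o.shape for o in outs])  # same shapes as the inputs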
diff --git a/CDARTS_detection/mmdet/models/necks/fpn.py b/CDARTS_detection/mmdet/models/necks/fpn.py
new file mode 100644
index 0000000..0b8e7e6
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/necks/fpn.py
@@ -0,0 +1,271 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import xavier_init
+
+from mmdet.core import auto_fp16
+from ..registry import NECKS
+from ..utils import ConvModule
+
+
+# For toy experiments
+class MBBlock(nn.Module):
+    def __init__(self, in_channels, out_channels, expansion, stride, kernel_size, dilation=1, groups=1):
+        super(MBBlock, self).__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.stride = stride
+        self.groups = groups
+        mid_channels = in_channels * expansion
+        padding = (kernel_size - 1) * dilation // 2
+
+        self.conv1 = nn.Sequential(
+            nn.Conv2d(in_channels, mid_channels, 1, stride=1, padding=0, dilation=1, bias=False, groups=groups),
+            nn.SyncBatchNorm(mid_channels),
+            nn.ReLU(inplace=True)
+        )
+
+        self.conv2 = nn.Sequential(
+            nn.Conv2d(mid_channels, mid_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False, groups=mid_channels),
+            nn.SyncBatchNorm(mid_channels),
+            nn.ReLU(inplace=True)
+        )
+
+        self.conv3 = nn.Sequential(
+            nn.Conv2d(mid_channels, out_channels, 1, stride=1, padding=0, dilation=1, bias=False, groups=groups),
+            nn.SyncBatchNorm(out_channels)
+        )
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0.0001)
+                nn.init.constant_(m.running_mean, 0)
+
+            if isinstance(m, nn.SyncBatchNorm):
+                m._specify_ddp_gpu_num(1)
+
+    def forward(self, x):
+        out = self.conv1(x)
+        out = self.conv2(out)
+        out = self.conv3(out)
+        if self.in_channels == self.out_channels and self.stride == 1:
+            out = out + x
+        return out
+
+
+@NECKS.register_module
+class FPN(nn.Module):
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 num_outs,
+                 start_level=0,
+                 end_level=-1,
+                 add_extra_convs=False,
+                 extra_convs_on_inputs=True,
+                 relu_before_extra_convs=False,
+                 conv_cfg=None,
+                 norm_cfg=None,
+                 activation=None,
+                 fpn_kernel=3,
+                 lateral_kernel=1,
+                 depthwise=None,
+                 toy_replace=None,
+                 dense_add=None):
+        super(FPN, self).__init__()
+        assert isinstance(in_channels, list)
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.num_ins = len(in_channels)
+        self.num_outs = num_outs
+        self.activation = activation
+        self.relu_before_extra_convs = relu_before_extra_convs
+        self.fp16_enabled = False
+        self.fpn_kernel = fpn_kernel
+        self.lateral_kernel = lateral_kernel
+        self.dense_add = dense_add
+
+        if end_level == -1:
+            self.backbone_end_level = self.num_ins
+            assert num_outs >= self.num_ins - start_level
+        else:
+            # if end_level < inputs, no extra level is allowed
+            self.backbone_end_level = end_level
+            assert end_level <= len(in_channels)
+            assert num_outs == end_level - start_level
+        self.start_level = start_level
+        self.end_level = end_level
+        self.add_extra_convs = add_extra_convs
+        self.extra_convs_on_inputs = extra_convs_on_inputs
+
+        self.lateral_convs = nn.ModuleList()
+        self.fpn_convs = nn.ModuleList()
+
+        for i in range(self.start_level, self.backbone_end_level):
+            l_conv = ConvModule(
+                in_channels[i],
+                out_channels,
+                lateral_kernel,
+                padding=(lateral_kernel-1)//2,
+                conv_cfg=conv_cfg,
+                norm_cfg=norm_cfg,
+                activation=self.activation,
+                inplace=False)
+            if depthwise is not None:
+                if depthwise == 'sep':
+                    fpn_conv = nn.Conv2d(out_channels, out_channels, self.fpn_kernel,
+                                         padding=int((self.fpn_kernel-1)/2), groups=out_channels)
+                elif depthwise == 'sep-depth':
+
fpn_conv = nn.Sequential( + nn.Conv2d(out_channels, out_channels, self.fpn_kernel, + padding=int((self.fpn_kernel-1)/2), groups=out_channels), + nn.Conv2d(out_channels, out_channels, 1, padding=0)) + else: + if toy_replace is not None and i == toy_replace.get('stage', 30): + if toy_replace.get('block', 'res') == 'ir': + fpn_conv = MBBlock( + out_channels, out_channels, 1, 1, + toy_replace.get('conv_kernel'), dilation=toy_replace.get('dilation'), groups=1) + else: + fpn_conv = ConvModule( + out_channels, + out_channels, + toy_replace.get('conv_kernel'), + padding=(toy_replace.get('conv_kernel')-1) * toy_replace.get('dilation') // 2, + dilation=toy_replace.get('dilation'), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + activation=self.activation, + inplace=False) + else: + fpn_conv = ConvModule( + out_channels, + out_channels, + self.fpn_kernel, + padding=int((self.fpn_kernel-1)/2), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + activation=self.activation, + inplace=False) + + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.extra_convs_on_inputs: + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + activation=self.activation, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + # default init_weights for conv(msra) and norm in ConvModule + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + xavier_init(m, distribution='uniform') + + @auto_fp16() + def forward(self, inputs): + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + if self.dense_add is not None: + if self.dense_add == 'no': + laterals = laterals + elif self.dense_add == 'all': + laterals_ = [0 for i in range(len(laterals))] + for i in range(used_backbone_levels - 1, -1, -1): + h, w = laterals[i].size(2), laterals[i].size(3) + for j in range(len(laterals)): + for k in range(i-j): + if k == 0: + tmp_lateral = F.max_pool2d(laterals[j], 2, stride=2) + else: + tmp_lateral = F.max_pool2d(tmp_lateral, 2, stride=2) + if i > j: + laterals_[i] += F.interpolate(tmp_lateral, size=(h,w), mode='bilinear', align_corners=True) + else: + laterals_[i] += F.interpolate(laterals[j], size=(h,w), mode='bilinear', align_corners=True) + laterals = laterals_ + elif self.dense_add == 'top-down': + laterals_ = [0 for i in range(len(laterals))] + for i in range(used_backbone_levels - 1, -1, -1): + h, w = laterals[i].size(2), laterals[i].size(3) + for j in range(used_backbone_levels - 1, i-1, -1): + laterals_[i] += F.interpolate(laterals[j], size=(h,w), mode='nearest') + laterals = laterals_ + elif self.dense_add == 'bottom-up-nearest': + for i in range(0, used_backbone_levels-1, 1): + laterals[i+1] += F.max_pool2d(laterals[i], 1, stride=2) + elif self.dense_add == 'bottom-up': + laterals_ = [0 for i in range(len(laterals))] + for i in range(used_backbone_levels - 1, -1, -1): + h, w = laterals[i].size(2), laterals[i].size(3) + for j in range(i+1): + for k in range(i-j): + if k == 0: + tmp_lateral = 
F.max_pool2d(laterals[j], 2, stride=2)
+                            else:
+                                tmp_lateral = F.max_pool2d(tmp_lateral, 2, stride=2)
+                        if i > j:
+                            laterals_[i] += F.interpolate(tmp_lateral, size=(h, w), mode='bilinear', align_corners=True)
+                        else:
+                            laterals_[i] += F.interpolate(laterals[j], size=(h, w), mode='bilinear', align_corners=True)
+                laterals = laterals_
+        else:
+            # default path (dense_add=None): classic FPN top-down merge
+            for i in range(used_backbone_levels - 1, 0, -1):
+                laterals[i - 1] += F.interpolate(
+                    laterals[i], scale_factor=2, mode='nearest')
+
+        # build outputs
+        # part 1: from original levels
+        if self.fpn_kernel == 1 or self.fpn_kernel == 3:
+            outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)]
+        else:
+            outs = [laterals[i] for i in range(used_backbone_levels)]
+        # part 2: add extra levels
+        if self.num_outs > len(outs):
+            # use max pool to get more levels on top of outputs
+            # (e.g., Faster R-CNN, Mask R-CNN)
+            if not self.add_extra_convs:
+                for i in range(self.num_outs - used_backbone_levels):
+                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
+            # add conv layers on top of original feature maps (RetinaNet)
+            else:
+                if self.extra_convs_on_inputs:
+                    orig = inputs[self.backbone_end_level - 1]
+                    outs.append(self.fpn_convs[used_backbone_levels](orig))
+                else:
+                    outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
+                for i in range(used_backbone_levels + 1, self.num_outs):
+                    if self.relu_before_extra_convs:
+                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))
+                    else:
+                        outs.append(self.fpn_convs[i](outs[-1]))
+        return tuple(outs)
\ No newline at end of file
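The default path (dense_add=None) is the classic top-down merge. A toy sketch of just that merge, independent of the class above (channel count and spatial sizes are illustrative):

import torch
import torch.nn.functional as F

# Toy laterals for strides 4/8/16/32, already projected to 256 channels.
laterals = [torch.rand(1, 256, s, s) for s in (64, 32, 16, 8)]

# Top-down: upsample the coarser map by 2x and add it into the finer one.
for i in range(len(laterals) - 1, 0, -1):
    laterals[i - 1] += F.interpolate(laterals[i], scale_factor=2, mode='nearest')

print([l.shape for l in laterals])  # finest map now carries all coarser context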
diff --git a/CDARTS_detection/mmdet/models/necks/fpn_panet.py b/CDARTS_detection/mmdet/models/necks/fpn_panet.py
new file mode 100644
index 0000000..1d6af39
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/necks/fpn_panet.py
@@ -0,0 +1,169 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import kaiming_init, constant_init, xavier_init
+
+from mmdet.core import auto_fp16
+from ..registry import NECKS
+from ..utils import ConvModule
+
+
+@NECKS.register_module
+class PAFPN(nn.Module):
+    r""" PAFPN Arch
+         lateral        TD        3x3       BU
+    C5 --------> C5     P5        N5        N5
+         lateral
+    C4 --------> C4     P4        N4        N4
+         lateral
+    C3 --------> C3     P3        N3        N3
+         lateral
+    C2 --------> C2     P2        N2        N2
+    """
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 num_outs,
+                 start_level=0,
+                 end_level=-1,
+                 add_extra_convs=False,
+                 extra_convs_on_inputs=True,
+                 relu_before_extra_convs=False,
+                 conv_cfg=None,
+                 norm_cfg=None,
+                 activation=None,
+                 lateral_kernel=1,
+                 fpn_kernel=3,
+                 bottom_up_kernel=3,
+                 pa_kernel=3):
+        super(PAFPN, self).__init__()
+        assert isinstance(in_channels, list)
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.num_ins = len(in_channels)
+        self.num_outs = num_outs
+        self.activation = activation
+        self.relu_before_extra_convs = relu_before_extra_convs
+        self.fp16_enabled = False
+        self.fpn_kernel = fpn_kernel
+        self.lateral_kernel = lateral_kernel
+        self.bottom_up_kernel = bottom_up_kernel
+        self.pa_kernel = pa_kernel
+
+        if end_level == -1:
+            self.backbone_end_level = self.num_ins
+            assert num_outs >= self.num_ins - start_level
+        else:
+            # if end_level < inputs, no extra level is allowed
+            self.backbone_end_level = end_level
+            assert end_level <= len(in_channels)
+            assert num_outs == end_level - start_level
+        self.start_level = start_level
+        self.end_level = end_level
+        self.add_extra_convs = add_extra_convs
+        self.extra_convs_on_inputs = extra_convs_on_inputs
+
+        self.lateral_convs = nn.ModuleList()
+        self.fpn_convs = nn.ModuleList()
+        self.bottom_up_convs = nn.ModuleList()
+        self.pa_convs = nn.ModuleList()
+
+        for i in range(self.start_level, self.backbone_end_level):  # Faster [0,4]
+            l_conv = ConvModule(
+                in_channels[i], out_channels, lateral_kernel,
+                padding=(lateral_kernel-1)//2, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
+                activation=None, inplace=True)
+            fpn_conv = ConvModule(
+                out_channels, out_channels, fpn_kernel,
+                padding=(fpn_kernel-1)//2, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
+                activation=None, inplace=True)
+
+            self.lateral_convs.append(l_conv)
+            self.fpn_convs.append(fpn_conv)
+
+        for i in range(self.start_level, self.backbone_end_level - 1):  # Faster [0,3]
+            if bottom_up_kernel > 0:
+                bottom_up_conv = ConvModule(
+                    out_channels, out_channels, bottom_up_kernel, stride=2,
+                    padding=(bottom_up_kernel-1)//2, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
+                    activation=activation, inplace=True)
+
+                self.bottom_up_convs.append(bottom_up_conv)
+
+            if pa_kernel > 0:
+                pa_conv = ConvModule(
+                    out_channels, out_channels, pa_kernel,
+                    padding=(pa_kernel-1)//2, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
+                    activation=activation, inplace=True)
+
+                self.pa_convs.append(pa_conv)
+
+        # add extra conv layers (e.g., RetinaNet)
+        extra_levels = num_outs - self.backbone_end_level + self.start_level
+        if add_extra_convs and extra_levels >= 1:
+            for i in range(extra_levels):
+                if i == 0 and self.extra_convs_on_inputs:
+                    in_channels = self.in_channels[self.backbone_end_level - 1]
+                else:
+                    in_channels = out_channels
+                extra_fpn_conv = ConvModule(
+                    in_channels, out_channels, 3,
+                    stride=2, padding=1, conv_cfg=conv_cfg,
+                    norm_cfg=norm_cfg, activation=self.activation, inplace=True)
+                self.fpn_convs.append(extra_fpn_conv)
+
+    # default init_weights for conv(msra) and norm in ConvModule
+    def init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                xavier_init(m, distribution='uniform')
+
+    @auto_fp16()
+    def forward(self, inputs):
+        # inputs [C2, C3, C4, C5]
+        assert len(inputs) == len(self.in_channels)
+
+        # build top-down laterals
+        laterals = [
+            lateral_conv(inputs[i + self.start_level])
+            for i, lateral_conv in enumerate(self.lateral_convs)
+        ]
+        used_backbone_levels = len(laterals)  # Faster rcnn: 4
+
+        # Top-down path
+        for i in range(used_backbone_levels - 1, 0, -1):
+            laterals[i - 1] += F.interpolate(laterals[i], scale_factor=2, mode='nearest')
+
+        # only the per-level 3x3 convs here; extra convs are applied below
+        fpn_middle = [
+            fpn_conv(laterals[i])
+            for i, fpn_conv in enumerate(self.fpn_convs[:used_backbone_levels])
+        ]
+
+        # Bottom-up path
+        # build outputs
+        if self.pa_kernel > 0:
+            outs = [fpn_middle[0]]
+            for i in range(0, self.backbone_end_level - self.start_level - 1):  # Faster: [0,3]
+                if self.bottom_up_kernel > 0:
+                    tmp = self.bottom_up_convs[i](outs[i]) + fpn_middle[i + 1]
+                else:
+                    tmp = F.max_pool2d(outs[i], 2, stride=2) + fpn_middle[i + 1]
+                outs.append(self.pa_convs[i](tmp))
+        else:
+            outs = fpn_middle
+
+        # part 2: add extra levels
+        if self.num_outs > len(outs):
+            # use max pool to get more levels on top of outputs
+            # (e.g., Faster R-CNN, Mask R-CNN)
+            if not self.add_extra_convs:
+                for i in range(self.num_outs - used_backbone_levels):
+                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
+            # add conv layers on top of original feature maps (RetinaNet)
+            else:
+                if self.extra_convs_on_inputs:
+                    orig = inputs[self.backbone_end_level - 1]
+                    outs.append(self.fpn_convs[used_backbone_levels](orig))
+                else:
+                    outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
+                for i in range(used_backbone_levels + 1, self.num_outs):
+                    if self.relu_before_extra_convs:
+                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))
+                    else:
+                        outs.append(self.fpn_convs[i](outs[-1]))
+        return tuple(outs)
\ No newline at end of file
diff --git a/CDARTS_detection/mmdet/models/necks/hrfpn.py b/CDARTS_detection/mmdet/models/necks/hrfpn.py
new file mode 100644
index 0000000..743eba6
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/necks/hrfpn.py
@@ -0,0 +1,97 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.checkpoint import checkpoint
+from mmcv.cnn.weight_init import caffe2_xavier_init
+
+from ..utils import ConvModule
+from ..registry import NECKS
+
+
+@NECKS.register_module
+class HRFPN(nn.Module):
+    """HRFPN (High Resolution Feature Pyramids)
+
+    arXiv: https://arxiv.org/abs/1904.04514
+
+    Args:
+        in_channels (list): number of channels for each branch.
+        out_channels (int): output channels of feature pyramids.
+        num_outs (int): number of output stages.
+        pooling_type (str): pooling for generating feature pyramids
+            from {MAX, AVG}.
+        conv_cfg (dict): dictionary to construct and config conv layer.
+        norm_cfg (dict): dictionary to construct and config norm layer.
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed.
+    """
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 num_outs=5,
+                 pooling_type='AVG',
+                 conv_cfg=None,
+                 norm_cfg=None,
+                 with_cp=False):
+        super(HRFPN, self).__init__()
+        assert isinstance(in_channels, list)
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.num_ins = len(in_channels)
+        self.num_outs = num_outs
+        self.with_cp = with_cp
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+
+        self.reduction_conv = ConvModule(
+            sum(in_channels),
+            out_channels,
+            kernel_size=1,
+            conv_cfg=self.conv_cfg,
+            activation=None)
+
+        self.fpn_convs = nn.ModuleList()
+        for i in range(self.num_outs):
+            self.fpn_convs.append(
+                ConvModule(
+                    out_channels,
+                    out_channels,
+                    kernel_size=3,
+                    padding=1,
+                    conv_cfg=self.conv_cfg,
+                    activation=None))
+
+        if pooling_type == 'MAX':
+            self.pooling = F.max_pool2d
+        else:
+            self.pooling = F.avg_pool2d
+
+    def init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                caffe2_xavier_init(m)
+
+    def forward(self, inputs):
+        assert len(inputs) == self.num_ins
+        outs = [inputs[0]]
+        for i in range(1, self.num_ins):
+            outs.append(
+                F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
+        out = torch.cat(outs, dim=1)
+        if out.requires_grad and self.with_cp:
+            out = checkpoint(self.reduction_conv, out)
+        else:
+            out = self.reduction_conv(out)
+        outs = [out]
+        for i in range(1, self.num_outs):
+            outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
+        outputs = []
+
+        for i in range(self.num_outs):
+            if outs[i].requires_grad and self.with_cp:
+                tmp_out = checkpoint(self.fpn_convs[i], outs[i])
+            else:
+                tmp_out = self.fpn_convs[i](outs[i])
+            outputs.append(tmp_out)
+        return tuple(outputs)
diff --git a/CDARTS_detection/mmdet/models/necks/nas_fpn.py b/CDARTS_detection/mmdet/models/necks/nas_fpn.py
new file mode 100644
index 0000000..f2ebba2
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/necks/nas_fpn.py
@@ -0,0 +1,191 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import xavier_init
+from mmcv.cnn import caffe2_xavier_init
+
+from mmdet.core import auto_fp16
+from ..registry import NECKS
+from ..utils import ConvModule
+
+norm_cfg_ = {
+    'BN': nn.BatchNorm2d,
+    'SyncBN': nn.SyncBatchNorm,
+    'GN': nn.GroupNorm,
+}
+
+
+class MergingCell(nn.Module):
+    def __init__(self,
channels=256, with_conv=True, norm_type='BN'): + super(MergingCell, self).__init__() + self.with_conv = with_conv + norm_layer = norm_cfg_[norm_type] + if self.with_conv: + self.conv_out = nn.Sequential( + nn.ReLU(inplace=True), + nn.Conv2d(channels, channels, 3, 1, 1), + norm_layer(channels) + ) + + def _binary_op(self, x1, x2): + raise NotImplementedError + + def _resize(self, x, size): + if x.shape[-2:] == size: + return x + elif x.shape[-2:] < size: + return F.interpolate(x, size=size, mode='nearest') + else: + assert x.shape[-2] % size[-2] == 0 and x.shape[-1] % size[-1] == 0 + kernel_size = x.shape[-1] // size[-1] + x = F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size) + # x = F.interpolate(x, size=size, mode='nearest') + return x + + def forward(self, x1, x2, out_size): + assert x1.shape[:2] == x2.shape[:2] + assert len(out_size) == 2 + + x1 = self._resize(x1, out_size) + x2 = self._resize(x2, out_size) + + x = self._binary_op(x1, x2) + if self.with_conv: + x = self.conv_out(x) + return x + + +class SumCell(MergingCell): + def _binary_op(self, x1, x2): + return x1 + x2 + + +class GPCell(MergingCell): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.global_pool = nn.AdaptiveAvgPool2d((1, 1)) + + def _binary_op(self, x1, x2): + x2_att = self.global_pool(x2).sigmoid() + return x2 + x2_att * x1 + + +@NECKS.register_module +class NASFPN(nn.Module): + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + add_extra_convs=False, + stack_times=7, + lateral_kernel=1, + norm_type='SyncBN'): + super(NASFPN, self).__init__() + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.stack_times = stack_times + self.norm_type = norm_type + + if end_level == -1: + self.backbone_end_level = self.num_ins + assert num_outs >= self.num_ins - start_level + else: + # if end_level < inputs, no extra level is allowed + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + + self.lateral_convs = nn.ModuleList() + # for i in range(self.start_level, self.backbone_end_level): # RetinaNet (1,4) + for i in range(self.start_level, self.start_level + num_outs): + in_channel = in_channels[i] if i < self.backbone_end_level else in_channels[-1] + padding = (lateral_kernel - 1) // 2 + l_conv = nn.Conv2d(in_channel, out_channels, kernel_size=lateral_kernel, padding=padding) + self.lateral_convs.append(l_conv) + + # add extra downsample layers (stride-2 pooling or conv) + extra_levels = num_outs - self.backbone_end_level + self.start_level + self.extra_downsamples = nn.ModuleList() + for i in range(extra_levels): + if self.add_extra_convs: + extra_conv = nn.Conv2d(in_channels[-1], in_channels[-1], 3, stride=2, padding=1) + self.extra_downsamples.append(extra_conv) + else: + self.extra_downsamples.append(nn.MaxPool2d(2, stride=2)) + + # add NAS FPN connections + self.fpn_stages = nn.ModuleList() + for _ in range(self.stack_times): + stage = nn.ModuleDict() + # gp(p6, p4) -> p4_1 + stage['gp_64_4'] = GPCell(out_channels, norm_type=norm_type) + # sum(p4_1, p4) -> p4_2 + stage['sum_44_4'] = SumCell(out_channels, norm_type=norm_type) + # sum(p4_2, p3) -> p3_out + stage['sum_43_3'] = SumCell(out_channels, norm_type=norm_type) + # sum(p3_out, p4_2) -> 
p4_out
+            stage['sum_34_4'] = SumCell(out_channels, norm_type=norm_type)
+            # sum(p5, gp(p4_out, p3_out)) -> p5_out
+            stage['gp_43_5'] = GPCell(with_conv=False)
+            stage['sum_55_5'] = SumCell(out_channels, norm_type=norm_type)
+            # sum(p7, gp(p5_out, p4_2)) -> p7_out
+            stage['gp_54_7'] = GPCell(with_conv=False)
+            stage['sum_77_7'] = SumCell(out_channels, norm_type=norm_type)
+            # gp(p7_out, p5_out) -> p6_out
+            stage['gp_75_6'] = GPCell(out_channels, norm_type=norm_type)
+            self.fpn_stages.append(stage)
+
+        for m in self.modules():
+            if isinstance(m, nn.SyncBatchNorm):
+                m._specify_ddp_gpu_num(1)
+
+    def init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                caffe2_xavier_init(m)
+
+    @auto_fp16()
+    def forward(self, inputs):
+        assert len(inputs) == len(self.in_channels)
+        # build P6-P7 on top of P5
+        inputs = list(inputs)
+        for downsample in self.extra_downsamples:
+            inputs.append(downsample(inputs[-1]))
+
+        # 1x1 on P3-P7
+        feats = [
+            lateral_conv(inputs[i + self.start_level])
+            for i, lateral_conv in enumerate(self.lateral_convs)
+        ]
+
+        p3, p4, p5, p6, p7 = feats
+
+        for stage in self.fpn_stages:
+            # gp(p6, p4) -> p4_1
+            p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:])
+            # sum(p4_1, p4) -> p4_2
+            p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:])
+            # sum(p4_2, p3) -> p3_out
+            p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:])
+            # sum(p3_out, p4_2) -> p4_out
+            p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:])
+            # sum(p5, gp(p4_out, p3_out)) -> p5_out
+            p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:])
+            p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:])
+            # sum(p7, gp(p5_out, p4_2)) -> p7_out
+            p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:])
+            p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:])
+            # gp(p7_out, p5_out) -> p6_out
+            p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:])
+
+        return tuple([p3, p4, p5, p6, p7])
+
\ No newline at end of file
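Both merging cells first resize their two inputs to the requested output size and only then combine them. A self-contained sketch of that contract (a simplified re-implementation for illustration, not the classes above; shapes are assumed):

import torch
import torch.nn.functional as F

def resize_to(x, size):
    # Upsample with nearest interpolation, downsample with max pooling,
    # mirroring what MergingCell._resize does.
    if x.shape[-2:] == size:
        return x
    if x.shape[-1] < size[-1]:
        return F.interpolate(x, size=size, mode='nearest')
    k = x.shape[-1] // size[-1]
    return F.max_pool2d(x, kernel_size=k, stride=k)

p4 = torch.rand(1, 256, 32, 32)
p6 = torch.rand(1, 256, 8, 8)

# SumCell-style merge: resize both inputs, then add.
p4_1 = resize_to(p6, (32, 32)) + p4

# GPCell-style merge: gate one input by the pooled attention of the other.
att = torch.sigmoid(F.adaptive_avg_pool2d(p4, (1, 1)))
fused = p4 + att * resize_to(p6, (32, 32))
print(p4_1.shape, fused.shape)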
diff --git a/CDARTS_detection/mmdet/models/necks/search_pafpn.py b/CDARTS_detection/mmdet/models/necks/search_pafpn.py
new file mode 100644
index 0000000..7a9b35d
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/necks/search_pafpn.py
@@ -0,0 +1,146 @@
+# --------------------------------------------------------
+# Copyright (c) 2019 Jianyuan Guo (guojianyuan1@huawei.com)
+# --------------------------------------------------------
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import kaiming_init, constant_init, xavier_init
+
+from mmdet.core import auto_fp16
+from ..registry import NECKS
+from ..utils import ConvModule
+
+from .auto_neck.build_neck import build_search_neck
+
+
+@NECKS.register_module
+class SearchPAFPN(nn.Module):
+    r""" PAFPN Arch
+          TBS        TD       TBS       BU
+    C5 -----> C5     P5 -----> N5       N5
+
+    C4 -----> C4     P4 -----> N4       N4
+
+    C3 -----> C3     P3 -----> N3       N3
+
+    C2 -----> C2     P2 -----> N2       N2
+    """
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 num_outs,
+                 start_level=0,
+                 end_level=-1,
+                 add_extra_convs=False,
+                 extra_convs_on_inputs=True,
+                 relu_before_extra_convs=False,
+                 conv_cfg=None,
+                 norm_cfg=None,
+                 activation=None,
+                 pa_kernel=3,
+                 search_neck=None):
+        super(SearchPAFPN, self).__init__()
+        assert isinstance(in_channels, list)
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.num_ins = len(in_channels)
+        self.num_outs = num_outs
+        self.activation = activation
+        self.relu_before_extra_convs = relu_before_extra_convs
+        self.fp16_enabled = False
+        self.pa_kernel = pa_kernel
+
+        self.SearchNeck = build_search_neck(search_neck)
+
+        if end_level == -1:
+            self.backbone_end_level = self.num_ins
+            assert num_outs >= self.num_ins - start_level
+        else:
+            # if end_level < inputs, no extra level is allowed
+            self.backbone_end_level = end_level
+            assert end_level <= len(in_channels)
+            assert num_outs == end_level - start_level
+        self.start_level = start_level
+        self.end_level = end_level
+        self.add_extra_convs = add_extra_convs
+        self.extra_convs_on_inputs = extra_convs_on_inputs
+
+        self.pa_convs = nn.ModuleList()
+
+        for i in range(self.start_level, self.backbone_end_level - 1):  # Faster (0,3) one-stage (1,3)
+            if pa_kernel > 0:
+                pa_conv = ConvModule(
+                    out_channels, out_channels, pa_kernel,
+                    padding=(pa_kernel-1)//2, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
+                    activation=activation, inplace=True)
+
+                self.pa_convs.append(pa_conv)
+
+        # add extra conv layers (e.g., RetinaNet); one-stage 5-4+1
+        extra_levels = num_outs - self.backbone_end_level + self.start_level
+        if add_extra_convs and extra_levels >= 1:
+            self.fpn_convs = nn.ModuleList()
+            for i in range(extra_levels):
+                if i == 0 and self.extra_convs_on_inputs:
+                    in_channel = self.in_channels[self.backbone_end_level - 1]
+                else:
+                    in_channel = out_channels
+                extra_fpn_conv = ConvModule(
+                    in_channel, out_channels, 3,
+                    stride=2, padding=1, conv_cfg=conv_cfg,
+                    norm_cfg=norm_cfg, activation=self.activation, inplace=True)
+                self.fpn_convs.append(extra_fpn_conv)
+
+    # default init_weights for conv(msra) and norm in ConvModule
+    def init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                xavier_init(m, distribution='uniform')
+
+    @auto_fp16()
+    def forward(self, inputs):
+        # inputs [C2, C3, C4, C5]
+        assert len(inputs) == len(self.in_channels)
+
+        # build top-down laterals
+        laterals = self.SearchNeck(inputs[self.start_level:], 1)
+
+        used_backbone_levels = len(laterals)  # Faster rcnn: 4; one-stage: 3
+
+        # Top-down path
+        for i in range(used_backbone_levels - 1, 0, -1):
+            laterals[i - 1] += F.interpolate(laterals[i], scale_factor=2, mode='nearest')
+
+        laterals_mid = self.SearchNeck(laterals, 2)
+
+        # Bottom-up path
+        # build outputs
+        if self.pa_kernel > 0:
+            outs = [laterals_mid[0]]
+            for i in range(0, self.backbone_end_level - self.start_level - 1):  # Faster: [0,3]
+                tmp = F.max_pool2d(outs[i], 2, stride=2) + laterals_mid[i + 1]
+                outs.append(self.pa_convs[i](tmp))
+        else:
+            outs = laterals_mid
+
+        # part 2: add extra levels
+        if self.num_outs > len(outs):
+            # use max pool to get more levels on top of outputs
+            # (e.g., Faster R-CNN, Mask R-CNN)
+            if not self.add_extra_convs:
+                for i in range(self.num_outs - used_backbone_levels):
+                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))
+            # add conv layers on top of original feature maps (RetinaNet)
+            else:
+                if self.extra_convs_on_inputs:
+                    orig = inputs[self.backbone_end_level - 1]
+                    outs.append(self.fpn_convs[0](orig))
+                else:
+                    outs.append(self.fpn_convs[0](outs[-1]))
+                for i in range(used_backbone_levels + 1, self.num_outs):
+                    if self.relu_before_extra_convs:
+                        outs.append(self.fpn_convs[i-used_backbone_levels](F.relu(outs[-1])))
+                    else:
+                        outs.append(self.fpn_convs[i-used_backbone_levels](outs[-1]))
+        return tuple(outs), None
diff --git a/CDARTS_detection/mmdet/models/plugins/__init__.py b/CDARTS_detection/mmdet/models/plugins/__init__.py
new file mode 100644
index 0000000..2a771b9
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/plugins/__init__.py
@@ -0,0 +1,4 @@
+from .non_local import NonLocal2D
GeneralizedAttention
+
+__all__ = ['NonLocal2D', 'GeneralizedAttention']
diff --git a/CDARTS_detection/mmdet/models/plugins/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/models/plugins/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d547000508180fde76bfa3b5fee355e5ed7bbc82
Binary files /dev/null and b/CDARTS_detection/mmdet/models/plugins/__pycache__/__init__.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/plugins/__pycache__/generalized_attention.cpython-36.pyc b/CDARTS_detection/mmdet/models/plugins/__pycache__/generalized_attention.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b68b7bf996d822c546b0c8eb98eb3458104641bc
Binary files /dev/null and b/CDARTS_detection/mmdet/models/plugins/__pycache__/generalized_attention.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/plugins/__pycache__/non_local.cpython-36.pyc b/CDARTS_detection/mmdet/models/plugins/__pycache__/non_local.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..980acb24e43896ff798456a324d48bc42c4542d4
Binary files /dev/null and b/CDARTS_detection/mmdet/models/plugins/__pycache__/non_local.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/plugins/generalized_attention.py b/CDARTS_detection/mmdet/models/plugins/generalized_attention.py
new file mode 100644
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/plugins/generalized_attention.py
+import math
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import kaiming_init
+
+
+class GeneralizedAttention(nn.Module):
+    """GeneralizedAttention module.
+
+    See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks'
+    (https://arxiv.org/abs/1904.05873) for details.
+
+    Args:
+        in_dim (int): Channels of the input feature map.
+        spatial_range (int): The spatial range. -1 indicates no spatial range
+            constraint.
+        num_heads (int): The head number of the empirical_attention module.
+        position_embedding_dim (int): The position embedding dimension.
+        position_magnitude (int): A multiplier acting on coord difference.
+        kv_stride (int): The feature stride acting on key/value feature map.
+        q_stride (int): The feature stride acting on query feature map.
+        attention_type (str): A binary indicator string for indicating which
+            items in the generalized empirical_attention module are used.
+    """
+
+    def __init__(self,
+                 in_dim,
+                 spatial_range=-1,
+                 num_heads=9,
+                 position_embedding_dim=-1,
+                 position_magnitude=1,
+                 kv_stride=2,
+                 q_stride=1,
+                 attention_type='1111'):
+
+        super(GeneralizedAttention, self).__init__()
+
+        # hard range means local range for non-local operation
+        self.position_embedding_dim = (
+            position_embedding_dim if position_embedding_dim > 0 else in_dim)
+
+        self.position_magnitude = position_magnitude
+        self.num_heads = num_heads
+        self.channel_in = in_dim
+        self.spatial_range = spatial_range
+        self.kv_stride = kv_stride
+        self.q_stride = q_stride
+        self.attention_type = [bool(int(_)) for _ in attention_type]
+        self.qk_embed_dim = in_dim // num_heads
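+
+        # Review note: attention_type is parsed into four switches, matching
+        # the comments in forward(): [0] query-key (appr-appr),
+        # [1] query-position (appr-position), [2] bias-key (bias-appr),
+        # [3] bias-position; the default '1111' enables all four terms.
+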
out_c = self.qk_embed_dim * num_heads + + if self.attention_type[0] or self.attention_type[1]: + self.query_conv = nn.Conv2d( + in_channels=in_dim, + out_channels=out_c, + kernel_size=1, + bias=False) + self.query_conv.kaiming_init = True + + if self.attention_type[0] or self.attention_type[2]: + self.key_conv = nn.Conv2d( + in_channels=in_dim, + out_channels=out_c, + kernel_size=1, + bias=False) + self.key_conv.kaiming_init = True + + self.v_dim = in_dim // num_heads + self.value_conv = nn.Conv2d( + in_channels=in_dim, + out_channels=self.v_dim * num_heads, + kernel_size=1, + bias=False) + self.value_conv.kaiming_init = True + + if self.attention_type[1] or self.attention_type[3]: + self.appr_geom_fc_x = nn.Linear( + self.position_embedding_dim // 2, out_c, bias=False) + self.appr_geom_fc_x.kaiming_init = True + + self.appr_geom_fc_y = nn.Linear( + self.position_embedding_dim // 2, out_c, bias=False) + self.appr_geom_fc_y.kaiming_init = True + + if self.attention_type[2]: + stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) + appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv + self.appr_bias = nn.Parameter(appr_bias_value) + + if self.attention_type[3]: + stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) + geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv + self.geom_bias = nn.Parameter(geom_bias_value) + + self.proj_conv = nn.Conv2d( + in_channels=self.v_dim * num_heads, + out_channels=in_dim, + kernel_size=1, + bias=True) + self.proj_conv.kaiming_init = True + self.gamma = nn.Parameter(torch.zeros(1)) + + if self.spatial_range >= 0: + # only works when non local is after 3*3 conv + if in_dim == 256: + max_len = 84 + elif in_dim == 512: + max_len = 42 + + max_len_kv = int((max_len - 1.0) / self.kv_stride + 1) + local_constraint_map = np.ones( + (max_len, max_len, max_len_kv, max_len_kv), dtype=np.int) + for iy in range(max_len): + for ix in range(max_len): + local_constraint_map[iy, ix, + max((iy - self.spatial_range) // + self.kv_stride, 0):min( + (iy + self.spatial_range + + 1) // self.kv_stride + + 1, max_len), + max((ix - self.spatial_range) // + self.kv_stride, 0):min( + (ix + self.spatial_range + + 1) // self.kv_stride + + 1, max_len)] = 0 + + self.local_constraint_map = nn.Parameter( + torch.from_numpy(local_constraint_map).byte(), + requires_grad=False) + + if self.q_stride > 1: + self.q_downsample = nn.AvgPool2d( + kernel_size=1, stride=self.q_stride) + else: + self.q_downsample = None + + if self.kv_stride > 1: + self.kv_downsample = nn.AvgPool2d( + kernel_size=1, stride=self.kv_stride) + else: + self.kv_downsample = None + + self.init_weights() + + def get_position_embedding(self, + h, + w, + h_kv, + w_kv, + q_stride, + kv_stride, + device, + feat_dim, + wave_length=1000): + h_idxs = torch.linspace(0, h - 1, h).cuda(device) + h_idxs = h_idxs.view((h, 1)) * q_stride + + w_idxs = torch.linspace(0, w - 1, w).cuda(device) + w_idxs = w_idxs.view((w, 1)) * q_stride + + h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).cuda(device) + h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride + + w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).cuda(device) + w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride + + # (h, h_kv, 1) + h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0) + h_diff *= self.position_magnitude + + # (w, w_kv, 1) + w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0) + w_diff *= self.position_magnitude + + feat_range = torch.arange(0, feat_dim / 4).cuda(device) + + dim_mat = torch.Tensor([wave_length]).cuda(device) + dim_mat = dim_mat**((4. 
/ feat_dim) * feat_range) + dim_mat = dim_mat.view((1, 1, -1)) + + embedding_x = torch.cat( + ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2) + + embedding_y = torch.cat( + ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2) + + return embedding_x, embedding_y + + def forward(self, x_input): + num_heads = self.num_heads + + # use empirical_attention + if self.q_downsample is not None: + x_q = self.q_downsample(x_input) + else: + x_q = x_input + n, _, h, w = x_q.shape + + if self.kv_downsample is not None: + x_kv = self.kv_downsample(x_input) + else: + x_kv = x_input + _, _, h_kv, w_kv = x_kv.shape + + if self.attention_type[0] or self.attention_type[1]: + proj_query = self.query_conv(x_q).view( + (n, num_heads, self.qk_embed_dim, h * w)) + proj_query = proj_query.permute(0, 1, 3, 2) + + if self.attention_type[0] or self.attention_type[2]: + proj_key = self.key_conv(x_kv).view( + (n, num_heads, self.qk_embed_dim, h_kv * w_kv)) + + if self.attention_type[1] or self.attention_type[3]: + position_embed_x, position_embed_y = self.get_position_embedding( + h, w, h_kv, w_kv, self.q_stride, self.kv_stride, + x_input.device, self.position_embedding_dim) + # (n, num_heads, w, w_kv, dim) + position_feat_x = self.appr_geom_fc_x(position_embed_x).\ + view(1, w, w_kv, num_heads, self.qk_embed_dim).\ + permute(0, 3, 1, 2, 4).\ + repeat(n, 1, 1, 1, 1) + + # (n, num_heads, h, h_kv, dim) + position_feat_y = self.appr_geom_fc_y(position_embed_y).\ + view(1, h, h_kv, num_heads, self.qk_embed_dim).\ + permute(0, 3, 1, 2, 4).\ + repeat(n, 1, 1, 1, 1) + + position_feat_x /= math.sqrt(2) + position_feat_y /= math.sqrt(2) + + # accelerate for saliency only + if (np.sum(self.attention_type) == 1) and self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim).\ + repeat(n, 1, 1, 1) + + energy = torch.matmul(appr_bias, proj_key).\ + view(n, num_heads, 1, h_kv * w_kv) + + h = 1 + w = 1 + else: + # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for + if not self.attention_type[0]: + energy = torch.zeros( + n, + num_heads, + h, + w, + h_kv, + w_kv, + dtype=x_input.dtype, + device=x_input.device) + + # attention_type[0]: appr - appr + # attention_type[1]: appr - position + # attention_type[2]: bias - appr + # attention_type[3]: bias - position + if self.attention_type[0] or self.attention_type[2]: + if self.attention_type[0] and self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim) + energy = torch.matmul(proj_query + appr_bias, proj_key).\ + view(n, num_heads, h, w, h_kv, w_kv) + + elif self.attention_type[0]: + energy = torch.matmul(proj_query, proj_key).\ + view(n, num_heads, h, w, h_kv, w_kv) + + elif self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim).\ + repeat(n, 1, 1, 1) + + energy += torch.matmul(appr_bias, proj_key).\ + view(n, num_heads, 1, 1, h_kv, w_kv) + + if self.attention_type[1] or self.attention_type[3]: + if self.attention_type[1] and self.attention_type[3]: + geom_bias = self.geom_bias.\ + view(1, num_heads, 1, self.qk_embed_dim) + + proj_query_reshape = (proj_query + geom_bias).\ + view(n, num_heads, h, w, self.qk_embed_dim) + + energy_x = torch.matmul( + proj_query_reshape.permute(0, 1, 3, 2, 4), + position_feat_x.permute(0, 1, 2, 4, 3)) + energy_x = energy_x.\ + permute(0, 1, 3, 2, 4).unsqueeze(4) + + energy_y = torch.matmul( + proj_query_reshape, + position_feat_y.permute(0, 1, 2, 4, 3)) + energy_y = energy_y.unsqueeze(5) + + energy += 
energy_x + energy_y + + elif self.attention_type[1]: + proj_query_reshape = proj_query.\ + view(n, num_heads, h, w, self.qk_embed_dim) + proj_query_reshape = proj_query_reshape.\ + permute(0, 1, 3, 2, 4) + position_feat_x_reshape = position_feat_x.\ + permute(0, 1, 2, 4, 3) + position_feat_y_reshape = position_feat_y.\ + permute(0, 1, 2, 4, 3) + + energy_x = torch.matmul(proj_query_reshape, + position_feat_x_reshape) + energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4) + + energy_y = torch.matmul(proj_query_reshape, + position_feat_y_reshape) + energy_y = energy_y.unsqueeze(5) + + energy += energy_x + energy_y + + elif self.attention_type[3]: + geom_bias = self.geom_bias.\ + view(1, num_heads, self.qk_embed_dim, 1).\ + repeat(n, 1, 1, 1) + + position_feat_x_reshape = position_feat_x.\ + view(n, num_heads, w*w_kv, self.qk_embed_dim) + + position_feat_y_reshape = position_feat_y.\ + view(n, num_heads, h * h_kv, self.qk_embed_dim) + + energy_x = torch.matmul(position_feat_x_reshape, geom_bias) + energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv) + + energy_y = torch.matmul(position_feat_y_reshape, geom_bias) + energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1) + + energy += energy_x + energy_y + + energy = energy.view(n, num_heads, h * w, h_kv * w_kv) + + if self.spatial_range >= 0: + cur_local_constraint_map = \ + self.local_constraint_map[:h, :w, :h_kv, :w_kv].\ + contiguous().\ + view(1, 1, h*w, h_kv*w_kv) + + energy = energy.masked_fill_(cur_local_constraint_map, + float('-inf')) + + attention = F.softmax(energy, 3) + + proj_value = self.value_conv(x_kv) + proj_value_reshape = proj_value.\ + view((n, num_heads, self.v_dim, h_kv * w_kv)).\ + permute(0, 1, 3, 2) + + out = torch.matmul(attention, proj_value_reshape).\ + permute(0, 1, 3, 2).\ + contiguous().\ + view(n, self.v_dim * self.num_heads, h, w) + + out = self.proj_conv(out) + out = self.gamma * out + x_input + return out + + def init_weights(self): + for m in self.modules(): + if hasattr(m, 'kaiming_init') and m.kaiming_init: + kaiming_init( + m, + mode='fan_in', + nonlinearity='leaky_relu', + bias=0, + distribution='uniform', + a=1) diff --git a/CDARTS_detection/mmdet/models/plugins/non_local.py b/CDARTS_detection/mmdet/models/plugins/non_local.py new file mode 100644 index 0000000..cbec7a4 --- /dev/null +++ b/CDARTS_detection/mmdet/models/plugins/non_local.py @@ -0,0 +1,114 @@ +import torch +import torch.nn as nn +from mmcv.cnn import constant_init, normal_init + +from ..utils import ConvModule + + +class NonLocal2D(nn.Module): + """Non-local module. + + See https://arxiv.org/abs/1711.07971 for details. + + Args: + in_channels (int): Channels of the input feature map. + reduction (int): Channel reduction ratio. + use_scale (bool): Whether to scale pairwise_weight by 1/inter_channels. + conv_cfg (dict): The config dict for convolution layers. + (only applicable to conv_out) + norm_cfg (dict): The config dict for normalization layers. + (only applicable to conv_out) + mode (str): Options are `embedded_gaussian` and `dot_product`. + """ + + def __init__(self, + in_channels, + reduction=2, + use_scale=True, + conv_cfg=None, + norm_cfg=None, + mode='embedded_gaussian'): + super(NonLocal2D, self).__init__() + self.in_channels = in_channels + self.reduction = reduction + self.use_scale = use_scale + self.inter_channels = in_channels // reduction + self.mode = mode + assert mode in ['embedded_gaussian', 'dot_product'] + + # g, theta, phi are actually `nn.Conv2d`. Here we use ConvModule for + # potential usage. 
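+        # Review note: shape sketch of what is assembled below in
+        # embedded_gaussian mode, with C' = in_channels // reduction and
+        # theta/phi/g all 1x1 convs:
+        #   y   = softmax(theta(x)^T @ phi(x) / sqrt(C')) @ g(x)
+        #   out = x + conv_out(y)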
+        self.g = ConvModule(
+            self.in_channels,
+            self.inter_channels,
+            kernel_size=1,
+            activation=None)
+        self.theta = ConvModule(
+            self.in_channels,
+            self.inter_channels,
+            kernel_size=1,
+            activation=None)
+        self.phi = ConvModule(
+            self.in_channels,
+            self.inter_channels,
+            kernel_size=1,
+            activation=None)
+        self.conv_out = ConvModule(
+            self.inter_channels,
+            self.in_channels,
+            kernel_size=1,
+            conv_cfg=conv_cfg,
+            norm_cfg=norm_cfg,
+            activation=None)
+
+        self.init_weights()
+
+    def init_weights(self, std=0.01, zeros_init=True):
+        for m in [self.g, self.theta, self.phi]:
+            normal_init(m.conv, std=std)
+        if zeros_init:
+            constant_init(self.conv_out.conv, 0)
+        else:
+            normal_init(self.conv_out.conv, std=std)
+
+    def embedded_gaussian(self, theta_x, phi_x):
+        # pairwise_weight: [N, HxW, HxW]
+        pairwise_weight = torch.matmul(theta_x, phi_x)
+        if self.use_scale:
+            # scaled dot-product: theta_x.shape[-1] is `self.inter_channels`
+            pairwise_weight /= theta_x.shape[-1]**0.5
+        pairwise_weight = pairwise_weight.softmax(dim=-1)
+        return pairwise_weight
+
+    def dot_product(self, theta_x, phi_x):
+        # pairwise_weight: [N, HxW, HxW]
+        pairwise_weight = torch.matmul(theta_x, phi_x)
+        pairwise_weight /= pairwise_weight.shape[-1]
+        return pairwise_weight
+
+    def forward(self, x):
+        n, _, h, w = x.shape
+
+        # g_x: [N, HxW, C]
+        g_x = self.g(x).view(n, self.inter_channels, -1)
+        g_x = g_x.permute(0, 2, 1)
+
+        # theta_x: [N, HxW, C]
+        theta_x = self.theta(x).view(n, self.inter_channels, -1)
+        theta_x = theta_x.permute(0, 2, 1)
+
+        # phi_x: [N, C, HxW]
+        phi_x = self.phi(x).view(n, self.inter_channels, -1)
+
+        pairwise_func = getattr(self, self.mode)
+        # pairwise_weight: [N, HxW, HxW]
+        pairwise_weight = pairwise_func(theta_x, phi_x)
+
+        # y: [N, HxW, C]
+        y = torch.matmul(pairwise_weight, g_x)
+        # y: [N, C, H, W]
+        y = y.permute(0, 2, 1).reshape(n, self.inter_channels, h, w)
+
+        output = x + self.conv_out(y)
+
+        return output
diff --git a/CDARTS_detection/mmdet/models/registry.py b/CDARTS_detection/mmdet/models/registry.py
new file mode 100644
index 0000000..bfc88fb
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/registry.py
@@ -0,0 +1,9 @@
+from mmdet.utils import Registry
+
+BACKBONES = Registry('backbone')
+NECKS = Registry('neck')
+ROI_EXTRACTORS = Registry('roi_extractor')
+SHARED_HEADS = Registry('shared_head')
+HEADS = Registry('head')
+LOSSES = Registry('loss')
+DETECTORS = Registry('detector')
\ No newline at end of file
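
Review note: these registries are what the @NECKS.register_module-style
decorators in this patch populate; a config dict is then resolved by name. A
minimal sketch, assuming mmdet v1's Registry.get and build_from_cfg helpers:

    from mmdet.models.registry import NECKS
    neck_cls = NECKS.get('SearchPAFPN')  # registered by its decorator
    neck = neck_cls(**cfg_args)          # roughly what build_from_cfg does
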
diff --git a/CDARTS_detection/mmdet/models/roi_extractors/__init__.py b/CDARTS_detection/mmdet/models/roi_extractors/__init__.py
new file mode 100644
index 0000000..9161708
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/roi_extractors/__init__.py
@@ -0,0 +1,3 @@
+from .single_level import SingleRoIExtractor
+
+__all__ = ['SingleRoIExtractor']
diff --git a/CDARTS_detection/mmdet/models/roi_extractors/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/models/roi_extractors/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a50f4e872450c62a95c5587937d7b90cb039cbe3
Binary files /dev/null and b/CDARTS_detection/mmdet/models/roi_extractors/__pycache__/__init__.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/roi_extractors/__pycache__/single_level.cpython-36.pyc b/CDARTS_detection/mmdet/models/roi_extractors/__pycache__/single_level.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27ba7b6b8e92d653969a3c6a035155f4687e0539
Binary files /dev/null and b/CDARTS_detection/mmdet/models/roi_extractors/__pycache__/single_level.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/roi_extractors/single_level.py b/CDARTS_detection/mmdet/models/roi_extractors/single_level.py
new file mode 100644
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/roi_extractors/single_level.py
+from __future__ import division
+
+import torch
+import torch.nn as nn
+
+from mmdet import ops
+from mmdet.core import force_fp32
+from ..registry import ROI_EXTRACTORS
+
+
+@ROI_EXTRACTORS.register_module
+class SingleRoIExtractor(nn.Module):
+    """Extract RoI features from a single level feature map.
+
+    If there are multiple input feature levels, each RoI is mapped to a level
+    according to its scale.
+
+    Args:
+        roi_layer (dict): Specify RoI layer type and arguments.
+        out_channels (int): Output channels of RoI layers.
+        featmap_strides (int): Strides of input feature maps.
+        finest_scale (int): Scale threshold of mapping to level 0.
+    """
+
+    def __init__(self,
+                 roi_layer,
+                 out_channels,
+                 featmap_strides,
+                 finest_scale=56):
+        super(SingleRoIExtractor, self).__init__()
+        self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
+        self.out_channels = out_channels
+        self.featmap_strides = featmap_strides
+        self.finest_scale = finest_scale
+        self.fp16_enabled = False
+
+    @property
+    def num_inputs(self):
+        """int: Input feature map levels."""
+        return len(self.featmap_strides)
+
+    def init_weights(self):
+        pass
+
+    def build_roi_layers(self, layer_cfg, featmap_strides):
+        cfg = layer_cfg.copy()
+        layer_type = cfg.pop('type')
+        assert hasattr(ops, layer_type)
+        layer_cls = getattr(ops, layer_type)
+        roi_layers = nn.ModuleList(
+            [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
+        return roi_layers
+
+    def map_roi_levels(self, rois, num_levels):
+        """Map rois to corresponding feature levels by scales.
+
+        - scale < finest_scale * 2: level 0
+        - finest_scale * 2 <= scale < finest_scale * 4: level 1
+        - finest_scale * 4 <= scale < finest_scale * 8: level 2
+        - scale >= finest_scale * 8: level 3
+
+        Args:
+            rois (Tensor): Input RoIs, shape (k, 5).
+            num_levels (int): Total level number.
+
+        Returns:
+            Tensor: Level index (0-based) of each RoI, shape (k, )
+        """
+        scale = torch.sqrt(
+            (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
+        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
+        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
+        return target_lvls
+
+    def roi_rescale(self, rois, scale_factor):
+        cx = (rois[:, 1] + rois[:, 3]) * 0.5
+        cy = (rois[:, 2] + rois[:, 4]) * 0.5
+        w = rois[:, 3] - rois[:, 1] + 1
+        h = rois[:, 4] - rois[:, 2] + 1
+        new_w = w * scale_factor
+        new_h = h * scale_factor
+        x1 = cx - new_w * 0.5 + 0.5
+        x2 = cx + new_w * 0.5 - 0.5
+        y1 = cy - new_h * 0.5 + 0.5
+        y2 = cy + new_h * 0.5 - 0.5
+        new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
+        return new_rois
+
+    @force_fp32(apply_to=('feats', ), out_fp16=True)
+    def forward(self, feats, rois, roi_scale_factor=None):
+        if len(feats) == 1:
+            return self.roi_layers[0](feats[0], rois)
+
+        out_size = self.roi_layers[0].out_size
+        num_levels = len(feats)
+        target_lvls = self.map_roi_levels(rois, num_levels)
+        roi_feats = feats[0].new_zeros(
+            rois.size(0), self.out_channels, *out_size)
+        if roi_scale_factor is not None:
+            rois = self.roi_rescale(rois, roi_scale_factor)
+        for i in range(num_levels):
+            inds = target_lvls == i
+            if inds.any():
+                rois_ = rois[inds, :]
+                roi_feats_t = self.roi_layers[i](feats[i], rois_)
+                roi_feats[inds] = roi_feats_t
+        return roi_feats
diff --git a/CDARTS_detection/mmdet/models/shared_heads/__init__.py b/CDARTS_detection/mmdet/models/shared_heads/__init__.py
new file mode 100644
index 0000000..bbe7014
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/shared_heads/__init__.py
@@ -0,0 +1,3 @@
+from .res_layer import ResLayer
+
+__all__ = ['ResLayer']
diff --git a/CDARTS_detection/mmdet/models/shared_heads/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/models/shared_heads/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..320f627ee012199815a53dcda46510a12cc26c63
Binary files /dev/null and b/CDARTS_detection/mmdet/models/shared_heads/__pycache__/__init__.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/shared_heads/__pycache__/res_layer.cpython-36.pyc b/CDARTS_detection/mmdet/models/shared_heads/__pycache__/res_layer.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91688319daf517cc04ca23372ac85e0f0ae4974a
Binary files /dev/null and b/CDARTS_detection/mmdet/models/shared_heads/__pycache__/res_layer.cpython-36.pyc differ
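
Review note: a worked trace of SingleRoIExtractor.map_roi_levels above, with
the default finest_scale=56: a 224x224 RoI has scale = sqrt(224 * 224) = 224,
and floor(log2(224 / 56)) = 2, so its features are pooled from input level 2;
roi_rescale with roi_scale_factor=2 would first double the box about its
centre before pooling.
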
diff --git a/CDARTS_detection/mmdet/models/utils/__pycache__/conv_module.cpython-36.pyc b/CDARTS_detection/mmdet/models/utils/__pycache__/conv_module.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..98465a80ddba29952b8ed74e35e6141848126949
Binary files /dev/null and b/CDARTS_detection/mmdet/models/utils/__pycache__/conv_module.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/utils/__pycache__/conv_ws.cpython-36.pyc b/CDARTS_detection/mmdet/models/utils/__pycache__/conv_ws.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f98faef7395d3fce743e03638c4495c2b86e5cf
Binary files /dev/null and b/CDARTS_detection/mmdet/models/utils/__pycache__/conv_ws.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/models/utils/conv_module.py b/CDARTS_detection/mmdet/models/utils/conv_module.py
new file mode 100644
index 0000000..1c70d56
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/utils/conv_module.py
@@ -0,0 +1,172 @@
+import warnings
+
+import torch.nn as nn
+from mmcv.cnn import kaiming_init, constant_init
+
+from .conv_ws import ConvWS2d
+from .norm import build_norm_layer
+from .quant_conv import QuantConv
+
+conv_cfg = {
+    'Conv': nn.Conv2d,
+    'ConvWS': ConvWS2d,
+    # TODO: octave conv
+    'QuantConv': QuantConv,
+}
+
+
+def build_conv_layer(cfg, *args, **kwargs):
+    """ Build convolution layer
+
+    Args:
+        cfg (None or dict): cfg should contain:
+            type (str): identify conv layer type.
+            layer args: args needed to instantiate a conv layer.
+
+    Returns:
+        layer (nn.Module): created conv layer
+    """
+    if cfg is None:
+        cfg_ = dict(type='Conv')
+    else:
+        assert isinstance(cfg, dict) and 'type' in cfg
+        cfg_ = cfg.copy()
+
+    layer_type = cfg_.pop('type')
+    if layer_type not in conv_cfg:
+        raise KeyError('Unrecognized conv type {}'.format(layer_type))
+    else:
+        conv_layer = conv_cfg[layer_type]
+
+    layer = conv_layer(*args, **kwargs, **cfg_)
+
+    return layer
+
+
+class ConvModule(nn.Module):
+    """Conv-Norm-Activation block.
+
+    Args:
+        in_channels (int): Same as nn.Conv2d.
+        out_channels (int): Same as nn.Conv2d.
+        kernel_size (int or tuple[int]): Same as nn.Conv2d.
+        stride (int or tuple[int]): Same as nn.Conv2d.
+        padding (int or tuple[int]): Same as nn.Conv2d.
+        dilation (int or tuple[int]): Same as nn.Conv2d.
+        groups (int): Same as nn.Conv2d.
+        bias (bool or str): If specified as `auto`, it will be decided by the
+            norm_cfg.
Bias will be set as True if norm_cfg is None, otherwise + False. + conv_cfg (dict): Config dict for convolution layer. + norm_cfg (dict): Config dict for normalization layer. + activation (str or None): Activation type, "ReLU" by default. + inplace (bool): Whether to use inplace mode for activation. + bottle_first (bool): Whether to apply the activation layer in the + last. (Do not use this flag since the behavior and api may be + changed in the future.) + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias='auto', + conv_cfg=None, + norm_cfg=None, + activation='relu', + inplace=True, + bottle_first='conv'): + super(ConvModule, self).__init__() + assert conv_cfg is None or isinstance(conv_cfg, dict) + assert norm_cfg is None or isinstance(norm_cfg, dict) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.activation = activation + self.inplace = inplace + self.bottle_first = bottle_first + + self.with_norm = norm_cfg is not None + self.with_activatation = activation is not None + # if the conv layer is before a norm layer, bias is unnecessary. + if bias == 'auto': + bias = False if self.with_norm else True + self.with_bias = bias + + if self.with_norm and self.with_bias: + warnings.warn('ConvModule has norm and bias at the same time') + + # build convolution layer + self.conv = build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + # export the attributes of self.conv to a higher level for convenience + self.in_channels = self.conv.in_channels + self.out_channels = self.conv.out_channels + self.kernel_size = self.conv.kernel_size + self.stride = self.conv.stride + self.padding = self.conv.padding + self.dilation = self.conv.dilation + self.transposed = self.conv.transposed + self.output_padding = self.conv.output_padding + self.groups = self.conv.groups + + # build normalization layers + if self.with_norm: + norm_channels = in_channels if self.bottle_first == 'bn' else out_channels + self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels) + self.add_module(self.norm_name, norm) + + # build activation layer + if self.with_activatation: + if self.activation not in ['relu']: + raise ValueError('{} is currently not supported.'.format( + self.activation)) + if self.activation == 'relu': + self.activate = nn.ReLU(inplace=inplace) + + # Use msra init by default + self.init_weights() + + @property + def norm(self): + return getattr(self, self.norm_name) + + def init_weights(self): + nonlinearity = 'relu' if self.activation is None else self.activation + kaiming_init(self.conv, nonlinearity=nonlinearity) + if self.with_norm: + constant_init(self.norm, 1, bias=0) + + def forward(self, x, activate=True, norm=True): + if self.bottle_first == 'conv': + x = self.conv(x) + if norm and self.with_norm: + x = self.norm(x) + if activate and self.with_activatation: + x = self.activate(x) + elif self.bottle_first == 'relu': + if activate and self.with_activatation: + x = self.activate(x) + x = self.conv(x) + if norm and self.with_norm: + x = self.norm(x) + elif self.bottle_first == 'bn': + if norm and self.with_norm: + x = self.norm(x) + if activate and self.with_activatation: + x = self.activate(x) + x = self.conv(x) + else: + raise KeyError('bottle_first is invalid.') + return x \ No newline at end of file diff --git a/CDARTS_detection/mmdet/models/utils/conv_ws.py 
b/CDARTS_detection/mmdet/models/utils/conv_ws.py new file mode 100644 index 0000000..5ccd735 --- /dev/null +++ b/CDARTS_detection/mmdet/models/utils/conv_ws.py @@ -0,0 +1,46 @@ +import torch.nn as nn +import torch.nn.functional as F + + +def conv_ws_2d(input, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + eps=1e-5): + c_in = weight.size(0) + weight_flat = weight.view(c_in, -1) + mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) + std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) + weight = (weight - mean) / (std + eps) + return F.conv2d(input, weight, bias, stride, padding, dilation, groups) + + +class ConvWS2d(nn.Conv2d): + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + eps=1e-5): + super(ConvWS2d, self).__init__( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + self.eps = eps + + def forward(self, x): + return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding, + self.dilation, self.groups, self.eps) diff --git a/CDARTS_detection/mmdet/models/utils/norm.py b/CDARTS_detection/mmdet/models/utils/norm.py new file mode 100644 index 0000000..d5687cb --- /dev/null +++ b/CDARTS_detection/mmdet/models/utils/norm.py @@ -0,0 +1,55 @@ +import torch.nn as nn + +norm_cfg = { + # format: layer_type: (abbreviation, module) + 'BN': ('bn', nn.BatchNorm2d), + 'SyncBN': ('bn', nn.SyncBatchNorm), + 'GN': ('gn', nn.GroupNorm), + # and potentially 'SN' +} + + +def build_norm_layer(cfg, num_features, postfix=''): + """ Build normalization layer + + Args: + cfg (dict): cfg should contain: + type (str): identify norm layer type. + layer args: args needed to instantiate a norm layer. + requires_grad (bool): [optional] whether stop gradient updates + num_features (int): number of channels from input. + postfix (int, str): appended into norm abbreviation to + create named layer. 
+ + Returns: + name (str): abbreviation + postfix + layer (nn.Module): created norm layer + """ + assert isinstance(cfg, dict) and 'type' in cfg + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in norm_cfg: + raise KeyError('Unrecognized norm type {}'.format(layer_type)) + else: + abbr, norm_layer = norm_cfg[layer_type] + if norm_layer is None: + raise NotImplementedError + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + requires_grad = cfg_.pop('requires_grad', True) + cfg_.setdefault('eps', 1e-5) + if layer_type != 'GN': + layer = norm_layer(num_features, **cfg_) + if layer_type == 'SyncBN': + layer._specify_ddp_gpu_num(1) + else: + assert 'num_groups' in cfg_ + layer = norm_layer(num_channels=num_features, **cfg_) + + for param in layer.parameters(): + param.requires_grad = requires_grad + + return name, layer diff --git a/CDARTS_detection/mmdet/models/utils/quant_conv.py b/CDARTS_detection/mmdet/models/utils/quant_conv.py new file mode 100644 index 0000000..51dfb9f --- /dev/null +++ b/CDARTS_detection/mmdet/models/utils/quant_conv.py @@ -0,0 +1,239 @@ +import math +import time +import torch +import torch.nn as nn +from torch.autograd import Function +import torch.nn.functional as F + +# quantize for weights and activations +class Quantizer(Function): + ''' + take a real value x in alpha*[0,1] or alpha*[-1,1] + output a discrete-valued x in alpha*{0, 1/(2^k-1), ..., (2^k-1)/(2^k-1)} or likeness + where k is nbit + ''' + @staticmethod + def forward(ctx, input, nbit, alpha=None, offset=None): + ctx.alpha = alpha + ctx.offset = offset + scale = (2 ** nbit - 1) if alpha is None else (2 ** nbit - 1) / alpha + ctx.scale = scale + return torch.round(input * scale) / scale if offset is None \ + else (torch.round(input * scale) + torch.round(offset)) / scale +# if alpha is None: +# scale = 2 ** nbit - 1 +# ctx.scale = scale +# if offset is None: +# return torch.round(input * scale) / scale +# else: +# return (torch.round(input * scale) + offset) / scale +# else: +# scale = (2 ** nbit - 1) / alpha +# if offset is None: +# return torch.round(input * scale) / scale +# else: +# ctx.save_for_backward(input, scale) +# return (torch.round(input * scale) + offset) / scale + + @staticmethod + def backward(ctx, grad_output): + if ctx.offset is None: + return grad_output, None, None, None + else: + return grad_output, None, None, torch.sum(grad_output) / ctx.scale + + +def quantize(input, nbit, alpha=None, offset=None): + return Quantizer.apply(input, nbit, alpha, offset) + + +# standard sign with STE +class Signer(Function): + ''' + take a real value x + output sign(x) + ''' + @staticmethod + def forward(ctx, input): + return torch.sign(input) + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +def sign(input): + return Signer.apply(input) + + +# sign in xnor-net for weights +class Xnor(Function): + ''' + take a real value x + output sign(x_c) * E(|x_c|) + ''' + @staticmethod + def forward(ctx, input): + return torch.sign(input) * torch.mean(torch.abs(input), dim=[1,2,3], keepdim=True) + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +def xnor(input): + return Xnor.apply(input) + + +# sign in dorefa-net for weights +class ScaleSigner(Function): + ''' + take a real value x + output sign(x) * E(|x|) + ''' + @staticmethod + def forward(ctx, input): + return torch.sign(input) * torch.mean(torch.abs(input)) + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +def scale_sign(input): 
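+    # Review note: e.g. for w = [-0.5, 1.5], mean(|w|) = 1.0, so
+    # scale_sign(w) = [-1.0, 1.0]; the backward pass is a straight-through
+    # estimator (the gradient is returned unchanged).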
+ return ScaleSigner.apply(input) + +def dorefa_w(w, nbit_w, *args, **kwargs): + if nbit_w == 1: + w = scale_sign(w) + else: + w = torch.tanh(w) + w = w / (2 * torch.max(torch.abs(w))) + 0.5 + w = 2 * quantize(w, nbit_w) - 1 + return w + +def wrpn_w(w, nbit_w, *args, **kwargs): + if nbit_w == 1: + w = scale_sign(w) + else: + w = quantize(torch.clamp(w, -1, 1), nbit_w - 1) + return w + +def xnor_w(w, nbit_w=1, *args, **kwargs): + if nbit_w != 1: + raise ValueError('nbit_w must be 1 in XNOR-Net.') + return xnor(w) + +def bireal_w(w, nbit_w=1, *args, **kwargs): + if nbit_w != 1: + raise ValueError('nbit_w must be 1 in Bi-Real-Net.') + return sign(w) * torch.mean(torch.abs(w.clone().detach())) + + +# dorefa quantize for activations +def dorefa_a(input, nbit_a, *args, **kwargs): + return quantize(torch.clamp(input, 0, 1.0), nbit_a, *args, **kwargs) + +# PACT quantize for activations +def pact_a(input, nbit_a, alpha, *args, **kwargs): + x = 0.5*(torch.abs(input)-torch.abs(input-alpha)+alpha) + return quantize(x, nbit_a, alpha, *args, **kwargs) + +# bi-real sign for activations +class BirealActivation(Function): + ''' + take a real value x + output sign(x) + ''' + @staticmethod + def forward(ctx, input, nbit_a=1): + ctx.save_for_backward(input) + return input.clamp(-1, 1).sign() + + @staticmethod + def backward(ctx, grad_output): + input, = ctx.saved_tensors + grad_input = (2 + 2 * input) * input.lt(0).float() + (2 - 2 * input) * input.ge(0).float() + grad_input = torch.clamp(grad_input, 0) + grad_input *= grad_output + return grad_input, None + + +def bireal_a(input, nbit_a=1, *args, **kwargs): + return BirealActivation.apply(input) + + +class QuantConv(nn.Conv2d): + # general QuantConv for quantized conv + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): + super(QuantConv, self).__init__( + in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) + self.in_channels = in_channels + self.out_channels = out_channels + + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_custome_parameters() + self.quant_config() + + def quant_config(self, quant_name_w='dorefa', quant_name_a='dorefa', nbit_w=1, nbit_a=1, has_offset=False): + self.nbit_w = nbit_w + self.nbit_a = nbit_a + name_w_dict = {'dorefa': dorefa_w, 'pact': dorefa_w, 'wrpn': wrpn_w, 'xnor': xnor_w, 'bireal': bireal_w} + name_a_dict = {'dorefa': dorefa_a, 'pact': pact_a, 'wrpn': dorefa_a, 'xnor': dorefa_a, 'bireal': bireal_a} + self.quant_w = name_w_dict[quant_name_w] + self.quant_a = name_a_dict[quant_name_a] + + if quant_name_a == 'pact': + self.alpha_a = nn.Parameter(torch.Tensor(1), requires_grad=True) + else: + self.register_parameter('alpha_a', None) + if quant_name_w == 'pact': + self.alpha_w = nn.Parameter(torch.Tensor(1), requires_grad=True) + else: + self.register_parameter('alpha_w', None) + if has_offset: + self.offset = nn.Parameter(torch.Tensor(1)) + else: + self.register_parameter('offset', None) +# print(quant_name_w, quant_name_a, nbit_w, nbit_a) + + if self.alpha_a is not None: + nn.init.constant_(self.alpha_a, 10) + if self.alpha_w is not None: + nn.init.constant_(self.alpha_w, 10) + if self.offset is not None: + nn.init.constant_(self.offset, 0) + + def reset_custome_parameters(self): + nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) + if self.bias is not None: + nn.init.constant_(self.bias, 0) + + def forward(self, input): + # 0-bit: identity mapping + if 
self.nbit_w == 0 or self.nbit_a == 0:
+            diff_channels = self.out_channels - self.in_channels
+            if self.stride == 2 or self.stride == (2, 2):
+                x = F.pad(input[:, :, ::2, ::2], (0, 0, 0, 0, diff_channels//2, diff_channels-diff_channels//2), 'constant', 0)
+                return x
+            else:
+                x = F.pad(input, (0, 0, 0, 0, diff_channels//2, diff_channels-diff_channels//2), 'constant', 0)
+                return x
+        # w quan
+        if self.nbit_w < 32:
+            w = self.quant_w(self.weight, self.nbit_w, self.alpha_w, self.offset)
+        else:
+            w = self.weight
+        # a quan
+        if self.nbit_a < 32:
+            x = self.quant_a(input, self.nbit_a, self.alpha_a)
+        else:
+            x = F.relu(input)
+
+        x = F.conv2d(x, w, None, self.stride, self.padding, self.dilation, self.groups)
+        return x
+
\ No newline at end of file
diff --git a/CDARTS_detection/mmdet/models/utils/scale.py b/CDARTS_detection/mmdet/models/utils/scale.py
new file mode 100644
index 0000000..68c37cd
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/utils/scale.py
@@ -0,0 +1,12 @@
+import torch
+import torch.nn as nn
+
+
+class Scale(nn.Module):
+
+    def __init__(self, scale=1.0):
+        super(Scale, self).__init__()
+        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
+
+    def forward(self, x):
+        return x * self.scale
diff --git a/CDARTS_detection/mmdet/models/utils/weight_init.py b/CDARTS_detection/mmdet/models/utils/weight_init.py
new file mode 100644
index 0000000..17d4988
--- /dev/null
+++ b/CDARTS_detection/mmdet/models/utils/weight_init.py
@@ -0,0 +1,46 @@
+import numpy as np
+import torch.nn as nn
+
+
+def xavier_init(module, gain=1, bias=0, distribution='normal'):
+    assert distribution in ['uniform', 'normal']
+    if distribution == 'uniform':
+        nn.init.xavier_uniform_(module.weight, gain=gain)
+    else:
+        nn.init.xavier_normal_(module.weight, gain=gain)
+    if hasattr(module, 'bias') and module.bias is not None:
+        nn.init.constant_(module.bias, bias)
+
+
+def normal_init(module, mean=0, std=1, bias=0):
+    nn.init.normal_(module.weight, mean, std)
+    if hasattr(module, 'bias') and module.bias is not None:
+        nn.init.constant_(module.bias, bias)
+
+
+def uniform_init(module, a=0, b=1, bias=0):
+    nn.init.uniform_(module.weight, a, b)
+    if hasattr(module, 'bias') and module.bias is not None:
+        nn.init.constant_(module.bias, bias)
+
+
+def kaiming_init(module,
+                 mode='fan_out',
+                 nonlinearity='relu',
+                 bias=0,
+                 distribution='normal'):
+    assert distribution in ['uniform', 'normal']
+    if distribution == 'uniform':
+        nn.init.kaiming_uniform_(
+            module.weight, mode=mode, nonlinearity=nonlinearity)
+    else:
+        nn.init.kaiming_normal_(
+            module.weight, mode=mode, nonlinearity=nonlinearity)
+    if hasattr(module, 'bias') and module.bias is not None:
+        nn.init.constant_(module.bias, bias)
+
+
+def bias_init_with_prob(prior_prob):
+    """Initialize conv/fc bias value according to a given probability."""
+    bias_init = float(-np.log((1 - prior_prob) / prior_prob))
+    return bias_init
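+
+
+# Review note: RetinaNet-style classification branches initialize their bias
+# with bias_init_with_prob(0.01): -log((1 - 0.01) / 0.01) = -log(99) ~ -4.595,
+# so the head's initial sigmoid output is ~0.01.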
diff --git a/CDARTS_detection/mmdet/ops/__init__.py b/CDARTS_detection/mmdet/ops/__init__.py
new file mode 100644
index 0000000..5f6ad09
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/__init__.py
@@ -0,0 +1,19 @@
+from .dcn import (DeformConv, DeformConvPack, ModulatedDeformConv,
+                  ModulatedDeformConvPack, DeformRoIPooling,
+                  DeformRoIPoolingPack, ModulatedDeformRoIPoolingPack,
+                  deform_conv, modulated_deform_conv, deform_roi_pooling)
+from .gcb import ContextBlock
+from .nms import nms, soft_nms
+from .roi_align import RoIAlign, roi_align
+from .roi_pool import RoIPool, roi_pool
+from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss
+from .masked_conv import MaskedConv2d
+
+__all__ = [
+    'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool',
+    'DeformConv', 'DeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
+    'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv',
+    'ModulatedDeformConvPack', 'deform_conv', 'modulated_deform_conv',
+    'deform_roi_pooling', 'SigmoidFocalLoss', 'sigmoid_focal_loss',
+    'MaskedConv2d', 'ContextBlock'
+]
diff --git a/CDARTS_detection/mmdet/ops/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..52878e3cbe91fb5637bb64724018cb1f6cc9f9d6
Binary files /dev/null and b/CDARTS_detection/mmdet/ops/__pycache__/__init__.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/ops/dcn/__init__.py b/CDARTS_detection/mmdet/ops/dcn/__init__.py
new file mode 100644
index 0000000..165e637
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/dcn/__init__.py
@@ -0,0 +1,13 @@
+from .functions.deform_conv import deform_conv, modulated_deform_conv
+from .functions.deform_pool import deform_roi_pooling
+from .modules.deform_conv import (DeformConv, ModulatedDeformConv,
+                                  DeformConvPack, ModulatedDeformConvPack)
+from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack,
+                                  ModulatedDeformRoIPoolingPack)
+
+__all__ = [
+    'DeformConv', 'DeformConvPack', 'ModulatedDeformConv',
+    'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
+    'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv',
+    'deform_roi_pooling'
+]
diff --git a/CDARTS_detection/mmdet/ops/dcn/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/dcn/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..00eb1ff4f4d566d85afd1156de331af4e72e9b87
Binary files /dev/null and b/CDARTS_detection/mmdet/ops/dcn/__pycache__/__init__.cpython-36.pyc differ
diff --git a/CDARTS_detection/mmdet/ops/dcn/functions/__init__.py b/CDARTS_detection/mmdet/ops/dcn/functions/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/CDARTS_detection/mmdet/ops/dcn/functions/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/dcn/functions/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1ec30037f1be996791d694378eab0e08627af54
Binary files /dev/null and b/CDARTS_detection/mmdet/ops/dcn/functions/__pycache__/__init__.cpython-36.pyc differ
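
Review note: a usage sketch for the deformable ops re-exported above, assuming
the module signatures defined in modules/deform_conv.py later in this patch.
DeformConv takes externally predicted offsets whose channel count is
2 * deformable_groups * kernel_h * kernel_w, while DeformConvPack predicts its
own offsets internally:

    dcn = DeformConv(256, 256, kernel_size=3, padding=1, deformable_groups=1)
    out = dcn(x, offset)  # offset: (N, 18, H, W) for a 3x3 kernel
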
zQuD*_4&$Hvz_R{nEj%vtdnocD1h+WLtRB7XjE(G`%}94LXXN%==q~p%Z{+uUW_@OH zp9f!AJV@-B(+f}!c?0!^u7^pGHhRq@gluv4rPXboVg+lj%T)O3B=48QB8Top<9H}e zZFHfw%9BAMMp3`WA4mNOkNfn^{0X4kLy@;36066!)w2_ax5%!q*o^gD)U;L5J?PpZ z=ss`4g0@<}ujl^+)6Z=tQwFgX|rOLT{$y{VU4s^w$mlF zIOy?Ed=z(vnWh+*wzz|t>i2<-8J1C7k$)*ypRxQ;t`ab=|A$eqHzLI(}&y zR;}3@eidzgxOYmvcJls%C*!1FCcN|h{Z5(WvJjnEc82+QQoh?cD9X<7r_Vcvxh#{I z@96V6d2=|r-7m5Z+QyyHM3x;sd_3eyXFo3chjMt52y&5A2SU(d-7V$t;Yc-q6=##= zfe?jIJ~>!i3L2z*Sqe;`f^p3GFh5WsA7(MRrt%L&F&Rs>W?VAf&yuKaY8g8svO}%O z_;C_p(r7>KA05Ynt45w2N6BbhJ{3N#(vFHriJupdiOSob3}mE&q0}C(F6Q%u|i^%#2N{5c5wj$qqVter9TnT!f*YXC2pesyDxSRi&1hLGCz1a ziSyn5Qy$A*IWFYxXvC8e=L>6%zFUmtF7M~NgBtXxoq>F3{8R;tM?u6#9)C)sG3)R%$(T zruNjCy4-TDsfXfE1K@_qTc!Psea-SaKrj$c*{R2w##K1is}Q(iz|}8HpWB2hj0|X$ z9B2)w68|U>1}pSVJ*@%5Q@?DcEy6u8=v98@Vsx9ntK4*n5USrgf-GQ@`mJ=O#@LD; zZ>6hseYLJr6j-mq**dUC)&RL*GqJDN#>~?IYhS3>CY+je=}j@<|Fk}LY76Ms1)!1o zZxA9H_ z|4=m%EeaWB!=uC?>e_-^KF4zXAyJXoqDjG@)+^!DyLs*$<5d+vr(YzfJZ&F`V_Zjfk;|zjLrH1hh8c&ck;w}0zoQEMolKcR|vfKFcS!h6GZ)i}t z0MG`Yqrqbn)Lek*vTj)?xUrKf=KyLxd!M2ZJ;aj~Sfj#w~ zh?1~jgFEeQYsT(dzx^F>hB5FCdkZa7psETkSsZ*1es`V5f6F!|Dv+{xn6K9BDv^eu{Y^3!v+ zc9A7^?qo$j&g5Mv=G&9^C@Ctk358Zemz|;Plts~zqd3btZ=0yyIW8s{@9Zbw(XL`o z;BDly$`f&ZkcbxP4#q2I6psa+o>-zbH!VOp4hXT^Hr_~%xIhxb87k}_#(AD(QneO2 zjqzSE%niRe;8NuSRT!oML$J{cTOu|w*>6xJVb-#nY?*DMgzPHHbwqZG{~PS&+JD%m zy{@m1$($Oozo}}CPN^HHFp6^EFN#z%ibe&WfXO3}a`b2tXLV0omZ1GUP_tEwIc~$b-{4bJMN}pTm$`@pNj2z+R zP1!r!g+3iIJ#@AkC>ojnf&#o)5fBB*bPfvaKzAtri`>ei{1zJF){`FqeSVE&mIm_U zG6c7|X+!pq6_T{IWNAZW_@d5YflY7b=}guyTWPxjijff$<54cEIdcBi`Rh`-oUYIq zu%HFzvw?eJ#g?uaJ~aI1g6CY#f10(?Pw_;@6uglOr-r3r{p-3>N0e~J0?Hl5^2l(3tBrYO%66ST{5}b22hPzh--WQ+I)S1j z3Q^t^7ZCGx0<{X@5!Bv5S!E|TUgKmhWmDu~#Jt+WzJ)>&TQ*CR1=$ZhlUGeWWIGP! zPdfJ?jr<|hZIq8Rw5IPkTEop_+A2NVeQcc@)fpAMg0dvA1-u}D*S6rrxb0M48tTl2 z-o``dUS;l94x?Iw< z|E!$3h4>-0c1gTLf_Sla<2C0#XHfAe4cD2;~A^LOD=%B@q zA$kp+I%xOVL0y@;_MsQNgd*G@Q6#ln$Piq3fJwPYy2u8a@DB0|^dU%}+f#v~mP4oowfV(;! 
diff --git a/CDARTS_detection/mmdet/ops/dcn/functions/deform_conv.py b/CDARTS_detection/mmdet/ops/dcn/functions/deform_conv.py
new file mode 100644
index 0000000..6af75a7
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/dcn/functions/deform_conv.py
@@ -0,0 +1,181 @@
+import torch
+from torch.autograd import Function
+from torch.nn.modules.utils import _pair
+
+from ..
import deform_conv_cuda + + +class DeformConvFunction(Function): + + @staticmethod + def forward(ctx, + input, + offset, + weight, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + im2col_step=64): + if input is not None and input.dim() != 4: + raise ValueError( + "Expected 4D tensor as input, got {}D tensor instead.".format( + input.dim())) + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.im2col_step = im2col_step + + ctx.save_for_backward(input, offset, weight) + + output = input.new_empty( + DeformConvFunction._output_size(input, weight, ctx.padding, + ctx.dilation, ctx.stride)) + + ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones + + if not input.is_cuda: + raise NotImplementedError + else: + cur_im2col_step = min(ctx.im2col_step, input.shape[0]) + assert (input.shape[0] % + cur_im2col_step) == 0, 'im2col step must divide batchsize' + deform_conv_cuda.deform_conv_forward_cuda( + input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1], + weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], + ctx.padding[1], ctx.padding[0], ctx.dilation[1], + ctx.dilation[0], ctx.groups, ctx.deformable_groups, + cur_im2col_step) + return output + + @staticmethod + def backward(ctx, grad_output): + input, offset, weight = ctx.saved_tensors + + grad_input = grad_offset = grad_weight = None + + if not grad_output.is_cuda: + raise NotImplementedError + else: + cur_im2col_step = min(ctx.im2col_step, input.shape[0]) + assert (input.shape[0] % + cur_im2col_step) == 0, 'im2col step must divide batchsize' + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + deform_conv_cuda.deform_conv_backward_input_cuda( + input, offset, grad_output, grad_input, + grad_offset, weight, ctx.bufs_[0], weight.size(3), + weight.size(2), ctx.stride[1], ctx.stride[0], + ctx.padding[1], ctx.padding[0], ctx.dilation[1], + ctx.dilation[0], ctx.groups, ctx.deformable_groups, + cur_im2col_step) + + if ctx.needs_input_grad[2]: + grad_weight = torch.zeros_like(weight) + deform_conv_cuda.deform_conv_backward_parameters_cuda( + input, offset, grad_output, + grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), + weight.size(2), ctx.stride[1], ctx.stride[0], + ctx.padding[1], ctx.padding[0], ctx.dilation[1], + ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1, + cur_im2col_step) + + return (grad_input, grad_offset, grad_weight, None, None, None, None, + None) + + @staticmethod + def _output_size(input, weight, padding, dilation, stride): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in range(input.dim() - 2): + in_size = input.size(d + 2) + pad = padding[d] + kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, ) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + "convolution input is too small (output would be {})".format( + 'x'.join(map(str, output_size)))) + return output_size + + +class ModulatedDeformConvFunction(Function): + + @staticmethod + def forward(ctx, + input, + offset, + mask, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1): + ctx.stride = stride + ctx.padding = padding + ctx.dilation = dilation + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.with_bias = bias 
is not None + if not ctx.with_bias: + bias = input.new_empty(1) # fake tensor + if not input.is_cuda: + raise NotImplementedError + if weight.requires_grad or mask.requires_grad or offset.requires_grad \ + or input.requires_grad: + ctx.save_for_backward(input, offset, mask, weight, bias) + output = input.new_empty( + ModulatedDeformConvFunction._infer_shape(ctx, input, weight)) + ctx._bufs = [input.new_empty(0), input.new_empty(0)] + deform_conv_cuda.modulated_deform_conv_cuda_forward( + input, weight, bias, ctx._bufs[0], offset, mask, output, + ctx._bufs[1], weight.shape[2], weight.shape[3], ctx.stride, + ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, + ctx.groups, ctx.deformable_groups, ctx.with_bias) + return output + + @staticmethod + def backward(ctx, grad_output): + if not grad_output.is_cuda: + raise NotImplementedError + input, offset, mask, weight, bias = ctx.saved_tensors + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + grad_mask = torch.zeros_like(mask) + grad_weight = torch.zeros_like(weight) + grad_bias = torch.zeros_like(bias) + deform_conv_cuda.modulated_deform_conv_cuda_backward( + input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1], + grad_input, grad_weight, grad_bias, grad_offset, grad_mask, + grad_output, weight.shape[2], weight.shape[3], ctx.stride, + ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, + ctx.groups, ctx.deformable_groups, ctx.with_bias) + if not ctx.with_bias: + grad_bias = None + + return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, + None, None, None, None, None) + + @staticmethod + def _infer_shape(ctx, input, weight): + n = input.size(0) + channels_out = weight.size(0) + height, width = input.shape[2:4] + kernel_h, kernel_w = weight.shape[2:4] + height_out = (height + 2 * ctx.padding - + (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1 + width_out = (width + 2 * ctx.padding - + (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1 + return n, channels_out, height_out, width_out + + +deform_conv = DeformConvFunction.apply +modulated_deform_conv = ModulatedDeformConvFunction.apply diff --git a/CDARTS_detection/mmdet/ops/dcn/functions/deform_pool.py b/CDARTS_detection/mmdet/ops/dcn/functions/deform_pool.py new file mode 100644 index 0000000..65ff0ef --- /dev/null +++ b/CDARTS_detection/mmdet/ops/dcn/functions/deform_pool.py @@ -0,0 +1,69 @@ +import torch +from torch.autograd import Function + +from .. 
import deform_pool_cuda
+
+
+class DeformRoIPoolingFunction(Function):
+
+    @staticmethod
+    def forward(ctx,
+                data,
+                rois,
+                offset,
+                spatial_scale,
+                out_size,
+                out_channels,
+                no_trans,
+                group_size=1,
+                part_size=None,
+                sample_per_part=4,
+                trans_std=.0):
+        ctx.spatial_scale = spatial_scale
+        ctx.out_size = out_size
+        ctx.out_channels = out_channels
+        ctx.no_trans = no_trans
+        ctx.group_size = group_size
+        ctx.part_size = out_size if part_size is None else part_size
+        ctx.sample_per_part = sample_per_part
+        ctx.trans_std = trans_std
+
+        assert 0.0 <= ctx.trans_std <= 1.0
+        if not data.is_cuda:
+            raise NotImplementedError
+
+        n = rois.shape[0]
+        output = data.new_empty(n, out_channels, out_size, out_size)
+        output_count = data.new_empty(n, out_channels, out_size, out_size)
+        deform_pool_cuda.deform_psroi_pooling_cuda_forward(
+            data, rois, offset, output, output_count, ctx.no_trans,
+            ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
+            ctx.part_size, ctx.sample_per_part, ctx.trans_std)
+
+        if data.requires_grad or rois.requires_grad or offset.requires_grad:
+            ctx.save_for_backward(data, rois, offset)
+        ctx.output_count = output_count
+
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        if not grad_output.is_cuda:
+            raise NotImplementedError
+
+        data, rois, offset = ctx.saved_tensors
+        output_count = ctx.output_count
+        grad_input = torch.zeros_like(data)
+        grad_rois = None
+        grad_offset = torch.zeros_like(offset)
+
+        deform_pool_cuda.deform_psroi_pooling_cuda_backward(
+            grad_output, data, rois, offset, output_count, grad_input,
+            grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels,
+            ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part,
+            ctx.trans_std)
+        return (grad_input, grad_rois, grad_offset, None, None, None, None,
+                None, None, None, None)
+
+
+deform_roi_pooling = DeformRoIPoolingFunction.apply
diff --git a/CDARTS_detection/mmdet/ops/dcn/modules/__init__.py b/CDARTS_detection/mmdet/ops/dcn/modules/__init__.py
new file mode 100644
index 0000000..e69de29
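For reference, a minimal deform_roi_pooling call (illustration only: it assumes the extension is built and a CUDA device is available; with no_trans=True no offsets are learned, and passing an empty offset tensor mirrors what the upstream DeformRoIPooling module does):

    import torch
    from mmdet.ops.dcn import deform_roi_pooling

    feat = torch.randn(1, 256, 50, 68, device='cuda')  # stride-16 feature map
    # one RoI: (batch_idx, x1, y1, x2, y2) in input-image coordinates
    rois = torch.tensor([[0., 10., 10., 200., 150.]], device='cuda')
    offset = feat.new_empty(0)
    out = deform_roi_pooling(feat, rois, offset, 1.0 / 16, 7, 256, True)
    # out: (num_rois, 256, 7, 7)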
diff --git a/CDARTS_detection/mmdet/ops/dcn/src/deform_conv_cuda.cpp b/CDARTS_detection/mmdet/ops/dcn/src/deform_conv_cuda.cpp
new file mode 100644
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/dcn/src/deform_conv_cuda.cpp
+#include <torch/extension.h>
+
+#include <cmath>
+#include <vector>
+
+void deformable_im2col(const at::Tensor data_im, const at::Tensor data_offset,
+                       const int channels, const int height, const int width,
+                       const int ksize_h, const int ksize_w, const int pad_h,
+                       const int pad_w, const int stride_h, const int stride_w,
+                       const int dilation_h, const int dilation_w,
+                       const int parallel_imgs, const int deformable_group,
+                       at::Tensor data_col);
+
+void deformable_col2im(const at::Tensor data_col, const at::Tensor data_offset,
+                       const int channels, const int height, const int width,
+                       const int ksize_h, const int ksize_w, const int pad_h,
+                       const int pad_w, const int stride_h, const int stride_w,
+                       const int dilation_h, const int dilation_w,
+                       const int parallel_imgs, const int deformable_group,
+                       at::Tensor grad_im);
+
+void deformable_col2im_coord(
+    const at::Tensor data_col, const at::Tensor data_im,
+    const at::Tensor data_offset, const int channels, const int height,
+    const int width, const int ksize_h, const int ksize_w, const int pad_h,
+    const int pad_w, const int stride_h, const int stride_w,
+    const int dilation_h, const int dilation_w, const int parallel_imgs,
+    const int deformable_group, at::Tensor grad_offset);
+
+void modulated_deformable_im2col_cuda(
+    const at::Tensor data_im, const at::Tensor data_offset,
+    const at::Tensor data_mask, const int batch_size, const int channels,
+    const int height_im, const int width_im, const int height_col,
+    const int width_col, const int kernel_h, const int kernel_w,
+    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
+    const int dilation_h, const int dilation_w, const int deformable_group,
+    at::Tensor data_col);
+
+void modulated_deformable_col2im_cuda(
+    const at::Tensor data_col, const at::Tensor data_offset,
+    const at::Tensor data_mask, const int batch_size, const int channels,
+    const int height_im, const int width_im, const int height_col,
+    const int width_col, const int kernel_h, const int kernel_w,
+    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
+    const int dilation_h, const int dilation_w, const int deformable_group,
+    at::Tensor grad_im);
+
+void modulated_deformable_col2im_coord_cuda(
+    const at::Tensor data_col, const at::Tensor data_im,
+    const at::Tensor data_offset, const at::Tensor data_mask,
+    const int batch_size, const int channels, const int height_im,
+    const int width_im, const int height_col, const int width_col,
+    const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
+    const int stride_h, const int stride_w, const int dilation_h,
+    const int dilation_w, const int deformable_group, at::Tensor grad_offset,
+    at::Tensor grad_mask);
+
+void shape_check(at::Tensor input, at::Tensor offset, at::Tensor *gradOutput,
+                 at::Tensor weight, int kH, int kW, int dH, int dW, int padH,
+                 int padW, int dilationH, int dilationW, int group,
+                 int deformable_group) {
+  TORCH_CHECK(weight.ndimension() == 4,
+           "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, "
+           "but got: %d",
+           weight.ndimension());
+
+  TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
+
+  TORCH_CHECK(kW > 0 && kH > 0,
+           "kernel size should be greater than zero, but got kH: %d kW: %d", kH,
+           kW);
+
+  TORCH_CHECK((weight.size(2) == kH && weight.size(3) == kW),
+           "kernel size should be consistent with weight, ",
+           "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", kH,
+           kW, weight.size(2), weight.size(3));
+
+  TORCH_CHECK(dW > 0 && dH > 0,
+           "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
+
+  TORCH_CHECK(
+      dilationW > 0 && dilationH > 0,
+      "dilation should be greater than 0, but got dilationH: %d dilationW: %d",
+      dilationH, dilationW);
+
+  int ndim = input.ndimension();
+  int dimf = 0;
+  int dimh = 1;
+  int dimw = 2;
+
+  if (ndim == 4) {
+    dimf++;
+    dimh++;
+    dimw++;
+  }
+
+  TORCH_CHECK(ndim == 3 || ndim == 4, "3D or 4D input tensor expected but got: %d",
+           ndim);
+
+  long nInputPlane = weight.size(1) * group;
+  long inputHeight = input.size(dimh);
+  long inputWidth = input.size(dimw);
+  long nOutputPlane = weight.size(0);
+  long outputHeight =
+      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
+  long outputWidth =
+      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
+
+  TORCH_CHECK(nInputPlane % deformable_group == 0,
+           "input channels must divide deformable group size");
+
+  if (outputWidth < 1 || outputHeight < 1)
+    AT_ERROR(
+        "Given input size: (%ld x %ld x %ld). "
+        "Calculated output size: (%ld x %ld x %ld). 
Output size is too small", + nInputPlane, inputHeight, inputWidth, nOutputPlane, outputHeight, + outputWidth); + + TORCH_CHECK(input.size(1) == nInputPlane, + "invalid number of input planes, expected: %d, but got: %d", + nInputPlane, input.size(1)); + + TORCH_CHECK((inputHeight >= kH && inputWidth >= kW), + "input image is smaller than kernel"); + + TORCH_CHECK((offset.size(2) == outputHeight && offset.size(3) == outputWidth), + "invalid spatial size of offset, expected height: %d width: %d, but " + "got height: %d width: %d", + outputHeight, outputWidth, offset.size(2), offset.size(3)); + + TORCH_CHECK((offset.size(1) == deformable_group * 2 * kH * kW), + "invalid number of channels of offset"); + + if (gradOutput != NULL) { + TORCH_CHECK(gradOutput->size(dimf) == nOutputPlane, + "invalid number of gradOutput planes, expected: %d, but got: %d", + nOutputPlane, gradOutput->size(dimf)); + + TORCH_CHECK((gradOutput->size(dimh) == outputHeight && + gradOutput->size(dimw) == outputWidth), + "invalid size of gradOutput, expected height: %d width: %d , but " + "got height: %d width: %d", + outputHeight, outputWidth, gradOutput->size(dimh), + gradOutput->size(dimw)); + } +} + +int deform_conv_forward_cuda(at::Tensor input, at::Tensor weight, + at::Tensor offset, at::Tensor output, + at::Tensor columns, at::Tensor ones, int kW, + int kH, int dW, int dH, int padW, int padH, + int dilationW, int dilationH, int group, + int deformable_group, int im2col_step) { + // todo: resize columns to include im2col: done + // todo: add im2col_step as input + // todo: add new output buffer and transpose it to output (or directly + // transpose output) todo: possibly change data indexing because of + // parallel_imgs + + shape_check(input, offset, NULL, weight, kH, kW, dH, dW, padH, padW, + dilationH, dilationW, group, deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input.unsqueeze_(0); + offset.unsqueeze_(0); + } + + // todo: assert batchsize dividable by im2col_step + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + output = output.view({batchSize / im2col_step, im2col_step, nOutputPlane, + outputHeight, outputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < outputHeight * outputWidth) { + ones = at::ones({outputHeight, outputWidth}, input.options()); + } + + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + offset = + offset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + at::Tensor output_buffer = + at::zeros({batchSize / im2col_step, nOutputPlane, + im2col_step * outputHeight, outputWidth}, + output.options()); + + output_buffer = output_buffer.view( + {output_buffer.size(0), group, output_buffer.size(1) / group, + output_buffer.size(2), output_buffer.size(3)}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + 
deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + + for (int g = 0; g < group; g++) { + output_buffer[elt][g] = output_buffer[elt][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output_buffer[elt][g]); + } + } + + output_buffer = output_buffer.view( + {output_buffer.size(0), output_buffer.size(1) * output_buffer.size(2), + output_buffer.size(3), output_buffer.size(4)}); + + output_buffer = output_buffer.view({batchSize / im2col_step, nOutputPlane, + im2col_step, outputHeight, outputWidth}); + output_buffer.transpose_(1, 2); + output.copy_(output_buffer); + output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + output = output.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_input_cuda(at::Tensor input, at::Tensor offset, + at::Tensor gradOutput, at::Tensor gradInput, + at::Tensor gradOffset, at::Tensor weight, + at::Tensor columns, int kW, int kH, int dW, + int dH, int padW, int padH, int dilationW, + int dilationH, int group, + int deformable_group, int im2col_step) { + shape_check(input, offset, &gradOutput, weight, kH, kW, dH, dW, padH, padW, + dilationH, dilationW, group, deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view({1, input.size(0), input.size(1), input.size(2)}); + offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)}); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset"); + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + // change order of grad output + gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, + nOutputPlane, outputHeight, outputWidth}); + gradOutput.transpose_(1, 2); + + gradInput = gradInput.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + gradOffset = gradOffset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, + outputWidth}); + offset = + offset.view({batchSize / im2col_step, 
im2col_step, + deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + // divide into groups + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), group, gradOutput.size(1) / group, + gradOutput.size(2), gradOutput.size(3), gradOutput.size(4)}); + + for (int g = 0; g < group; g++) { + columns[g] = columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), + gradOutput[elt][g].flatten(1), 0.0f, 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradOutput = gradOutput.view( + {gradOutput.size(0), gradOutput.size(1) * gradOutput.size(2), + gradOutput.size(3), gradOutput.size(4), gradOutput.size(5)}); + + deformable_col2im_coord(columns, input[elt], offset[elt], nInputPlane, + inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, + dilationH, dilationW, im2col_step, deformable_group, + gradOffset[elt]); + + deformable_col2im(columns, offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, gradInput[elt]); + } + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + gradOffset = gradOffset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + gradOffset = + gradOffset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_parameters_cuda( + at::Tensor input, at::Tensor offset, at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, at::Tensor ones, int kW, int kH, int dW, int dH, + int padW, int padH, int dilationW, int dilationH, int group, + int deformable_group, float scale, int im2col_step) { + // todo: transpose and reshape outGrad + // todo: reshape columns + // todo: add im2col_step as input + + shape_check(input, offset, &gradOutput, gradWeight, kH, kW, dH, dW, padH, + padW, dilationH, dilationW, group, deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view( + at::IntList({1, input.size(0), input.size(1), input.size(2)})); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = gradWeight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + TORCH_CHECK((offset.size(0) == 
batchSize), "invalid batch size of offset"); + + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step, + nOutputPlane, outputHeight, outputWidth}); + gradOutput.transpose_(1, 2); + + at::Tensor gradOutputBuffer = at::zeros_like(gradOutput); + gradOutputBuffer = + gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, im2col_step, + outputHeight, outputWidth}); + gradOutputBuffer.copy_(gradOutput); + gradOutputBuffer = + gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, + im2col_step * outputHeight, outputWidth}); + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize / im2col_step, im2col_step, nInputPlane, + inputHeight, inputWidth}); + offset = + offset.view({batchSize / im2col_step, im2col_step, + deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight, + inputWidth, kH, kW, padH, padW, dH, dW, dilationH, + dilationW, im2col_step, deformable_group, columns); + + // divide into group + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), group, gradOutputBuffer.size(1) / group, + gradOutputBuffer.size(2), gradOutputBuffer.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + gradWeight = + gradWeight.view({group, gradWeight.size(0) / group, gradWeight.size(1), + gradWeight.size(2), gradWeight.size(3)}); + + for (int g = 0; g < group; g++) { + gradWeight[g] = gradWeight[g] + .flatten(1) + .addmm_(gradOutputBuffer[elt][g].flatten(1), + columns[g].transpose(1, 0), 1.0, scale) + .view_as(gradWeight[g]); + } + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + gradOutputBuffer.size(1) * gradOutputBuffer.size(2), + gradOutputBuffer.size(3), gradOutputBuffer.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradWeight = gradWeight.view({gradWeight.size(0) * gradWeight.size(1), + gradWeight.size(2), gradWeight.size(3), + gradWeight.size(4)}); + } + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + } + + return 1; +} + +void modulated_deform_conv_cuda_forward( + at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, + at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns, + int kernel_h, int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, const int deformable_group, + const bool with_bias) { + TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + if (kernel_h_ != kernel_h || 
kernel_w_ != kernel_w)
+    AT_ERROR("Input shape and kernel shape won't match: (%d x %d vs %d x %d).",
+             kernel_h, kernel_w, kernel_h_, kernel_w_);
+  if (channels != channels_kernel * group)
+    AT_ERROR("Input shape and kernel channels won't match: (%d vs %d).",
+             channels, channels_kernel * group);
+
+  const int height_out =
+      (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
+  const int width_out =
+      (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
+
+  if (ones.ndimension() != 2 ||
+      ones.size(0) * ones.size(1) < height_out * width_out) {
+    // Resize plane and fill with ones...
+    ones = at::ones({height_out, width_out}, input.options());
+  }
+
+  // resize output
+  output = output.view({batch, channels_out, height_out, width_out}).zero_();
+  // resize temporary columns
+  columns =
+      at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out},
+                input.options());
+
+  output = output.view({output.size(0), group, output.size(1) / group,
+                        output.size(2), output.size(3)});
+
+  for (int b = 0; b < batch; b++) {
+    modulated_deformable_im2col_cuda(
+        input[b], offset[b], mask[b], 1, channels, height, width, height_out,
+        width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
+        dilation_h, dilation_w, deformable_group, columns);
+
+    // divide into group
+    weight = weight.view({group, weight.size(0) / group, weight.size(1),
+                          weight.size(2), weight.size(3)});
+    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
+
+    for (int g = 0; g < group; g++) {
+      output[b][g] = output[b][g]
+                         .flatten(1)
+                         .addmm_(weight[g].flatten(1), columns[g])
+                         .view_as(output[b][g]);
+    }
+
+    weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
+                          weight.size(3), weight.size(4)});
+    columns =
+        columns.view({columns.size(0) * columns.size(1), columns.size(2)});
+  }
+
+  output = output.view({output.size(0), output.size(1) * output.size(2),
+                        output.size(3), output.size(4)});
+
+  if (with_bias) {
+    output += bias.view({1, bias.size(0), 1, 1});
+  }
+}
+
+void modulated_deform_conv_cuda_backward(
+    at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones,
+    at::Tensor offset, at::Tensor mask, at::Tensor columns,
+    at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias,
+    at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output,
+    int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h,
+    int pad_w, int dilation_h, int dilation_w, int group, int deformable_group,
+    const bool with_bias) {
+  TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
+  TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
+
+  const int batch = input.size(0);
+  const int channels = input.size(1);
+  const int height = input.size(2);
+  const int width = input.size(3);
+
+  const int channels_kernel = weight.size(1);
+  const int kernel_h_ = weight.size(2);
+  const int kernel_w_ = weight.size(3);
+  if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
+    AT_ERROR("Input shape and kernel shape won't match: (%d x %d vs %d x %d).",
+             kernel_h, kernel_w, kernel_h_, kernel_w_);
+  if (channels != channels_kernel * group)
+    AT_ERROR("Input shape and kernel channels won't match: (%d vs %d).",
+             channels, channels_kernel * group);
+
+  const int height_out =
+      (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
+  const int width_out =
+      (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
+
+  if (ones.ndimension() != 2 ||
+      ones.size(0) *
ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + grad_input = grad_input.view({batch, channels, height, width}); + columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out}, + input.options()); + + grad_output = + grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, + grad_output.size(2), grad_output.size(3)}); + + for (int b = 0; b < batch; b++) { + // divide int group + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, weight.size(0) / group, weight.size(1), + weight.size(2), weight.size(3)}); + + for (int g = 0; g < group; g++) { + columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), + grad_output[b][g].flatten(1), 0.0f, 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), + weight.size(3), weight.size(4)}); + + // gradient w.r.t. input coordinate data + modulated_deformable_col2im_coord_cuda( + columns, input[b], offset[b], mask[b], 1, channels, height, width, + height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, + stride_w, dilation_h, dilation_w, deformable_group, grad_offset[b], + grad_mask[b]); + // gradient w.r.t. input data + modulated_deformable_col2im_cuda( + columns, offset[b], mask[b], 1, channels, height, width, height_out, + width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, grad_input[b]); + + // gradient w.r.t. weight, dWeight should accumulate across the batch and + // group + modulated_deformable_im2col_cuda( + input[b], offset[b], mask[b], 1, channels, height, width, height_out, + width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, deformable_group, columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + grad_weight = grad_weight.view({group, grad_weight.size(0) / group, + grad_weight.size(1), grad_weight.size(2), + grad_weight.size(3)}); + if (with_bias) + grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); + + for (int g = 0; g < group; g++) { + grad_weight[g] = + grad_weight[g] + .flatten(1) + .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) + .view_as(grad_weight[g]); + if (with_bias) { + grad_bias[g] = + grad_bias[g] + .view({-1, 1}) + .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) + .view(-1); + } + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), + grad_weight.size(2), grad_weight.size(3), + grad_weight.size(4)}); + if (with_bias) + grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); + } + grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), + grad_output.size(2), grad_output.size(3), + grad_output.size(4)}); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("deform_conv_forward_cuda", &deform_conv_forward_cuda, + "deform forward (CUDA)"); + m.def("deform_conv_backward_input_cuda", &deform_conv_backward_input_cuda, + "deform_conv_backward_input (CUDA)"); + m.def("deform_conv_backward_parameters_cuda", + &deform_conv_backward_parameters_cuda, + "deform_conv_backward_parameters (CUDA)"); + m.def("modulated_deform_conv_cuda_forward", + &modulated_deform_conv_cuda_forward, + "modulated deform conv forward (CUDA)"); 
+ m.def("modulated_deform_conv_cuda_backward", + &modulated_deform_conv_cuda_backward, + "modulated deform conv backward (CUDA)"); +} diff --git a/CDARTS_detection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu b/CDARTS_detection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu new file mode 100644 index 0000000..fd56016 --- /dev/null +++ b/CDARTS_detection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu @@ -0,0 +1,866 @@ +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer ******************** + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. 
+ * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +// modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + +#include +#include +#include +#include +#include + +using namespace at; + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 1024; +const int kMaxGridNum = 65535; + +inline int GET_BLOCKS(const int N) +{ + return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); +} + +template +__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width, + const int height, const int width, scalar_t h, scalar_t w) +{ + + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, + const int h, const int w, const int height, const int width) +{ + + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, + const int height, const int width, const scalar_t *im_data, + const int data_width, const int bp_dir) +{ + + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += 
(argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + else if (bp_dir == 1) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int channel_per_deformable_group, + const int batch_size, const int num_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; + const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) + { + for (int j = 0; j < kernel_w; ++j) + { + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) + { + //const scalar_t map_h = i * dilation_h + offset_h; + //const scalar_t map_w = j * dilation_w + offset_w; + //const int cur_height = height - h_in; + //const int cur_width = width - w_in; + //val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); + val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + +void deformable_im2col( + const at::Tensor data_im, const at::Tensor data_offset, const int channels, + const int height, const int width, const int ksize_h, const int ksize_w, + const 
int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, const int parallel_imgs, + const int deformable_group, at::Tensor data_col) +{ + // num_axes should be smaller than block size + // todo: check parallel_imgs is correctly passed in + int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = channels * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.type(), "deformable_im2col_gpu", ([&] { + const scalar_t *data_im_ = data_im.data(); + const scalar_t *data_offset_ = data_offset.data(); + scalar_t *data_col_ = data_col.data(); + + deformable_im2col_gpu_kernel<<>>( + num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, + pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, + channel_per_deformable_group, parallel_imgs, channels, deformable_group, + height_col, width_col, data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); + } +} + +template +__global__ void deformable_col2im_gpu_kernel( + const int n, const scalar_t *data_col, const scalar_t *data_offset, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_im) +{ + CUDA_KERNEL_LOOP(index, n) + { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * + 2 * kernel_h * kernel_w * height_col * width_col; + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) + { + for (int dx = -2; dx <= 2; dx++) + { + if (cur_h + dy >= 0 && cur_h + dy < height && + cur_w + dx >= 0 && cur_w + dx < width && + abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) + { + int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = 
get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +void deformable_col2im( + const at::Tensor data_col, const at::Tensor data_offset, const int channels, + const int height, const int width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, + at::Tensor grad_im) +{ + + // todo: make sure parallel_imgs is passed in correctly + int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "deformable_col2im_gpu", ([&] { + const scalar_t *data_col_ = data_col.data(); + const scalar_t *data_offset_ = data_offset.data(); + scalar_t *grad_im_ = grad_im.data(); + + deformable_col2im_gpu_kernel<<>>( + num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, + ksize_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + parallel_imgs, deformable_group, height_col, width_col, grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); + } +} + +template +__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, + const scalar_t *data_im, const scalar_t *data_offset, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int offset_channels, const int deformable_group, + const int height_col, const int width_col, scalar_t *grad_offset) +{ + CUDA_KERNEL_LOOP(index, n) + { + scalar_t val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * + batch_size * width_col * height_col; + const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * + kernel_h * kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) + { + const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos 
% width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) + { + inv_h = inv_w = -2; + } + const scalar_t weight = get_coordinate_weight( + inv_h, inv_w, + height, width, data_im_ptr + cnt * height * width, width, bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + + grad_offset[index] = val; + } +} + +void deformable_col2im_coord( + const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, + const int channels, const int height, const int width, const int ksize_h, + const int ksize_w, const int pad_h, const int pad_w, const int stride_h, + const int stride_w, const int dilation_h, const int dilation_w, + const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) +{ + + int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; + int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "deformable_col2im_coord_gpu", ([&] { + const scalar_t *data_col_ = data_col.data(); + const scalar_t *data_im_ = data_im.data(); + const scalar_t *data_offset_ = data_offset.data(); + scalar_t *grad_offset_ = grad_offset.data(); + + deformable_col2im_coord_gpu_kernel<<>>( + num_kernels, data_col_, data_im_, data_offset_, channels, height, width, + ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, + height_col, width_col, grad_offset_); + })); +} + +template +__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, + const int height, const int width, scalar_t h, scalar_t w) +{ + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, + const int h, const int w, const int height, const int width) +{ + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty 
+ return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template <typename scalar_t> +__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, + const int height, const int width, const scalar_t *im_data, + const int data_width, const int bp_dir) +{ + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) + { + //empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + else if (bp_dir == 1) + { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template <typename scalar_t> +__global__ void modulated_deformable_im2col_gpu_kernel(const int n, + const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, + const int height, const int width, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int num_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + // index of the output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + scalar_t *data_col_ptr = data_col + 
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; + const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + + const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) + { + for (int j = 0; j < kernel_w; ++j) + { + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; + const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t val = static_cast<scalar_t>(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) + { + //const float map_h = i * dilation_h + offset_h; + //const float map_w = j * dilation_w + offset_w; + //const int cur_height = height - h_in; + //const int cur_width = width - w_in; + //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); + val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val * mask; + data_col_ptr += batch_size * height_col * width_col; + //data_col_ptr += height_col * width_col; + } + } + } +} + +template <typename scalar_t> +__global__ void modulated_deformable_col2im_gpu_kernel(const int n, + const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_im) +{ + CUDA_KERNEL_LOOP(index, n) + { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; + const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + 
h_out) * width_col + w_out; + const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index] * mask; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) + { + for (int dx = -2; dx <= 2; dx++) + { + if (cur_h + dy >= 0 && cur_h + dy < height && + cur_w + dx >= 0 && cur_w + dx < width && + abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) + { + int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template <typename scalar_t> +__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, + const scalar_t *data_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, + const int channels, const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, const int offset_channels, const int deformable_group, + const int height_col, const int width_col, + scalar_t *grad_offset, scalar_t *grad_mask) +{ + CUDA_KERNEL_LOOP(index, n) + { + scalar_t val = 0, mval = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; + const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; + const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) + { + const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = (((2 * 
(i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); + const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) + { + inv_h = inv_w = -2; + } + else + { + mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); + } + const scalar_t weight = dmcn_get_coordinate_weight( + inv_h, inv_w, + height, width, data_im_ptr + cnt * height * width, width, bp_dir); + val += weight * data_col_ptr[col_pos] * mask; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + if (offset_c % 2 == 0) + // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); + grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; + } +} + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, at::Tensor data_col) +{ + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.type(), "modulated_deformable_im2col_gpu", ([&] { + const scalar_t *data_im_ = data_im.data<scalar_t>(); + const scalar_t *data_offset_ = data_offset.data<scalar_t>(); + const scalar_t *data_mask_ = data_mask.data<scalar_t>(); + scalar_t *data_col_ = data_col.data<scalar_t>(); + + modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( + num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kernel_w, + pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, + batch_size, channels, deformable_group, height_col, width_col, data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, at::Tensor grad_im) +{ + + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "modulated_deformable_col2im_gpu", ([&] { + const scalar_t *data_col_ = 
data_col.data<scalar_t>(); + const scalar_t *data_offset_ = data_offset.data<scalar_t>(); + const scalar_t *data_mask_ = data_mask.data<scalar_t>(); + scalar_t *grad_im_ = grad_im.data<scalar_t>(); + + modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( + num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, + kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + batch_size, deformable_group, height_col, width_col, grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, + const int batch_size, const int channels, const int height_im, const int width_im, + const int height_col, const int width_col, const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, const int stride_h, const int stride_w, + const int dilation_h, const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, at::Tensor grad_mask) +{ + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; + const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] { + const scalar_t *data_col_ = data_col.data<scalar_t>(); + const scalar_t *data_im_ = data_im.data<scalar_t>(); + const scalar_t *data_offset_ = data_offset.data<scalar_t>(); + const scalar_t *data_mask_ = data_mask.data<scalar_t>(); + scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); + scalar_t *grad_mask_ = grad_mask.data<scalar_t>(); + + modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( + num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, + kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, + dilation_h, dilation_w, channel_per_deformable_group, + batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, + grad_offset_, grad_mask_); + })); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); + } +} diff --git a/CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda.cpp b/CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda.cpp new file mode 100644 index 0000000..e19cf42 --- /dev/null +++ b/CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda.cpp @@ -0,0 +1,87 @@ +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/modulated_dcn_cuda.c + +// based on +// author: Charles Shang +// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu + +#include <torch/extension.h> + +#include <cmath> +#include <vector> + +void DeformablePSROIPoolForward( + const at::Tensor data, const at::Tensor bbox, const at::Tensor trans, + at::Tensor out, at::Tensor top_count, const int batch, const int channels, + const int height, const int width, const int num_bbox, + const int channels_trans, const int no_trans, const float spatial_scale, + const int output_dim, const int group_size, const int pooled_size, + const int part_size, const int sample_per_part, const float trans_std); + +void DeformablePSROIPoolBackwardAcc( + const at::Tensor out_grad, const at::Tensor data, const at::Tensor bbox, + const at::Tensor trans, const at::Tensor top_count, at::Tensor in_grad, + 
at::Tensor trans_grad, const int batch, const int channels, + const int height, const int width, const int num_bbox, + const int channels_trans, const int no_trans, const float spatial_scale, + const int output_dim, const int group_size, const int pooled_size, + const int part_size, const int sample_per_part, const float trans_std); + +void deform_psroi_pooling_cuda_forward( + at::Tensor input, at::Tensor bbox, at::Tensor trans, at::Tensor out, + at::Tensor top_count, const int no_trans, const float spatial_scale, + const int output_dim, const int group_size, const int pooled_size, + const int part_size, const int sample_per_part, const float trans_std) { + TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + const int channels_trans = no_trans ? 2 : trans.size(1); + + const int num_bbox = bbox.size(0); + if (num_bbox != out.size(0)) + AT_ERROR("Output shape and bbox number won't match: (%d vs %d).", + out.size(0), num_bbox); + + DeformablePSROIPoolForward( + input, bbox, trans, out, top_count, batch, channels, height, width, + num_bbox, channels_trans, no_trans, spatial_scale, output_dim, group_size, + pooled_size, part_size, sample_per_part, trans_std); +} + +void deform_psroi_pooling_cuda_backward( + at::Tensor out_grad, at::Tensor input, at::Tensor bbox, at::Tensor trans, + at::Tensor top_count, at::Tensor input_grad, at::Tensor trans_grad, + const int no_trans, const float spatial_scale, const int output_dim, + const int group_size, const int pooled_size, const int part_size, + const int sample_per_part, const float trans_std) { + TORCH_CHECK(out_grad.is_contiguous(), "out_grad tensor has to be contiguous"); + TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + const int channels_trans = no_trans ? 2 : trans.size(1); + + const int num_bbox = bbox.size(0); + if (num_bbox != out_grad.size(0)) + AT_ERROR("Output shape and bbox number won't match: (%d vs %d).", + out_grad.size(0), num_bbox); + + DeformablePSROIPoolBackwardAcc( + out_grad, input, bbox, trans, top_count, input_grad, trans_grad, batch, + channels, height, width, num_bbox, channels_trans, no_trans, + spatial_scale, output_dim, group_size, pooled_size, part_size, + sample_per_part, trans_std); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("deform_psroi_pooling_cuda_forward", &deform_psroi_pooling_cuda_forward, + "deform psroi pooling forward(CUDA)"); + m.def("deform_psroi_pooling_cuda_backward", + &deform_psroi_pooling_cuda_backward, + "deform psroi pooling backward(CUDA)"); +} diff --git a/CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda_kernel.cu b/CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda_kernel.cu new file mode 100644 index 0000000..e494460 --- /dev/null +++ b/CDARTS_detection/mmdet/ops/dcn/src/deform_pool_cuda_kernel.cu @@ -0,0 +1,364 @@ +/*!
+ * Copyright (c) 2017 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file deformable_psroi_pooling.cu + * \brief + * \author Yi Li, Guodong Zhang, Jifeng Dai +*/ +/***************** Adapted by Charles Shang *********************/ +// modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/cuda/deform_psroi_pooling_cuda.cu + +#include <ATen/ATen.h> +#include <THC/THCAtomics.cuh> +#include <stdio.h> +#include <math.h> +#include <algorithm> + +using namespace at; + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 1024; +inline int GET_BLOCKS(const int N) +{ + return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; +} + +template <typename scalar_t> +__device__ scalar_t bilinear_interp( + const scalar_t *data, + const scalar_t x, + const scalar_t y, + const int width, + const int height) +{ + int x1 = floor(x); + int x2 = ceil(x); + int y1 = floor(y); + int y2 = ceil(y); + scalar_t dist_x = (scalar_t)(x - x1); + scalar_t dist_y = (scalar_t)(y - y1); + scalar_t value11 = data[y1 * width + x1]; + scalar_t value12 = data[y2 * width + x1]; + scalar_t value21 = data[y1 * width + x2]; + scalar_t value22 = data[y2 * width + x2]; + scalar_t value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22; + return value; +} + +template <typename scalar_t> +__global__ void DeformablePSROIPoolForwardKernel( + const int count, + const scalar_t *bottom_data, + const scalar_t spatial_scale, + const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const scalar_t *bottom_rois, const scalar_t *bottom_trans, + const int no_trans, + const scalar_t trans_std, + const int sample_per_part, + const int output_dim, + const int group_size, + const int part_size, + const int num_classes, + const int channels_each_class, + scalar_t *top_data, + scalar_t *top_count) +{ + CUDA_KERNEL_LOOP(index, count) + { + // The output is in order (n, ctop, ph, pw) + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int ctop = (index / pooled_width / pooled_height) % output_dim; + int n = index / pooled_width / pooled_height / output_dim; + + // [start, end) interval for spatial sampling + const scalar_t *offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; + scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; + scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; + scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; + + // Force too small ROIs to be 1x1 + scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 + scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1); + + // Compute w and h at bottom + scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height); + scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width); + + scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part); + scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part); + + int part_h = floor((scalar_t)(ph) / pooled_height * part_size); + int part_w = floor((scalar_t)(pw) / pooled_width * part_size); + int class_id = ctop / channels_each_class; + scalar_t trans_x = no_trans ? 
(scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + + scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; + wstart += trans_x * roi_width; + scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; + hstart += trans_y * roi_height; + + scalar_t sum = 0; + int count = 0; + int gw = floor((scalar_t)(pw)*group_size / pooled_width); + int gh = floor((scalar_t)(ph)*group_size / pooled_height); + gw = min(max(gw, 0), group_size - 1); + gh = min(max(gh, 0), group_size - 1); + + const scalar_t *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; + for (int ih = 0; ih < sample_per_part; ih++) + { + for (int iw = 0; iw < sample_per_part; iw++) + { + scalar_t w = wstart + iw * sub_bin_size_w; + scalar_t h = hstart + ih * sub_bin_size_h; + // bilinear interpolation + if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) + { + continue; + } + w = min(max(w, 0.), width - 1.); + h = min(max(h, 0.), height - 1.); + int c = (ctop * group_size + gh) * group_size + gw; + scalar_t val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height); + sum += val; + count++; + } + } + top_data[index] = count == 0 ? (scalar_t)(0) : sum / count; + top_count[index] = count; + } +} + +template <typename scalar_t> +__global__ void DeformablePSROIPoolBackwardAccKernel( + const int count, + const scalar_t *top_diff, + const scalar_t *top_count, + const int num_rois, + const scalar_t spatial_scale, + const int channels, + const int height, const int width, + const int pooled_height, const int pooled_width, + const int output_dim, + scalar_t *bottom_data_diff, scalar_t *bottom_trans_diff, + const scalar_t *bottom_data, + const scalar_t *bottom_rois, + const scalar_t *bottom_trans, + const int no_trans, + const scalar_t trans_std, + const int sample_per_part, + const int group_size, + const int part_size, + const int num_classes, + const int channels_each_class) +{ + CUDA_KERNEL_LOOP(index, count) + { + // The output is in order (n, ctop, ph, pw) + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int ctop = (index / pooled_width / pooled_height) % output_dim; + int n = index / pooled_width / pooled_height / output_dim; + + // [start, end) interval for spatial sampling + const scalar_t *offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; + scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; + scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; + scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) 
* spatial_scale - 0.5; + + // Force too small ROIs to be 1x1 + scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 + scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1); + + // Compute w and h at bottom + scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height); + scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width); + + scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part); + scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part); + + int part_h = floor((scalar_t)(ph) / pooled_height * part_size); + int part_w = floor((scalar_t)(pw) / pooled_width * part_size); + int class_id = ctop / channels_each_class; + scalar_t trans_x = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; + + scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; + wstart += trans_x * roi_width; + scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; + hstart += trans_y * roi_height; + + if (top_count[index] <= 0) + { + continue; + } + scalar_t diff_val = top_diff[index] / top_count[index]; + const scalar_t *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; + scalar_t *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; + int gw = floor((scalar_t)(pw)*group_size / pooled_width); + int gh = floor((scalar_t)(ph)*group_size / pooled_height); + gw = min(max(gw, 0), group_size - 1); + gh = min(max(gh, 0), group_size - 1); + + for (int ih = 0; ih < sample_per_part; ih++) + { + for (int iw = 0; iw < sample_per_part; iw++) + { + scalar_t w = wstart + iw * sub_bin_size_w; + scalar_t h = hstart + ih * sub_bin_size_h; + // bilinear interpolation + if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) + { + continue; + } + w = min(max(w, 0.), width - 1.); + h = min(max(h, 0.), height - 1.); + int c = (ctop * group_size + gh) * group_size + gw; + // backward on feature + int x0 = floor(w); + int x1 = ceil(w); + int y0 = floor(h); + int y1 = ceil(h); + scalar_t dist_x = w - x0, dist_y = h - y0; + scalar_t q00 = (1 - dist_x) * (1 - dist_y); + scalar_t q01 = (1 - dist_x) * dist_y; + scalar_t q10 = dist_x * (1 - dist_y); + scalar_t q11 = dist_x * dist_y; + int bottom_index_base = c * height * width; + atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val); + atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val); + atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val); + atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val); + + if (no_trans) + { + continue; + } + scalar_t U00 = offset_bottom_data[bottom_index_base + y0 * width + x0]; + scalar_t U01 = offset_bottom_data[bottom_index_base + y1 * width + x0]; + scalar_t U10 = offset_bottom_data[bottom_index_base + y0 * width + x1]; + scalar_t U11 = offset_bottom_data[bottom_index_base + y1 * width + x1]; + scalar_t diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val; + diff_x *= roi_width; + scalar_t diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val; + diff_y *= roi_height; + + atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + 
part_h) * part_size + part_w, diff_x); + atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); + } + } + } +} + +void DeformablePSROIPoolForward(const at::Tensor data, + const at::Tensor bbox, + const at::Tensor trans, + at::Tensor out, + at::Tensor top_count, + const int batch, + const int channels, + const int height, + const int width, + const int num_bbox, + const int channels_trans, + const int no_trans, + const float spatial_scale, + const int output_dim, + const int group_size, + const int pooled_size, + const int part_size, + const int sample_per_part, + const float trans_std) +{ + const int pooled_height = pooled_size; + const int pooled_width = pooled_size; + const int count = num_bbox * output_dim * pooled_height * pooled_width; + const int num_classes = no_trans ? 1 : channels_trans / 2; + const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data.type(), "deformable_psroi_pool_forward", ([&] { + const scalar_t *bottom_data = data.data<scalar_t>(); + const scalar_t *bottom_rois = bbox.data<scalar_t>(); + const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>(); + scalar_t *top_data = out.data<scalar_t>(); + scalar_t *top_count_data = top_count.data<scalar_t>(); + + DeformablePSROIPoolForwardKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>( + count, bottom_data, (scalar_t)spatial_scale, channels, height, width, pooled_height, pooled_width, + bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, output_dim, + group_size, part_size, num_classes, channels_each_class, top_data, top_count_data); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err)); + } +} + +void DeformablePSROIPoolBackwardAcc(const at::Tensor out_grad, + const at::Tensor data, + const at::Tensor bbox, + const at::Tensor trans, + const at::Tensor top_count, + at::Tensor in_grad, + at::Tensor trans_grad, + const int batch, + const int channels, + const int height, + const int width, + const int num_bbox, + const int channels_trans, + const int no_trans, + const float spatial_scale, + const int output_dim, + const int group_size, + const int pooled_size, + const int part_size, + const int sample_per_part, + const float trans_std) +{ + // LOG(INFO) << "DeformablePSROIPoolBackward"; + const int num_rois = num_bbox; + const int pooled_height = pooled_size; + const int pooled_width = pooled_size; + const int count = num_bbox * output_dim * pooled_height * pooled_width; + const int num_classes = no_trans ? 1 : channels_trans / 2; + const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + out_grad.type(), "deformable_psroi_pool_backward_acc", ([&] { + const scalar_t *top_diff = out_grad.data<scalar_t>(); + const scalar_t *bottom_data = data.data<scalar_t>(); + const scalar_t *bottom_rois = bbox.data<scalar_t>(); + const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>(); + scalar_t *bottom_data_diff = in_grad.data<scalar_t>(); + scalar_t *bottom_trans_diff = no_trans ? 
NULL : trans_grad.data<scalar_t>(); + const scalar_t *top_count_data = top_count.data<scalar_t>(); + + DeformablePSROIPoolBackwardAccKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>( + count, top_diff, top_count_data, num_rois, (scalar_t)spatial_scale, channels, height, width, + pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff, + bottom_data, bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, + group_size, part_size, num_classes, channels_each_class); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in DeformablePSROIPoolBackwardAcc: %s\n", cudaGetErrorString(err)); + } +} \ No newline at end of file diff --git a/CDARTS_detection/mmdet/ops/gcb/__init__.py b/CDARTS_detection/mmdet/ops/gcb/__init__.py new file mode 100644 index 0000000..05dd625 --- /dev/null +++ b/CDARTS_detection/mmdet/ops/gcb/__init__.py @@ -0,0 +1,5 @@ +from .context_block import ContextBlock + +__all__ = [ + 'ContextBlock', +] diff --git a/CDARTS_detection/mmdet/ops/gcb/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/gcb/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef3d5e8d8ec40cec73f3a095e090443813d5d645 GIT binary patch literal 219 zcmX|*%?g4*6oqG0C_=ZrgPR6w8zr@mpq-n`I5Vg;KjMs#kJKxOe;vv2o?ZnGnq+!o? zdcCD#p0GKEVu^*1rQG_{f literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/ops/gcb/__pycache__/context_block.cpython-36.pyc b/CDARTS_detection/mmdet/ops/gcb/__pycache__/context_block.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ab280a3f193531c12b35c90d14ea1f554ba9c4f GIT binary patch literal 2814 zcmb_e&u<*J6(%`9cQsneiqgbN8>lH-bc;Z8ouY@raAT)#dvJxIPA`iVgVAubn%()a zCb_aD%=VBqkn~`nz4U*`wSNw;J^5eorTrdfE!jqooyEL)d?X*q_rC9OKil3O{`%Ef z_VEqI{>|QaeCVI!)ej&fle}UR`ihFzVj^m9;&Ezw6|4P;58ao+b2bTn$7Cp@=S)V0 zKMU91B$E6w8^edZgJIkVfSSuV+y|<$RWPrHwfG$LJYQT@pj&>Dy{%_ctuem;UbI@LM8O?otVmukI`PaOI|8&p^$f4OnOYC6=T1kiO zrrVtlIl{_fF4VuwjoD7eaYCH&1@+&AWmj&=|KWLm z<9Qz|enhc$=-oL+vd*Up)MRQ0Vse$XdRbkOguqHbc@MANgP3BIe@Rvd?ze0n{zE+R zANMeJSOx>d;cd%LL5N)tqLk_BQUkoHxmXn1jZq2Qyc*uEN^SDCUflbTeA7Z9yBxm> zr^D1CIZY4m0$o|9??HJz%kl2=lgu1c1%Q9hHpM}|)`322mz6v?DGrvpkcyy({cOz( zMX1!}W$|)DLQj_USLln%W>IB%F&643M!Tqtzi$J*Tog);V6yS+&Dha>tqVnze5h2b zR7~AIZOjH{xu^<6wyLaG6^)IZW37fPe}Zj#7`g)!$F$n zC_y?+$6OIqT%y%S&Dd_xT!0w1FRCU#D2*l(L=~VHrX`p_NCe>d} z7-Kl`h{s}x?~q^q;H~q%n~j_xOb=z4AnwlnPbeLqu{Ha%zY;5N<*(>$+{NMBj<3Ql zoC(xK)UPf;J@lLfHE>ORh(d>&x{A7pMoDosDneh-MVmfPqNc4P6V9S8gjKBKF7Evz z%3|%W;%0~$vGayL+!g+DUul=U7SaLokO)U+jt@e!Wb@t~V5yx8BOqo3pPnM7J>E!)548D2NQQga`^` zgm8$v7nr+k#LTriWCOiEWoeJO5j|2LVXRNR$g0-`A5fo%*z)6XpdP@p!M5bMG5`r3la+gaWXP;}a=r@SKS#3~X($o&q6pUI{q$g?mWSLcck1HXLDg6;2 zVy4~RRGYl0sx-acBzX22`uo~Lq#_3DAw(Q7KKfBSjQ3qZAIC0Clen()^TWJp+#fUL ncuZx)QP+lla9eUSt{3Swy2osMx8L+VDnFX8f*4pyyg2$VI*P9S literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/ops/gcb/context_block.py b/CDARTS_detection/mmdet/ops/gcb/context_block.py new file mode 100644 index 0000000..be9092c --- /dev/null +++ b/CDARTS_detection/mmdet/ops/gcb/context_block.py @@ -0,0 +1,104 @@ +import torch +from mmcv.cnn import constant_init, kaiming_init +from torch import nn + + +def last_zero_init(m): + if isinstance(m, nn.Sequential): + constant_init(m[-1], val=0) + else: + constant_init(m, val=0) + + +class ContextBlock(nn.Module): + + def __init__(self, + inplanes, + ratio, + pooling_type='att', + fusion_types=('channel_add', )): + super(ContextBlock, self).__init__() + assert pooling_type in ['avg', 'att'] + assert isinstance(fusion_types, (list, tuple)) + 
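# 'channel_add' adds the pooled global context back to every position, while 'channel_mul' rescales features with a sigmoid gate (see forward below) +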
valid_fusion_types = ['channel_add', 'channel_mul'] + assert all([f in valid_fusion_types for f in fusion_types]) + assert len(fusion_types) > 0, 'at least one fusion should be used' + self.inplanes = inplanes + self.ratio = ratio + self.planes = int(inplanes * ratio) + self.pooling_type = pooling_type + self.fusion_types = fusion_types + if pooling_type == 'att': + self.conv_mask = nn.Conv2d(inplanes, 1, kernel_size=1) + self.softmax = nn.Softmax(dim=2) + else: + self.avg_pool = nn.AdaptiveAvgPool2d(1) + if 'channel_add' in fusion_types: + self.channel_add_conv = nn.Sequential( + nn.Conv2d(self.inplanes, self.planes, kernel_size=1), + nn.LayerNorm([self.planes, 1, 1]), + nn.ReLU(inplace=True), # yapf: disable + nn.Conv2d(self.planes, self.inplanes, kernel_size=1)) + else: + self.channel_add_conv = None + if 'channel_mul' in fusion_types: + self.channel_mul_conv = nn.Sequential( + nn.Conv2d(self.inplanes, self.planes, kernel_size=1), + nn.LayerNorm([self.planes, 1, 1]), + nn.ReLU(inplace=True), # yapf: disable + nn.Conv2d(self.planes, self.inplanes, kernel_size=1)) + else: + self.channel_mul_conv = None + self.reset_parameters() + + def reset_parameters(self): + if self.pooling_type == 'att': + kaiming_init(self.conv_mask, mode='fan_in') + self.conv_mask.inited = True + + if self.channel_add_conv is not None: + last_zero_init(self.channel_add_conv) + if self.channel_mul_conv is not None: + last_zero_init(self.channel_mul_conv) + + def spatial_pool(self, x): + batch, channel, height, width = x.size() + if self.pooling_type == 'att': + input_x = x + # [N, C, H * W] + input_x = input_x.view(batch, channel, height * width) + # [N, 1, C, H * W] + input_x = input_x.unsqueeze(1) + # [N, 1, H, W] + context_mask = self.conv_mask(x) + # [N, 1, H * W] + context_mask = context_mask.view(batch, 1, height * width) + # [N, 1, H * W] + context_mask = self.softmax(context_mask) + # [N, 1, H * W, 1] + context_mask = context_mask.unsqueeze(-1) + # [N, 1, C, 1] + context = torch.matmul(input_x, context_mask) + # [N, C, 1, 1] + context = context.view(batch, channel, 1, 1) + else: + # [N, C, 1, 1] + context = self.avg_pool(x) + + return context + + def forward(self, x): + # [N, C, 1, 1] + context = self.spatial_pool(x) + + out = x + if self.channel_mul_conv is not None: + # [N, C, 1, 1] + channel_mul_term = torch.sigmoid(self.channel_mul_conv(context)) + out = out * channel_mul_term + if self.channel_add_conv is not None: + # [N, C, 1, 1] + channel_add_term = self.channel_add_conv(context) + out = out + channel_add_term + + return out diff --git a/CDARTS_detection/mmdet/ops/masked_conv/__init__.py b/CDARTS_detection/mmdet/ops/masked_conv/__init__.py new file mode 100644 index 0000000..feab953 --- /dev/null +++ b/CDARTS_detection/mmdet/ops/masked_conv/__init__.py @@ -0,0 +1,4 @@ +from .functions.masked_conv import masked_conv2d +from .modules.masked_conv import MaskedConv2d + +__all__ = ['masked_conv2d', 'MaskedConv2d'] diff --git a/CDARTS_detection/mmdet/ops/masked_conv/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/masked_conv/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b15bcdca5d64a65478366de1eccae5c142269fd7 GIT binary patch literal 299 zcmXr!<>j*fm614sfq~&M5W@izkmUfx#VSA|g&~D8has0Sijfh-X3Am8WsYJ7vYAs@ z(wU-IQdol-G}&G<0+nhq-r~(oEY40%iBHbYD>F&~3h?-X1)RbBB9O^N%s|3VlQl{- ztu!yWBr`v+SPw}TSWT30ZhlH>PAaMlNaHQ``1r(}ocQ>a3`MLU(?G;85B-e%+*BhV znU`K!nwY1bT$z$stY2J^U#y>-o03|R3bqKs*3T~hiXz;pA0MBYmst`YuUAlci^B$D QvmMAo#VkOAhmnT~06{HLzW@LL 
literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/ops/masked_conv/functions/__init__.py b/CDARTS_detection/mmdet/ops/masked_conv/functions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/CDARTS_detection/mmdet/ops/masked_conv/functions/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/masked_conv/functions/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..506b991962bd9bb6eec80a7de05b325d6610fa0b GIT binary patch literal 165 zcmXr!<>j*fm66B*1dl-k3@`#24nSPY0whuxf*CX!{Z=v*frJsnuOR)5{M=L{Aeon5 zS(=!qpIn)eSgc=MkYB8yo12nalA2tSnV$z?>*p5$MH7p&Q&ZxT^YhB|(@OKe(#87m e@tJv9T}6^RqQYd5a*`xGP; z>6}7~+QcU|abDpy4jtfJ(t3peyTpUIZ8dysygoEecd;1;7aL6yZ|nXTR;UPi2$TN} zDrwEp8mh3c*oS~IvDTQ_3ya?s)^Av1qjf<04dA!h2U{(My)Hq&TOVjonzI`YJUrJn z!j|qDKeKAD_5rsHZr6cqLlm3;Ap^a#*TNy7GZ9_>r)UAPwZCUh-Ko2EukP>S*L%Vf zP;no#5&IY&Lk7o4w(r1t1QlB2djEz!@L=!%&pxIR37M#oNjD5YSRf!H^D~t7t`eG>nDPxgpQK3%E+iF#i%T@%h4Gf-5aIF z=$$?tB^6_|5ZUEq^Dm6?P}wBSmBZ63s`@{0PML1^9~mnd(;-#xOPp2I2$Yv{)9LD9 zT8f|ZWkz#wMoFW!(?TdGkHt*6=UG`Y<%*IeGvyU!aYb3F+67%j8gbF zMO9XuxezYAz0EhxPm?l>&P%q68BuObNDg_vNa^a@1`JNqd}lL0R(2v@D*v3uqGFWm zT=+t@Ry4hsL1t%Z%$2tUPib+XTHKUPxgdxjW4)}D-$+5QL#wb;Rw7DfaZ%7L4ANqU zmA{}2WZHO(Rw_7)MKX&_Jzc#KTuoqrZ53*9{Tf#W8Mb@lOkQ`uQg!LNR2%@rHBlpjS!oYN>$K@{aBsWOeb zQS@sSXIo8|i&&&dPQ|PwY@&DH(?lQC4Wd(GEg(KjZh&Rs?q^0e?y!N@4}pYzb7>}V zB})BuOlKY*Ho~HqG->cjCDM#HR}`76V?4n{SuV3nwdM!P9 OAU+(HKI*Q$|J8pK%>%#y literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/ops/masked_conv/functions/masked_conv.py b/CDARTS_detection/mmdet/ops/masked_conv/functions/masked_conv.py new file mode 100644 index 0000000..eed32b7 --- /dev/null +++ b/CDARTS_detection/mmdet/ops/masked_conv/functions/masked_conv.py @@ -0,0 +1,56 @@ +import math +import torch +from torch.autograd import Function +from torch.nn.modules.utils import _pair +from .. 
import masked_conv2d_cuda + + +class MaskedConv2dFunction(Function): + + @staticmethod + def forward(ctx, features, mask, weight, bias, padding=0, stride=1): + assert mask.dim() == 3 and mask.size(0) == 1 + assert features.dim() == 4 and features.size(0) == 1 + assert features.size()[2:] == mask.size()[1:] + pad_h, pad_w = _pair(padding) + stride_h, stride_w = _pair(stride) + if stride_h != 1 or stride_w != 1: + raise ValueError( + 'Only stride 1 is supported in masked_conv2d currently.') + if not features.is_cuda: + raise NotImplementedError + + out_channel, in_channel, kernel_h, kernel_w = weight.size() + + batch_size = features.size(0) + out_h = int( + math.floor((features.size(2) + 2 * pad_h - + (kernel_h - 1) - 1) / stride_h + 1)) + out_w = int( + math.floor((features.size(3) + 2 * pad_w - + (kernel_w - 1) - 1) / stride_w + 1)) + mask_inds = torch.nonzero(mask[0] > 0) + output = features.new_zeros(batch_size, out_channel, out_h, out_w) + if mask_inds.numel() > 0: + mask_h_idx = mask_inds[:, 0].contiguous() + mask_w_idx = mask_inds[:, 1].contiguous() + data_col = features.new_zeros(in_channel * kernel_h * kernel_w, + mask_inds.size(0)) + masked_conv2d_cuda.masked_im2col_forward(features, mask_h_idx, + mask_w_idx, kernel_h, + kernel_w, pad_h, pad_w, + data_col) + + masked_output = torch.addmm(1, bias[:, None], 1, + weight.view(out_channel, -1), data_col) + masked_conv2d_cuda.masked_col2im_forward(masked_output, mask_h_idx, + mask_w_idx, out_h, out_w, + out_channel, output) + return output + + @staticmethod + def backward(ctx, grad_output): + # one None gradient per forward input (features, mask, weight, bias, padding, stride) + return (None, ) * 6 + + +masked_conv2d = MaskedConv2dFunction.apply diff --git a/CDARTS_detection/mmdet/ops/masked_conv/modules/__init__.py b/CDARTS_detection/mmdet/ops/masked_conv/modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/CDARTS_detection/mmdet/ops/masked_conv/modules/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/masked_conv/modules/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31f6c29a6ce8cea917812b5ee5645c371dd8ffd1 GIT binary patch literal 163 zcmXr!<>j*fm66B*1dl-k3@`#24nSPY0whuxf*CX!{Z=v*frJsnuK@jw{M=L{Aeon5 zS(=!qpIn)eSgc=MkYB8yo12nalA2tSnV$z?>*p5$MH7p&Q&ZxT^YhB|bMsS5b5e`- btr3x#B literal 0 HcmV?d00001 diff --git a/CDARTS_detection/mmdet/ops/masked_conv/modules/__pycache__/masked_conv.cpython-36.pyc b/CDARTS_detection/mmdet/ops/masked_conv/modules/__pycache__/masked_conv.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9ff6c38b6db0a6dd96365b8671ff985575d5fdc GIT binary patch literal 1173 zcmY*Yy>8nu5GM6!*|CGPO@MYIx)cTqyG_@kKj_e@S(3>Mp@_6aTcT7_YT!Vd1LRry z0DT#*o$?BqdZg^yp}h44#TiXY`Rkme*0x*WR?zQ2 +;0U~~hIM?5t=i5JD$)QeeGLdFP $c!sdYf@*Nl=HbrMUs>5$Yd}eu&%(;B}wL`x=9oxtmIn0sfj46oEKcy?)Y_*uAF(^ $g~!vB#C6Cl=<5E(M?sAY8y(|N-$2UL?s2UxgvE*t~q&6(ncw8n6D@9&;y3rm4&Oc zvaWe?e<~o*8nxDaf>$I(ra<+wSF74VH>r*Hr~V-x&v(Bi?MG)NP#P5duL)wg%mZV bycuTUv}BI{oV0*4npfOw2IZsLmoFUtF`a= M9ZK>%Mf<72`~Ti*;Cd@$7XLk8Or)J(G3@itX3!1J^~o~Dz=7D$f~JLU<(^#^~~Cz TXTgz d~4}Z%F2{dN1%&rM%!kkJkP=4^0Kf?)a7Vr$A;3!0C5;Z2zSqM=$IHsCaz1BW)mrm l2m5Sy8T90!om-x|d*%-A + +#include <torch/extension.h> + +#include <cmath> +#include <vector> + +int MaskedIm2colForwardLaucher(const at::Tensor im, const int height, + const int width, const int channels, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const at::Tensor mask_h_idx, + const at::Tensor mask_w_idx, const int mask_cnt, + at::Tensor col); + +int MaskedCol2imForwardLaucher(const at::Tensor col, const int height, + 
const int width, const int channels, + const at::Tensor mask_h_idx, + const at::Tensor mask_w_idx, const int mask_cnt, + at::Tensor im); + +#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x, " must be a CUDA tensor ") +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +int masked_im2col_forward_cuda(const at::Tensor im, const at::Tensor mask_h_idx, + const at::Tensor mask_w_idx, const int kernel_h, + const int kernel_w, const int pad_h, + const int pad_w, at::Tensor col) { + CHECK_INPUT(im); + CHECK_INPUT(mask_h_idx); + CHECK_INPUT(mask_w_idx); + CHECK_INPUT(col); + // im: (n, ic, h, w), kernel size (kh, kw) + // kernel: (oc, ic * kh * kw), col: (kh * kw * ic, ow * oh) + + int channels = im.size(1); + int height = im.size(2); + int width = im.size(3); + int mask_cnt = mask_h_idx.size(0); + + MaskedIm2colForwardLaucher(im, height, width, channels, kernel_h, kernel_w, + pad_h, pad_w, mask_h_idx, mask_w_idx, mask_cnt, + col); + + return 1; +} + +int masked_col2im_forward_cuda(const at::Tensor col, + const at::Tensor mask_h_idx, + const at::Tensor mask_w_idx, int height, + int width, int channels, at::Tensor im) { + CHECK_INPUT(col); + CHECK_INPUT(mask_h_idx); + CHECK_INPUT(mask_w_idx); + CHECK_INPUT(im); + // im: (n, ic, h, w), kernel size (kh, kw) + // kernel: (oc, ic * kh * kw), col: (kh * kw * ic, ow * oh) + + int mask_cnt = mask_h_idx.size(0); + + MaskedCol2imForwardLaucher(col, height, width, channels, mask_h_idx, + mask_w_idx, mask_cnt, im); + + return 1; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("masked_im2col_forward", &masked_im2col_forward_cuda, + "masked_im2col forward (CUDA)"); + m.def("masked_col2im_forward", &masked_col2im_forward_cuda, + "masked_col2im forward (CUDA)"); +} diff --git a/CDARTS_detection/mmdet/ops/masked_conv/src/masked_conv2d_kernel.cu b/CDARTS_detection/mmdet/ops/masked_conv/src/masked_conv2d_kernel.cu new file mode 100644 index 0000000..394af13 --- /dev/null +++ b/CDARTS_detection/mmdet/ops/masked_conv/src/masked_conv2d_kernel.cu @@ -0,0 +1,113 @@ +#include <ATen/ATen.h> +#include <THC/THCAtomics.cuh> + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +#define THREADS_PER_BLOCK 1024 + +inline int GET_BLOCKS(const int N) { + int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; + int max_block_num = 65000; + return min(optimal_block_num, max_block_num); +} + +template <typename scalar_t> +__global__ void MaskedIm2colForward(const int n, const scalar_t *data_im, + const int height, const int width, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const long *mask_h_idx, + const long *mask_w_idx, const int mask_cnt, + scalar_t *data_col) { + // mask_cnt * channels + CUDA_1D_KERNEL_LOOP(index, n) { + const int m_index = index % mask_cnt; + const int h_col = mask_h_idx[m_index]; + const int w_col = mask_w_idx[m_index]; + const int c_im = index / mask_cnt; + const int c_col = c_im * kernel_h * kernel_w; + const int h_offset = h_col - pad_h; + const int w_offset = w_col - pad_w; + scalar_t *data_col_ptr = data_col + c_col * mask_cnt + m_index; + for (int i = 0; i < kernel_h; ++i) { + int h_im = h_offset + i; + for (int j = 0; j < kernel_w; ++j) { + int w_im = w_offset + j; + if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + *data_col_ptr = + (scalar_t)data_im[(c_im * height + h_im) * width + w_im]; + } else { + *data_col_ptr = 0.0; + } + 
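// step one row down the (in_channels * kernel_h * kernel_w, mask_cnt) column buffer to the next kernel tap +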
data_col_ptr += mask_cnt; + } + } + } +} + +int MaskedIm2colForwardLaucher(const at::Tensor bottom_data, const int height, + const int width, const int channels, + const int kernel_h, const int kernel_w, + const int pad_h, const int pad_w, + const at::Tensor mask_h_idx, + const at::Tensor mask_w_idx, const int mask_cnt, + at::Tensor top_data) { + const int output_size = mask_cnt * channels; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + bottom_data.type(), "MaskedIm2colLaucherForward", ([&] { + const scalar_t *bottom_data_ = bottom_data.data<scalar_t>(); + const long *mask_h_idx_ = mask_h_idx.data<long>(); + const long *mask_w_idx_ = mask_w_idx.data<long>(); + scalar_t *top_data_ = top_data.data<scalar_t>(); + MaskedIm2colForward<scalar_t> + <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( + output_size, bottom_data_, height, width, kernel_h, kernel_w, + pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_); + })); + THCudaCheck(cudaGetLastError()); + return 1; +} + +template <typename scalar_t> +__global__ void MaskedCol2imForward(const int n, const scalar_t *data_col, + const int height, const int width, + const int channels, const long *mask_h_idx, + const long *mask_w_idx, const int mask_cnt, + scalar_t *data_im) { + CUDA_1D_KERNEL_LOOP(index, n) { + const int m_index = index % mask_cnt; + const int h_im = mask_h_idx[m_index]; + const int w_im = mask_w_idx[m_index]; + const int c_im = index / mask_cnt; + // int kernel_extent_w = (kernel_w - 1) + 1; + // int kernel_extent_h = (kernel_h - 1) + 1; + // compute the start and end of the output + data_im[(c_im * height + h_im) * width + w_im] = data_col[index]; + } +} + +int MaskedCol2imForwardLaucher(const at::Tensor bottom_data, const int height, + const int width, const int channels, + const at::Tensor mask_h_idx, + const at::Tensor mask_w_idx, const int mask_cnt, + at::Tensor top_data) { + const int output_size = mask_cnt * channels; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + bottom_data.type(), "MaskedCol2imLaucherForward", ([&] { + const scalar_t *bottom_data_ = bottom_data.data<scalar_t>(); + const long *mask_h_idx_ = mask_h_idx.data<long>(); + const long *mask_w_idx_ = mask_w_idx.data<long>(); + scalar_t *top_data_ = top_data.data<scalar_t>(); + + MaskedCol2imForward<scalar_t> + <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( + output_size, bottom_data_, height, width, channels, mask_h_idx_, + mask_w_idx_, mask_cnt, top_data_); + })); + THCudaCheck(cudaGetLastError()); + return 1; +} diff --git a/CDARTS_detection/mmdet/ops/nms/__init__.py b/CDARTS_detection/mmdet/ops/nms/__init__.py new file mode 100644 index 0000000..c440704 --- /dev/null +++ b/CDARTS_detection/mmdet/ops/nms/__init__.py @@ -0,0 +1,3 @@ +from .nms_wrapper import nms, soft_nms + +__all__ = ['nms', 'soft_nms'] diff --git a/CDARTS_detection/mmdet/ops/nms/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/nms/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e29b985946f86f50d21749d01db6222f24308a26 GIT binary patch literal 234 zcmYL@O$x#=5QUS}Ka_$8@B-Z!(5%crHqB0srFz&OEML)R19$R85 zy8xq@xjW8a`2|>VwqLj)2cC&VsYc_5QPZ=dx*52pud0b2oy^DR13o=y0km7t)bAj) z(>mJCy}2hZGk4~<&MXjL*;PLeV0D{$unJxt#PS*oM9jiGY`wWV3qdbxow=L$+7RY} zSk*C$@=zzZiofsOHpo-fyW+6kfhWI%*#*`Ey*`Vddb3CnSXq)k>E--l8?^2m*ZO+p zeAfE%*EY&8&7FL8?#$r6Hex>WfAl8a6&O!E$VWn-d5&{CQ#IA&DbW?-nVxbmx#?
zq(>WPMh~;1K&?h`X6%YPU`=_=5rx0x&fGMkpo`q(*#ff)e;Qqwhqo_fC_Z8 %lcW$8P$LpEDBZs@e)s1gmuZdv%774@b$QdFYVTo+3`oV6lJ5}!Ug;-6y+Rw>tqk0 x?gGXWF{>GxF%$H?cf!YW-1d(Q!(Zyw3RhcXT^A;27c%&Ya*ylNwrBQvMNUw*T&P8 Np```7gAQz1ZdKPYD%F?1V0=db6F`96qVRFy{w}giOgbrNUcO(kr7TqYjDsr0mcPq zJEWN8tct@J^zA7iH#h@8GEui{0!Xxa=Eyg})XQ6&)2igxK#KjNhKkMc5u<8T)s@;T zOHk!wL{T^1tZKChu3#G-N?O-kZq!HeGFXbCsrNuQt9>_cBNydmch&Qt@B7Oziri(d z@4?Kq8YU_-!Kr+XN{;jA&FB9-`}>c3d-rdS!*0m|s<)JC>GU_4EYRsYK&N5rF0?~&xw--Nd7;2vtH}9y@Q_2HBe`sAoU`#>V6xw ZVS6WgMJ$Tp@!dhvv`)Yq5K8zU25aGYx@KUcbO$mfiicTHqI~VC|`lS#Eve_U0i1l 5NW!-d*}ay5jfa+fnwmvx1bxw`?OIiqvH7LUGbO}KzSwC)2btf9r!>2L2Lnz2fm5A 9ROUmUs98lnHX8d8MqR^T7y-IC6oyY%sb*m)A7_oPQHfA7f|$3te{vzaS4P;zotdg ;aXztOp18!h+V#l3lUWo?>$Rq*RPonIO@Y2gH*nbR<59U7X?B{z7N7A0O8KW35G>I >~~R&#uejg7~?GnZ}|plA)q^P74_rOFZ2zVsdW&J1?8%hQ3p&fbz^tg_3SSK*#UIf lOMqOfxA8MZLTGOseBvPl7(bowv1z6((e7l=q +#include <torch/extension.h> + +template <typename scalar_t> +at::Tensor nms_cpu_kernel(const at::Tensor& dets, const float threshold) { + AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor"); + + if (dets.numel() == 0) { + return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); + } + + auto x1_t = dets.select(1, 0).contiguous(); + auto y1_t = dets.select(1, 1).contiguous(); + auto x2_t = dets.select(1, 2).contiguous(); + auto y2_t = dets.select(1, 3).contiguous(); + auto scores = dets.select(1, 4).contiguous(); + + at::Tensor areas_t = (x2_t - x1_t + 1) * (y2_t - y1_t + 1); + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + + auto ndets = dets.size(0); + at::Tensor suppressed_t = + at::zeros({ndets}, dets.options().dtype(at::kByte).device(at::kCPU)); + + auto suppressed = suppressed_t.data<uint8_t>(); + auto order = order_t.data<int64_t>(); + auto x1 = x1_t.data<scalar_t>(); + auto y1 = y1_t.data<scalar_t>(); + auto x2 = x2_t.data<scalar_t>(); + auto y2 = y2_t.data<scalar_t>(); + auto areas = areas_t.data<scalar_t>(); + + for (int64_t _i = 0; _i < ndets; _i++) { + auto i = order[_i]; + if (suppressed[i] == 1) continue; + auto ix1 = x1[i]; + auto iy1 = y1[i]; + auto ix2 = x2[i]; + auto iy2 = y2[i]; + auto iarea = areas[i]; + + for (int64_t _j = _i + 1; _j < ndets; _j++) { + auto j = order[_j]; + if (suppressed[j] == 1) continue; + auto xx1 = std::max(ix1, x1[j]); + auto yy1 = std::max(iy1, y1[j]); + auto xx2 = std::min(ix2, x2[j]); + auto yy2 = std::min(iy2, y2[j]); + + auto w = std::max(static_cast<scalar_t>(0), xx2 - xx1 + 1); + auto h = std::max(static_cast<scalar_t>(0), yy2 - yy1 + 1); + auto inter = w * h; + auto ovr = inter / (iarea + areas[j] - inter); + if (ovr >= threshold) suppressed[j] = 1; + } + } + return at::nonzero(suppressed_t == 0).squeeze(1); +} + +at::Tensor nms(const at::Tensor& dets, const float threshold) { + at::Tensor result; + AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms", [&] { + result = nms_cpu_kernel<scalar_t>(dets, threshold); + }); + return result; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("nms", &nms, "non-maximum suppression"); +} \ No newline at end of file diff --git a/CDARTS_detection/mmdet/ops/nms/src/nms_cuda.cpp b/CDARTS_detection/mmdet/ops/nms/src/nms_cuda.cpp new file mode 100644 index 0000000..f53359d --- /dev/null +++ b/CDARTS_detection/mmdet/ops/nms/src/nms_cuda.cpp @@ -0,0 +1,17 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
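+// CUDA entry point for NMS: validates that dets lives on the GPU, then defers to nms_cuda in nms_kernel.cu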
+#include <torch/extension.h> + +#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x, " must be a CUDA tensor ") + +at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh); + +at::Tensor nms(const at::Tensor& dets, const float threshold) { + CHECK_CUDA(dets); + if (dets.numel() == 0) + return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); + return nms_cuda(dets, threshold); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("nms", &nms, "non-maximum suppression"); +} diff --git a/CDARTS_detection/mmdet/ops/nms/src/nms_kernel.cu b/CDARTS_detection/mmdet/ops/nms/src/nms_kernel.cu new file mode 100644 index 0000000..9254f2a --- /dev/null +++ b/CDARTS_detection/mmdet/ops/nms/src/nms_kernel.cu @@ -0,0 +1,131 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +#include <ATen/ATen.h> +#include <ATen/cuda/CUDAContext.h> + +#include <THC/THC.h> +#include <THC/THCDeviceUtils.cuh> + +#include <vector> +#include <iostream> + +int const threadsPerBlock = sizeof(unsigned long long) * 8; + +__device__ inline float devIoU(float const * const a, float const * const b) { + float left = max(a[0], b[0]), right = min(a[2], b[2]); + float top = max(a[1], b[1]), bottom = min(a[3], b[3]); + float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); + float interS = width * height; + float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); + float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); + return interS / (Sa + Sb - interS); +} + +__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, + const float *dev_boxes, unsigned long long *dev_mask) { + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = + min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + __shared__ float block_boxes[threadsPerBlock * 5]; + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 5 + 0] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; + block_boxes[threadIdx.x * 5 + 1] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; + block_boxes[threadIdx.x * 5 + 2] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; + block_boxes[threadIdx.x * 5 + 3] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; + block_boxes[threadIdx.x * 5 + 4] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; + const float *cur_box = dev_boxes + cur_box_idx * 5; + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { + t |= 1ULL << i; + } + } + const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); + dev_mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +// boxes is a N x 5 tensor +at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) { + using scalar_t = float; + AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); + auto scores = boxes.select(1, 4); + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + auto boxes_sorted = boxes.index_select(0, order_t); + + int boxes_num = boxes.size(0); + + const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); + + scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); + + THCState *state = at::globalContext().lazyInitCUDA(); // TODO 
+
+  unsigned long long* mask_dev = NULL;
+  //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
+  //                      boxes_num * col_blocks * sizeof(unsigned long long)));
+
+  mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
+
+  dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
+              THCCeilDiv(boxes_num, threadsPerBlock));
+  dim3 threads(threadsPerBlock);
+  nms_kernel<<<blocks, threads>>>(boxes_num,
+                                  nms_overlap_thresh,
+                                  boxes_dev,
+                                  mask_dev);
+
+  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
+  THCudaCheck(cudaMemcpy(&mask_host[0],
+                        mask_dev,
+                        sizeof(unsigned long long) * boxes_num * col_blocks,
+                        cudaMemcpyDeviceToHost));
+
+  std::vector<unsigned long long> remv(col_blocks);
+  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
+
+  at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
+  int64_t* keep_out = keep.data<int64_t>();
+
+  int num_to_keep = 0;
+  for (int i = 0; i < boxes_num; i++) {
+    int nblock = i / threadsPerBlock;
+    int inblock = i % threadsPerBlock;
+
+    if (!(remv[nblock] & (1ULL << inblock))) {
+      keep_out[num_to_keep++] = i;
+      unsigned long long *p = &mask_host[0] + i * col_blocks;
+      for (int j = nblock; j < col_blocks; j++) {
+        remv[j] |= p[j];
+      }
+    }
+  }
+
+  THCudaFree(state, mask_dev);
+  // TODO improve this part
+  return std::get<0>(order_t.index({
+                       keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
+                         order_t.device(), keep.scalar_type())
+                     }).sort(0, false));
+}
\ No newline at end of file
diff --git a/CDARTS_detection/mmdet/ops/nms/src/soft_nms_cpu.cpp b/CDARTS_detection/mmdet/ops/nms/src/soft_nms_cpu.cpp
new file mode 100644
index 0000000..763ee4c
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/nms/src/soft_nms_cpu.cpp
@@ -0,0 +1,10249 @@
+/* Generated by Cython 0.28.3 */
+
+/* BEGIN: Cython Metadata
+{
+  "distutils": {
+    "depends": [
+      "/home/work/anaconda3/lib/python3.6/site-packages/numpy/core/include/numpy/arrayobject.h",
+      "/home/work/anaconda3/lib/python3.6/site-packages/numpy/core/include/numpy/ufuncobject.h"
+    ],
+    "extra_compile_args": {
+      "cc": [
+        "-Wno-unused-function",
+        "-Wno-write-strings"
+      ],
+      "nvcc": [
+        "-c",
+        "--compiler-options",
+        "-fPIC"
+      ]
+    },
+    "include_dirs": [
+      "/home/work/anaconda3/lib/python3.6/site-packages/numpy/core/include"
+    ],
+    "language": "c++",
+    "name": "soft_nms_cpu",
+    "sources": [
+      "src/soft_nms_cpu.pyx"
+    ]
+  },
+  "module_name": "soft_nms_cpu"
+}
+END: Cython Metadata */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+    #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+    #error Cython requires Python 2.6+ or Python 3.3+.
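+/* The remainder of this file is machine-generated by Cython 0.28.3 from
+   src/soft_nms_cpu.pyx (see the metadata block above): several hundred lines
+   of CPython version/compatibility shims precede the translated soft-NMS code. */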
+#else +#define CYTHON_ABI "0_28_3" +#define CYTHON_FUTURE_DIVISION 1 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define 
CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define 
CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef __cplusplus + #error "Cython files generated with the C++ option must be compiled with a C++ compiler." +#endif +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #else + #define CYTHON_INLINE inline + #endif +#endif +template +void __Pyx_call_destructor(T& x) { + x.~T(); +} +template +class __Pyx_FakeReference { + public: + __Pyx_FakeReference() : ptr(NULL) { } + __Pyx_FakeReference(const T& ref) : ptr(const_cast(&ref)) { } + T *operator->() { return ptr; } + T *operator&() { return ptr; } + operator T&() { return *ptr; } + template bool operator ==(U other) { return *ptr == other; } + template bool operator !=(U other) { return *ptr != other; } + private: + T *ptr; +}; + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) 
+#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; // PyThread_create_key reports success always +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif // TSS (Thread Specific Storage) API +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact + #define PyObject_Unicode PyObject_Str +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + 
#endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + + +#define __PYX_ERR(f_index, lineno, Ln_error) \ +{ \ + __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ +} + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__soft_nms_cpu +#define __PYX_HAVE_API__soft_nms_cpu +/* Early includes */ +#include +#include +#include "numpy/arrayobject.h" +#include "numpy/ufuncobject.h" +#ifdef _OPENMP +#include +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +#if defined (__cplusplus) && __cplusplus >= 201103L + #include + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) 
__builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ 
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + +/* Header.proto */ +#if !defined(CYTHON_CCOMPLEX) + #if defined(__cplusplus) + #define CYTHON_CCOMPLEX 1 + #elif defined(_Complex_I) + #define CYTHON_CCOMPLEX 1 + #else + #define CYTHON_CCOMPLEX 0 + #endif +#endif +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #include + #else + #include + #endif +#endif +#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) + #undef _Complex_I + #define _Complex_I 1.0fj +#endif + + +static const char *__pyx_f[] = { + "src/soft_nms_cpu.pyx", + "__init__.pxd", + "type.pxd", +}; +/* BufferFormatStructs.proto */ +#define IS_UNSIGNED(type) (((type) -1) > 0) +struct __Pyx_StructField_; +#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) +typedef struct { + const char* name; + struct __Pyx_StructField_* fields; + size_t size; + size_t arraysize[8]; + int ndim; + char typegroup; + char is_unsigned; + int flags; +} __Pyx_TypeInfo; +typedef struct __Pyx_StructField_ { + __Pyx_TypeInfo* type; + const char* name; + size_t offset; +} __Pyx_StructField; +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char is_valid_array; +} __Pyx_BufFmt_Context; + + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":730 + * # in Cython to enable them only on the right systems. 
+ * + * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + */ +typedef npy_int8 __pyx_t_5numpy_int8_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":731 + * + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t + */ +typedef npy_int16 __pyx_t_5numpy_int16_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":732 + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< + * ctypedef npy_int64 int64_t + * #ctypedef npy_int96 int96_t + */ +typedef npy_int32 __pyx_t_5numpy_int32_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":733 + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< + * #ctypedef npy_int96 int96_t + * #ctypedef npy_int128 int128_t + */ +typedef npy_int64 __pyx_t_5numpy_int64_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":737 + * #ctypedef npy_int128 int128_t + * + * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + */ +typedef npy_uint8 __pyx_t_5numpy_uint8_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":738 + * + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t + */ +typedef npy_uint16 __pyx_t_5numpy_uint16_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":739 + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< + * ctypedef npy_uint64 uint64_t + * #ctypedef npy_uint96 uint96_t + */ +typedef npy_uint32 __pyx_t_5numpy_uint32_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":740 + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< + * #ctypedef npy_uint96 uint96_t + * #ctypedef npy_uint128 uint128_t + */ +typedef npy_uint64 __pyx_t_5numpy_uint64_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":744 + * #ctypedef npy_uint128 uint128_t + * + * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< + * ctypedef npy_float64 float64_t + * #ctypedef npy_float80 float80_t + */ +typedef npy_float32 __pyx_t_5numpy_float32_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":745 + * + * ctypedef npy_float32 float32_t + * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< + * #ctypedef npy_float80 float80_t + * #ctypedef npy_float128 float128_t + */ +typedef npy_float64 __pyx_t_5numpy_float64_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":754 + * # The int types are mapped a bit surprising -- + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t + */ +typedef npy_long __pyx_t_5numpy_int_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":755 + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t + * 
ctypedef npy_longlong long_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong longlong_t + * + */ +typedef npy_longlong __pyx_t_5numpy_long_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":756 + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_ulong uint_t + */ +typedef npy_longlong __pyx_t_5numpy_longlong_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":758 + * ctypedef npy_longlong longlong_t + * + * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t + */ +typedef npy_ulong __pyx_t_5numpy_uint_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":759 + * + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulonglong_t + * + */ +typedef npy_ulonglong __pyx_t_5numpy_ulong_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":760 + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_intp intp_t + */ +typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":762 + * ctypedef npy_ulonglong ulonglong_t + * + * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< + * ctypedef npy_uintp uintp_t + * + */ +typedef npy_intp __pyx_t_5numpy_intp_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":763 + * + * ctypedef npy_intp intp_t + * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< + * + * ctypedef npy_double float_t + */ +typedef npy_uintp __pyx_t_5numpy_uintp_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":765 + * ctypedef npy_uintp uintp_t + * + * ctypedef npy_double float_t # <<<<<<<<<<<<<< + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t + */ +typedef npy_double __pyx_t_5numpy_float_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":766 + * + * ctypedef npy_double float_t + * ctypedef npy_double double_t # <<<<<<<<<<<<<< + * ctypedef npy_longdouble longdouble_t + * + */ +typedef npy_double __pyx_t_5numpy_double_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":767 + * ctypedef npy_double float_t + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cfloat cfloat_t + */ +typedef npy_longdouble __pyx_t_5numpy_longdouble_t; +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< float > __pyx_t_float_complex; + #else + typedef float _Complex __pyx_t_float_complex; + #endif +#else + typedef struct { float real, imag; } __pyx_t_float_complex; +#endif +static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); + +/* Declarations.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + typedef ::std::complex< double > __pyx_t_double_complex; + #else + typedef double _Complex __pyx_t_double_complex; + #endif +#else + typedef struct { double real, imag; } __pyx_t_double_complex; +#endif +static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); + + +/*--- Type declarations ---*/ + +/* 
"../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":769 + * ctypedef npy_longdouble longdouble_t + * + * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t + */ +typedef npy_cfloat __pyx_t_5numpy_cfloat_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":770 + * + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< + * ctypedef npy_clongdouble clongdouble_t + * + */ +typedef npy_cdouble __pyx_t_5numpy_cdouble_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":771 + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cdouble complex_t + */ +typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":773 + * ctypedef npy_clongdouble clongdouble_t + * + * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew1(a): + */ +typedef npy_cdouble __pyx_t_5numpy_complex_t; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define 
__Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* BufferGetAndValidate.proto */ +#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)\ + ((obj == Py_None || obj == NULL) ?\ + (__Pyx_ZeroBuffer(buf), 0) :\ + __Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack)) +static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj, + __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); +static void __Pyx_ZeroBuffer(Py_buffer* buf); +static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); +static Py_ssize_t __Pyx_minusones[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; +static Py_ssize_t __Pyx_zeros[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#endif + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define 
__Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* PyObjectCallNoArg.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); +#else +#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) +#endif + +/* GetItemInt.proto */ +#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ + (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ + __Pyx_GetItemInt_Generic(o, to_py_func(i)))) +#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound, int boundscheck); + +/* GetModuleGlobalName.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); + +/* ObjectGetItem.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); +#else +#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) +#endif + +/* PyIntBinop.proto */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace); +#else +#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\ + (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) +#endif + +/* SetItemInt.proto */ +#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\ + (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\ + __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) +static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); +static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, + int is_list, int wraparound, int boundscheck); + +/* SliceObject.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( + PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, + PyObject** py_start, PyObject** py_stop, PyObject** py_slice, + int has_cstart, int has_cstop, int wraparound); + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* DictGetItem.proto */ +#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY +static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key); +#define __Pyx_PyObject_Dict_GetItem(obj, name)\ + (likely(PyDict_CheckExact(obj)) ?\ + __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) +#else +#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) +#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int 
__Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* RealImag.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif 
+#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif +#if defined(__cplusplus) && CYTHON_CCOMPLEX\ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_float(a, b) ((a)==(b)) + #define __Pyx_c_sum_float(a, b) ((a)+(b)) + #define __Pyx_c_diff_float(a, b) ((a)-(b)) + #define __Pyx_c_prod_float(a, b) ((a)*(b)) + #define __Pyx_c_quot_float(a, b) ((a)/(b)) + #define __Pyx_c_neg_float(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_float(z) ((z)==(float)0) + #define __Pyx_c_conj_float(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_float(z) (::std::abs(z)) + #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_float(z) ((z)==0) + #define __Pyx_c_conj_float(z) (conjf(z)) + #if 1 + #define __Pyx_c_abs_float(z) (cabsf(z)) + #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); + #endif +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX + #define __Pyx_c_eq_double(a, b) ((a)==(b)) + #define __Pyx_c_sum_double(a, b) ((a)+(b)) + #define __Pyx_c_diff_double(a, b) ((a)-(b)) + #define __Pyx_c_prod_double(a, b) ((a)*(b)) + #define __Pyx_c_quot_double(a, b) ((a)/(b)) + #define __Pyx_c_neg_double(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_double(z) ((z)==(double)0) + #define __Pyx_c_conj_double(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (::std::abs(z)) + #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_double(z) ((z)==0) + #define __Pyx_c_conj_double(z) (conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (cabs(z)) + #define __Pyx_c_pow_double(a, b) (cpow(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, 
__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); + #endif +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* PyIdentifierFromString.proto */ +#if !defined(__Pyx_PyIdentifier_FromString) +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) +#else + #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) +#endif +#endif + +/* ModuleImport.proto */ +static PyObject *__Pyx_ImportModule(const char *name); + +/* TypeImport.proto */ +static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'numpy' */ + +/* Module declarations from 'numpy' */ +static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; +static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; +static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; +static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; +static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ + +/* Module declarations from 'soft_nms_cpu' */ +static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_12soft_nms_cpu_max(__pyx_t_5numpy_float32_t, __pyx_t_5numpy_float32_t); /*proto*/ +static CYTHON_INLINE 
__pyx_t_5numpy_float32_t __pyx_f_12soft_nms_cpu_min(__pyx_t_5numpy_float32_t, __pyx_t_5numpy_float32_t); /*proto*/ +static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "soft_nms_cpu" +extern int __pyx_module_is_main_soft_nms_cpu; +int __pyx_module_is_main_soft_nms_cpu = 0; + +/* Implementation of 'soft_nms_cpu' */ +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_RuntimeError; +static PyObject *__pyx_builtin_ImportError; +static const char __pyx_k_N[] = "N"; +static const char __pyx_k_i[] = "i"; +static const char __pyx_k_s[] = "s"; +static const char __pyx_k_ih[] = "ih"; +static const char __pyx_k_iw[] = "iw"; +static const char __pyx_k_np[] = "np"; +static const char __pyx_k_ov[] = "ov"; +static const char __pyx_k_ti[] = "ti"; +static const char __pyx_k_ts[] = "ts"; +static const char __pyx_k_ua[] = "ua"; +static const char __pyx_k_x1[] = "x1"; +static const char __pyx_k_x2[] = "x2"; +static const char __pyx_k_y1[] = "y1"; +static const char __pyx_k_y2[] = "y2"; +static const char __pyx_k_exp[] = "exp"; +static const char __pyx_k_pos[] = "pos"; +static const char __pyx_k_tx1[] = "tx1"; +static const char __pyx_k_tx2[] = "tx2"; +static const char __pyx_k_ty1[] = "ty1"; +static const char __pyx_k_ty2[] = "ty2"; +static const char __pyx_k_area[] = "area"; +static const char __pyx_k_copy[] = "copy"; +static const char __pyx_k_inds[] = "inds"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_boxes[] = "boxes"; +static const char __pyx_k_numpy[] = "numpy"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_shape[] = "shape"; +static const char __pyx_k_sigma[] = "sigma"; +static const char __pyx_k_arange[] = "arange"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_maxpos[] = "maxpos"; +static const char __pyx_k_method[] = "method"; +static const char __pyx_k_weight[] = "weight"; +static const char __pyx_k_iou_thr[] = "iou_thr"; +static const char __pyx_k_box_area[] = "box_area"; +static const char __pyx_k_boxes_in[] = "boxes_in"; +static const char __pyx_k_maxscore[] = "maxscore"; +static const char __pyx_k_min_score[] = "min_score"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_ImportError[] = "ImportError"; +static const char __pyx_k_RuntimeError[] = "RuntimeError"; +static const char __pyx_k_soft_nms_cpu[] = "soft_nms_cpu"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_src_soft_nms_cpu_pyx[] = "src/soft_nms_cpu.pyx"; +static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; +static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; +static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; +static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; +static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; +static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; +static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; +static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; 
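+
+/* Reader's note: this file is machine-generated by Cython from
+ * src/soft_nms_cpu.pyx, and the original source survives only in the
+ * interleaved  "soft_nms_cpu.pyx":NN  comments below.  As an orientation
+ * aid, the Cython function is reassembled here from those fragments
+ * (keyword defaults recovered from the generated argument-parsing
+ * wrapper); it is a reconstruction for reading, not a substitute for
+ * the .pyx file:
+ *
+ *     cdef inline np.float32_t max(np.float32_t a, np.float32_t b):
+ *         return a if a >= b else b
+ *
+ *     cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
+ *         return a if a <= b else b
+ *
+ *     def soft_nms_cpu(
+ *         np.ndarray[float, ndim=2] boxes_in,  # each row: [x1, y1, x2, y2, score]
+ *         float iou_thr,
+ *         unsigned int method=1,               # 1: linear, 2: gaussian, else: original NMS
+ *         float sigma=0.5,
+ *         float min_score=0.001,
+ *     ):
+ *         boxes = boxes_in.copy()
+ *         cdef unsigned int N = boxes.shape[0]
+ *         inds = np.arange(N)        # original indices, permuted alongside boxes
+ *         for i in range(N):
+ *             # selection step: scan the tail for the highest remaining score,
+ *             # then swap that box (and its index) into row i
+ *             maxscore, maxpos = boxes[i, 4], i
+ *             tx1, ty1, tx2, ty2, ts = boxes[i, 0], boxes[i, 1], boxes[i, 2], boxes[i, 3], boxes[i, 4]
+ *             ti = inds[i]
+ *             pos = i + 1
+ *             while pos < N:
+ *                 if maxscore < boxes[pos, 4]:
+ *                     maxscore, maxpos = boxes[pos, 4], pos
+ *                 pos = pos + 1
+ *             # (the generated code spells these swaps out field by field)
+ *             boxes[i, :], inds[i] = boxes[maxpos, :], inds[maxpos]
+ *             boxes[maxpos, :], inds[maxpos] = (tx1, ty1, tx2, ty2, ts), ti
+ *             tx1, ty1, tx2, ty2, ts = boxes[i, 0], boxes[i, 1], boxes[i, 2], boxes[i, 3], boxes[i, 4]
+ *             # rescoring sweep; N shrinks when boxes fall below the threshold
+ *             pos = i + 1
+ *             while pos < N:
+ *                 x1, y1, x2, y2 = boxes[pos, 0], boxes[pos, 1], boxes[pos, 2], boxes[pos, 3]
+ *                 area = (x2 - x1 + 1) * (y2 - y1 + 1)
+ *                 iw = min(tx2, x2) - max(tx1, x1) + 1
+ *                 if iw > 0:
+ *                     ih = min(ty2, y2) - max(ty1, y1) + 1
+ *                     if ih > 0:
+ *                         ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
+ *                         ov = iw * ih / ua  # IoU between max box and detection box
+ *                         if method == 1:    # linear
+ *                             weight = 1 - ov if ov > iou_thr else 1
+ *                         elif method == 2:  # gaussian
+ *                             weight = np.exp(-(ov * ov) / sigma)
+ *                         else:              # original NMS; continues past this hunk
+ *                             ...
+ */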
+static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; +static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; +static PyObject *__pyx_n_s_ImportError; +static PyObject *__pyx_n_s_N; +static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; +static PyObject *__pyx_n_s_RuntimeError; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_n_s_arange; +static PyObject *__pyx_n_s_area; +static PyObject *__pyx_n_s_box_area; +static PyObject *__pyx_n_s_boxes; +static PyObject *__pyx_n_s_boxes_in; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_copy; +static PyObject *__pyx_n_s_exp; +static PyObject *__pyx_n_s_i; +static PyObject *__pyx_n_s_ih; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_inds; +static PyObject *__pyx_n_s_iou_thr; +static PyObject *__pyx_n_s_iw; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_maxpos; +static PyObject *__pyx_n_s_maxscore; +static PyObject *__pyx_n_s_method; +static PyObject *__pyx_n_s_min_score; +static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; +static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; +static PyObject *__pyx_n_s_np; +static PyObject *__pyx_n_s_numpy; +static PyObject *__pyx_kp_u_numpy_core_multiarray_failed_to; +static PyObject *__pyx_kp_u_numpy_core_umath_failed_to_impor; +static PyObject *__pyx_n_s_ov; +static PyObject *__pyx_n_s_pos; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_s; +static PyObject *__pyx_n_s_shape; +static PyObject *__pyx_n_s_sigma; +static PyObject *__pyx_n_s_soft_nms_cpu; +static PyObject *__pyx_kp_s_src_soft_nms_cpu_pyx; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_ti; +static PyObject *__pyx_n_s_ts; +static PyObject *__pyx_n_s_tx1; +static PyObject *__pyx_n_s_tx2; +static PyObject *__pyx_n_s_ty1; +static PyObject *__pyx_n_s_ty2; +static PyObject *__pyx_n_s_ua; +static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; +static PyObject *__pyx_n_s_weight; +static PyObject *__pyx_n_s_x1; +static PyObject *__pyx_n_s_x2; +static PyObject *__pyx_n_s_y1; +static PyObject *__pyx_n_s_y2; +static PyObject *__pyx_pf_12soft_nms_cpu_soft_nms_cpu(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_boxes_in, float __pyx_v_iou_thr, unsigned int __pyx_v_method, float __pyx_v_sigma, float __pyx_v_min_score); /* proto */ +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ +static PyObject *__pyx_int_0; +static PyObject *__pyx_int_1; +static PyObject *__pyx_int_2; +static PyObject *__pyx_int_3; +static PyObject *__pyx_int_4; +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_tuple__10; +static PyObject *__pyx_codeobj__11; +/* Late includes */ + +/* "soft_nms_cpu.pyx":15 + * + * + * cdef inline np.float32_t max(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<< + * return a if a >= b else b + * + */ + +static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_12soft_nms_cpu_max(__pyx_t_5numpy_float32_t __pyx_v_a, __pyx_t_5numpy_float32_t __pyx_v_b) { + __pyx_t_5numpy_float32_t __pyx_r; + __Pyx_RefNannyDeclarations + 
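+  /* the __Pyx_RefNanny* macros are reference-count debugging hooks; they
+     compile to no-ops unless Cython's CYTHON_REFNANNY tracing is enabled
+     at build time */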
__pyx_t_5numpy_float32_t __pyx_t_1; + __Pyx_RefNannySetupContext("max", 0); + + /* "soft_nms_cpu.pyx":16 + * + * cdef inline np.float32_t max(np.float32_t a, np.float32_t b): + * return a if a >= b else b # <<<<<<<<<<<<<< + * + * cdef inline np.float32_t min(np.float32_t a, np.float32_t b): + */ + if (((__pyx_v_a >= __pyx_v_b) != 0)) { + __pyx_t_1 = __pyx_v_a; + } else { + __pyx_t_1 = __pyx_v_b; + } + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* "soft_nms_cpu.pyx":15 + * + * + * cdef inline np.float32_t max(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<< + * return a if a >= b else b + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "soft_nms_cpu.pyx":18 + * return a if a >= b else b + * + * cdef inline np.float32_t min(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<< + * return a if a <= b else b + * + */ + +static CYTHON_INLINE __pyx_t_5numpy_float32_t __pyx_f_12soft_nms_cpu_min(__pyx_t_5numpy_float32_t __pyx_v_a, __pyx_t_5numpy_float32_t __pyx_v_b) { + __pyx_t_5numpy_float32_t __pyx_r; + __Pyx_RefNannyDeclarations + __pyx_t_5numpy_float32_t __pyx_t_1; + __Pyx_RefNannySetupContext("min", 0); + + /* "soft_nms_cpu.pyx":19 + * + * cdef inline np.float32_t min(np.float32_t a, np.float32_t b): + * return a if a <= b else b # <<<<<<<<<<<<<< + * + * + */ + if (((__pyx_v_a <= __pyx_v_b) != 0)) { + __pyx_t_1 = __pyx_v_a; + } else { + __pyx_t_1 = __pyx_v_b; + } + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* "soft_nms_cpu.pyx":18 + * return a if a >= b else b + * + * cdef inline np.float32_t min(np.float32_t a, np.float32_t b): # <<<<<<<<<<<<<< + * return a if a <= b else b + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "soft_nms_cpu.pyx":22 + * + * + * def soft_nms_cpu( # <<<<<<<<<<<<<< + * np.ndarray[float, ndim=2] boxes_in, + * float iou_thr, + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_12soft_nms_cpu_1soft_nms_cpu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_12soft_nms_cpu_1soft_nms_cpu = {"soft_nms_cpu", (PyCFunction)__pyx_pw_12soft_nms_cpu_1soft_nms_cpu, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_12soft_nms_cpu_1soft_nms_cpu(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyArrayObject *__pyx_v_boxes_in = 0; + float __pyx_v_iou_thr; + unsigned int __pyx_v_method; + float __pyx_v_sigma; + float __pyx_v_min_score; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("soft_nms_cpu (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_boxes_in,&__pyx_n_s_iou_thr,&__pyx_n_s_method,&__pyx_n_s_sigma,&__pyx_n_s_min_score,0}; + PyObject* values[5] = {0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_boxes_in)) != 0)) kw_args--; + else goto 
__pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_iou_thr)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("soft_nms_cpu", 0, 2, 5, 1); __PYX_ERR(0, 22, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_method); + if (value) { values[2] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 3: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sigma); + if (value) { values[3] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 4: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_min_score); + if (value) { values[4] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "soft_nms_cpu") < 0)) __PYX_ERR(0, 22, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_boxes_in = ((PyArrayObject *)values[0]); + __pyx_v_iou_thr = __pyx_PyFloat_AsFloat(values[1]); if (unlikely((__pyx_v_iou_thr == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 24, __pyx_L3_error) + if (values[2]) { + __pyx_v_method = __Pyx_PyInt_As_unsigned_int(values[2]); if (unlikely((__pyx_v_method == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25, __pyx_L3_error) + } else { + __pyx_v_method = ((unsigned int)1); + } + if (values[3]) { + __pyx_v_sigma = __pyx_PyFloat_AsFloat(values[3]); if (unlikely((__pyx_v_sigma == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 26, __pyx_L3_error) + } else { + __pyx_v_sigma = ((float)0.5); + } + if (values[4]) { + __pyx_v_min_score = __pyx_PyFloat_AsFloat(values[4]); if (unlikely((__pyx_v_min_score == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 27, __pyx_L3_error) + } else { + __pyx_v_min_score = ((float)0.001); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("soft_nms_cpu", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 22, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("soft_nms_cpu.soft_nms_cpu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_boxes_in), __pyx_ptype_5numpy_ndarray, 1, "boxes_in", 0))) __PYX_ERR(0, 23, __pyx_L1_error) + __pyx_r = __pyx_pf_12soft_nms_cpu_soft_nms_cpu(__pyx_self, __pyx_v_boxes_in, __pyx_v_iou_thr, __pyx_v_method, __pyx_v_sigma, __pyx_v_min_score); + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_12soft_nms_cpu_soft_nms_cpu(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_boxes_in, float __pyx_v_iou_thr, unsigned int __pyx_v_method, float __pyx_v_sigma, float __pyx_v_min_score) { + PyObject *__pyx_v_boxes = NULL; + unsigned int __pyx_v_N; + float __pyx_v_iw; + float __pyx_v_ih; + float __pyx_v_ua; + int __pyx_v_pos; + float __pyx_v_maxscore; + int 
__pyx_v_maxpos; + float __pyx_v_x1; + float __pyx_v_x2; + float __pyx_v_y1; + float __pyx_v_y2; + float __pyx_v_tx1; + float __pyx_v_tx2; + float __pyx_v_ty1; + float __pyx_v_ty2; + float __pyx_v_ts; + float __pyx_v_area; + float __pyx_v_weight; + float __pyx_v_ov; + PyObject *__pyx_v_inds = NULL; + PyObject *__pyx_v_i = NULL; + PyObject *__pyx_v_ti = NULL; + CYTHON_UNUSED PyObject *__pyx_v_s = NULL; + __Pyx_LocalBuf_ND __pyx_pybuffernd_boxes_in; + __Pyx_Buffer __pyx_pybuffer_boxes_in; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + unsigned int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + Py_ssize_t __pyx_t_7; + PyObject *(*__pyx_t_8)(PyObject *); + float __pyx_t_9; + int __pyx_t_10; + int __pyx_t_11; + PyObject *__pyx_t_12 = NULL; + long __pyx_t_13; + __Pyx_RefNannySetupContext("soft_nms_cpu", 0); + __pyx_pybuffer_boxes_in.pybuffer.buf = NULL; + __pyx_pybuffer_boxes_in.refcount = 0; + __pyx_pybuffernd_boxes_in.data = NULL; + __pyx_pybuffernd_boxes_in.rcbuffer = &__pyx_pybuffer_boxes_in; + { + __Pyx_BufFmt_StackElem __pyx_stack[1]; + if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_boxes_in.rcbuffer->pybuffer, (PyObject*)__pyx_v_boxes_in, &__Pyx_TypeInfo_float, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 22, __pyx_L1_error) + } + __pyx_pybuffernd_boxes_in.diminfo[0].strides = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_boxes_in.diminfo[0].shape = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_boxes_in.diminfo[1].strides = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_boxes_in.diminfo[1].shape = __pyx_pybuffernd_boxes_in.rcbuffer->pybuffer.shape[1]; + + /* "soft_nms_cpu.pyx":29 + * float min_score=0.001, + * ): + * boxes = boxes_in.copy() # <<<<<<<<<<<<<< + * cdef unsigned int N = boxes.shape[0] + * cdef float iw, ih, box_area + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_boxes_in), __pyx_n_s_copy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + } + } + if (__pyx_t_3) { + __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else { + __pyx_t_1 = __Pyx_PyObject_CallNoArg(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 29, __pyx_L1_error) + } + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_boxes = __pyx_t_1; + __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":30 + * ): + * boxes = boxes_in.copy() + * cdef unsigned int N = boxes.shape[0] # <<<<<<<<<<<<<< + * cdef float iw, ih, box_area + * cdef float ua + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_boxes, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_GetItemInt(__pyx_t_1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 30, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_4 = __Pyx_PyInt_As_unsigned_int(__pyx_t_2); if (unlikely((__pyx_t_4 
== (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 30, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_N = __pyx_t_4; + + /* "soft_nms_cpu.pyx":33 + * cdef float iw, ih, box_area + * cdef float ua + * cdef int pos = 0 # <<<<<<<<<<<<<< + * cdef float maxscore = 0 + * cdef int maxpos = 0 + */ + __pyx_v_pos = 0; + + /* "soft_nms_cpu.pyx":34 + * cdef float ua + * cdef int pos = 0 + * cdef float maxscore = 0 # <<<<<<<<<<<<<< + * cdef int maxpos = 0 + * cdef float x1, x2, y1, y2, tx1, tx2, ty1, ty2, ts, area, weight, ov + */ + __pyx_v_maxscore = 0.0; + + /* "soft_nms_cpu.pyx":35 + * cdef int pos = 0 + * cdef float maxscore = 0 + * cdef int maxpos = 0 # <<<<<<<<<<<<<< + * cdef float x1, x2, y1, y2, tx1, tx2, ty1, ty2, ts, area, weight, ov + * inds = np.arange(N) + */ + __pyx_v_maxpos = 0; + + /* "soft_nms_cpu.pyx":37 + * cdef int maxpos = 0 + * cdef float x1, x2, y1, y2, tx1, tx2, ty1, ty2, ts, area, weight, ov + * inds = np.arange(N) # <<<<<<<<<<<<<< + * + * for i in range(N): + */ + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_arange); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyInt_From_unsigned_int(__pyx_v_N); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_5 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + } + } + if (!__pyx_t_5) { + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_GOTREF(__pyx_t_2); + } else { + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_3)) { + PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_1}; + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { + PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_1}; + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + { + __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_1); + __pyx_t_1 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_inds = __pyx_t_2; + __pyx_t_2 = 0; + + /* "soft_nms_cpu.pyx":39 + * inds = np.arange(N) + * + * for i in range(N): # <<<<<<<<<<<<<< + * maxscore = boxes[i, 4] + * maxpos = i + */ + __pyx_t_2 = 
__Pyx_PyInt_From_unsigned_int(__pyx_v_N); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 39, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 39, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (likely(PyList_CheckExact(__pyx_t_3)) || PyTuple_CheckExact(__pyx_t_3)) { + __pyx_t_2 = __pyx_t_3; __Pyx_INCREF(__pyx_t_2); __pyx_t_7 = 0; + __pyx_t_8 = NULL; + } else { + __pyx_t_7 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 39, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_8 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 39, __pyx_L1_error) + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + for (;;) { + if (likely(!__pyx_t_8)) { + if (likely(PyList_CheckExact(__pyx_t_2))) { + if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_7); __Pyx_INCREF(__pyx_t_3); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 39, __pyx_L1_error) + #else + __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 39, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + } else { + if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_7); __Pyx_INCREF(__pyx_t_3); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(0, 39, __pyx_L1_error) + #else + __pyx_t_3 = PySequence_ITEM(__pyx_t_2, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 39, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + } + } else { + __pyx_t_3 = __pyx_t_8(__pyx_t_2); + if (unlikely(!__pyx_t_3)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(0, 39, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_3); + } + __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_3); + __pyx_t_3 = 0; + + /* "soft_nms_cpu.pyx":40 + * + * for i in range(N): + * maxscore = boxes[i, 4] # <<<<<<<<<<<<<< + * maxpos = i + * + */ + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 40, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_4); + __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4); + __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 40, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_6); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 40, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_v_maxscore = __pyx_t_9; + + /* "soft_nms_cpu.pyx":41 + * for i in range(N): + * maxscore = boxes[i, 4] + * maxpos = i # <<<<<<<<<<<<<< + * + * tx1 = boxes[i, 0] + */ + __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_v_i); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 41, __pyx_L1_error) + __pyx_v_maxpos = __pyx_t_10; + + /* "soft_nms_cpu.pyx":43 + * maxpos = i + * + * tx1 = boxes[i, 0] # <<<<<<<<<<<<<< + * ty1 = boxes[i, 1] + * tx2 = boxes[i, 2] + */ + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 43, __pyx_L1_error) + 
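+      /* note: `boxes` here is the plain Python object returned by .copy(),
+         not the typed `boxes_in` buffer, so each boxes[i, k] access below is
+         compiled as: build an (i, k) index tuple, go through the generic
+         __getitem__/__setitem__ protocol, and convert the result to a C float */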
__Pyx_GOTREF(__pyx_t_6); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_0); + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 43, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_tx1 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":44 + * + * tx1 = boxes[i, 0] + * ty1 = boxes[i, 1] # <<<<<<<<<<<<<< + * tx2 = boxes[i, 2] + * ty2 = boxes[i, 3] + */ + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 44, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_1); + __Pyx_GIVEREF(__pyx_int_1); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1); + __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 44, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_6); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 44, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_v_ty1 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":45 + * tx1 = boxes[i, 0] + * ty1 = boxes[i, 1] + * tx2 = boxes[i, 2] # <<<<<<<<<<<<<< + * ty2 = boxes[i, 3] + * ts = boxes[i, 4] + */ + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 45, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_2); + __Pyx_GIVEREF(__pyx_int_2); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_2); + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 45, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 45, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_tx2 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":46 + * ty1 = boxes[i, 1] + * tx2 = boxes[i, 2] + * ty2 = boxes[i, 3] # <<<<<<<<<<<<<< + * ts = boxes[i, 4] + * ti = inds[i] + */ + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 46, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_3); + __Pyx_GIVEREF(__pyx_int_3); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_3); + __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 46, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_6); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 46, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_v_ty2 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":47 + * tx2 = boxes[i, 2] + * ty2 = boxes[i, 3] + * ts = boxes[i, 4] # <<<<<<<<<<<<<< + * ti = inds[i] + * + */ + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 47, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_INCREF(__pyx_v_i); + 
__Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_4); + __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_4); + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 47, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 47, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_ts = __pyx_t_9; + + /* "soft_nms_cpu.pyx":48 + * ty2 = boxes[i, 3] + * ts = boxes[i, 4] + * ti = inds[i] # <<<<<<<<<<<<<< + * + * pos = i + 1 + */ + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_inds, __pyx_v_i); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 48, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_XDECREF_SET(__pyx_v_ti, __pyx_t_3); + __pyx_t_3 = 0; + + /* "soft_nms_cpu.pyx":50 + * ti = inds[i] + * + * pos = i + 1 # <<<<<<<<<<<<<< + * # get max box + * while pos < N: + */ + __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_v_i, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 50, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 50, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_pos = __pyx_t_10; + + /* "soft_nms_cpu.pyx":52 + * pos = i + 1 + * # get max box + * while pos < N: # <<<<<<<<<<<<<< + * if maxscore < boxes[pos, 4]: + * maxscore = boxes[pos, 4] + */ + while (1) { + __pyx_t_11 = ((__pyx_v_pos < __pyx_v_N) != 0); + if (!__pyx_t_11) break; + + /* "soft_nms_cpu.pyx":53 + * # get max box + * while pos < N: + * if maxscore < boxes[pos, 4]: # <<<<<<<<<<<<<< + * maxscore = boxes[pos, 4] + * maxpos = pos + */ + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_maxscore); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 53, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 53, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6); + __Pyx_INCREF(__pyx_int_4); + __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_4); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 53, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyObject_RichCompare(__pyx_t_3, __pyx_t_6, Py_LT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 53, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (__pyx_t_11) { + + /* "soft_nms_cpu.pyx":54 + * while pos < N: + * if maxscore < boxes[pos, 4]: + * maxscore = boxes[pos, 4] # <<<<<<<<<<<<<< + * maxpos = pos + * pos = pos + 1 + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 54, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 54, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); + __Pyx_INCREF(__pyx_int_4); + 
__Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_4); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 54, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_1); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 54, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_v_maxscore = __pyx_t_9; + + /* "soft_nms_cpu.pyx":55 + * if maxscore < boxes[pos, 4]: + * maxscore = boxes[pos, 4] + * maxpos = pos # <<<<<<<<<<<<<< + * pos = pos + 1 + * + */ + __pyx_v_maxpos = __pyx_v_pos; + + /* "soft_nms_cpu.pyx":53 + * # get max box + * while pos < N: + * if maxscore < boxes[pos, 4]: # <<<<<<<<<<<<<< + * maxscore = boxes[pos, 4] + * maxpos = pos + */ + } + + /* "soft_nms_cpu.pyx":56 + * maxscore = boxes[pos, 4] + * maxpos = pos + * pos = pos + 1 # <<<<<<<<<<<<<< + * + * # add max box as a detection + */ + __pyx_v_pos = (__pyx_v_pos + 1); + } + + /* "soft_nms_cpu.pyx":59 + * + * # add max box as a detection + * boxes[i, 0] = boxes[maxpos, 0] # <<<<<<<<<<<<<< + * boxes[i, 1] = boxes[maxpos, 1] + * boxes[i, 2] = boxes[maxpos, 2] + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 59, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 59, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_0); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 59, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 59, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_0); + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 59, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":60 + * # add max box as a detection + * boxes[i, 0] = boxes[maxpos, 0] + * boxes[i, 1] = boxes[maxpos, 1] # <<<<<<<<<<<<<< + * boxes[i, 2] = boxes[maxpos, 2] + * boxes[i, 3] = boxes[maxpos, 3] + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); + __Pyx_INCREF(__pyx_int_1); + __Pyx_GIVEREF(__pyx_int_1); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_1); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_1); + __Pyx_GIVEREF(__pyx_int_1); + 
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_1); + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":61 + * boxes[i, 0] = boxes[maxpos, 0] + * boxes[i, 1] = boxes[maxpos, 1] + * boxes[i, 2] = boxes[maxpos, 2] # <<<<<<<<<<<<<< + * boxes[i, 3] = boxes[maxpos, 3] + * boxes[i, 4] = boxes[maxpos, 4] + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); + __Pyx_INCREF(__pyx_int_2); + __Pyx_GIVEREF(__pyx_int_2); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_2); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_2); + __Pyx_GIVEREF(__pyx_int_2); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_2); + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":62 + * boxes[i, 1] = boxes[maxpos, 1] + * boxes[i, 2] = boxes[maxpos, 2] + * boxes[i, 3] = boxes[maxpos, 3] # <<<<<<<<<<<<<< + * boxes[i, 4] = boxes[maxpos, 4] + * inds[i] = inds[maxpos] + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); + __Pyx_INCREF(__pyx_int_3); + __Pyx_GIVEREF(__pyx_int_3); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_3); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_3); + __Pyx_GIVEREF(__pyx_int_3); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_3); + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":63 + * boxes[i, 2] = boxes[maxpos, 2] + * boxes[i, 3] = boxes[maxpos, 3] + * boxes[i, 4] = boxes[maxpos, 4] # <<<<<<<<<<<<<< + * inds[i] = inds[maxpos] + * + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 63, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1); + __Pyx_INCREF(__pyx_int_4); + 
__Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_4); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 63, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_4); + __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_4); + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 63, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":64 + * boxes[i, 3] = boxes[maxpos, 3] + * boxes[i, 4] = boxes[maxpos, 4] + * inds[i] = inds[maxpos] # <<<<<<<<<<<<<< + * + * # swap ith box with position of max box + */ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_inds, __pyx_v_maxpos, int, 1, __Pyx_PyInt_From_int, 0, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (unlikely(PyObject_SetItem(__pyx_v_inds, __pyx_v_i, __pyx_t_1) < 0)) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":67 + * + * # swap ith box with position of max box + * boxes[maxpos, 0] = tx1 # <<<<<<<<<<<<<< + * boxes[maxpos, 1] = ty1 + * boxes[maxpos, 2] = tx2 + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_tx1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 67, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); + __pyx_t_6 = 0; + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_1) < 0)) __PYX_ERR(0, 67, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":68 + * # swap ith box with position of max box + * boxes[maxpos, 0] = tx1 + * boxes[maxpos, 1] = ty1 # <<<<<<<<<<<<<< + * boxes[maxpos, 2] = tx2 + * boxes[maxpos, 3] = ty2 + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_ty1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); + __Pyx_INCREF(__pyx_int_1); + __Pyx_GIVEREF(__pyx_int_1); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_1); + __pyx_t_3 = 0; + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 68, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":69 + * boxes[maxpos, 0] = tx1 + * boxes[maxpos, 1] = ty1 + * boxes[maxpos, 2] = tx2 # <<<<<<<<<<<<<< + * boxes[maxpos, 3] = ty2 + * boxes[maxpos, 4] = ts + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_tx2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error) 
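+    /* second half of the swap: the row-i snapshot (tx1, ty1, tx2, ty2, ts)
+       and its original index ti are being written back into row maxpos */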
+ __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __Pyx_INCREF(__pyx_int_2); + __Pyx_GIVEREF(__pyx_int_2); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_2); + __pyx_t_6 = 0; + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_1) < 0)) __PYX_ERR(0, 69, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":70 + * boxes[maxpos, 1] = ty1 + * boxes[maxpos, 2] = tx2 + * boxes[maxpos, 3] = ty2 # <<<<<<<<<<<<<< + * boxes[maxpos, 4] = ts + * inds[maxpos] = ti + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_ty2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 70, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 70, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3); + __Pyx_INCREF(__pyx_int_3); + __Pyx_GIVEREF(__pyx_int_3); + PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_int_3); + __pyx_t_3 = 0; + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_6, __pyx_t_1) < 0)) __PYX_ERR(0, 70, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":71 + * boxes[maxpos, 2] = tx2 + * boxes[maxpos, 3] = ty2 + * boxes[maxpos, 4] = ts # <<<<<<<<<<<<<< + * inds[maxpos] = ti + * + */ + __pyx_t_1 = PyFloat_FromDouble(__pyx_v_ts); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_maxpos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __Pyx_INCREF(__pyx_int_4); + __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4); + __pyx_t_6 = 0; + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_1) < 0)) __PYX_ERR(0, 71, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":72 + * boxes[maxpos, 3] = ty2 + * boxes[maxpos, 4] = ts + * inds[maxpos] = ti # <<<<<<<<<<<<<< + * + * tx1 = boxes[i, 0] + */ + if (unlikely(__Pyx_SetItemInt(__pyx_v_inds, __pyx_v_maxpos, __pyx_v_ti, int, 1, __Pyx_PyInt_From_int, 0, 1, 0) < 0)) __PYX_ERR(0, 72, __pyx_L1_error) + + /* "soft_nms_cpu.pyx":74 + * inds[maxpos] = ti + * + * tx1 = boxes[i, 0] # <<<<<<<<<<<<<< + * ty1 = boxes[i, 1] + * tx2 = boxes[i, 2] + */ + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_0); + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 74, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_9 = 
__pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 74, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_tx1 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":75 + * + * tx1 = boxes[i, 0] + * ty1 = boxes[i, 1] # <<<<<<<<<<<<<< + * tx2 = boxes[i, 2] + * ty2 = boxes[i, 3] + */ + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_1); + __Pyx_GIVEREF(__pyx_int_1); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1); + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_1); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_v_ty1 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":76 + * tx1 = boxes[i, 0] + * ty1 = boxes[i, 1] + * tx2 = boxes[i, 2] # <<<<<<<<<<<<<< + * ty2 = boxes[i, 3] + * ts = boxes[i, 4] + */ + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_2); + __Pyx_GIVEREF(__pyx_int_2); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_2); + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 76, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 76, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_tx2 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":77 + * ty1 = boxes[i, 1] + * tx2 = boxes[i, 2] + * ty2 = boxes[i, 3] # <<<<<<<<<<<<<< + * ts = boxes[i, 4] + * + */ + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_3); + __Pyx_GIVEREF(__pyx_int_3); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_3); + __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_1); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 77, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_v_ty2 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":78 + * tx2 = boxes[i, 2] + * ty2 = boxes[i, 3] + * ts = boxes[i, 4] # <<<<<<<<<<<<<< + * + * pos = i + 1 + */ + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v_i); + __Pyx_GIVEREF(__pyx_v_i); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_i); + __Pyx_INCREF(__pyx_int_4); + __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_4); + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && 
PyErr_Occurred())) __PYX_ERR(0, 78, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_ts = __pyx_t_9; + + /* "soft_nms_cpu.pyx":80 + * ts = boxes[i, 4] + * + * pos = i + 1 # <<<<<<<<<<<<<< + * # NMS iterations, note that N changes if detection boxes fall below + * # threshold + */ + __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_v_i, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 80, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_10 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_10 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 80, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_pos = __pyx_t_10; + + /* "soft_nms_cpu.pyx":83 + * # NMS iterations, note that N changes if detection boxes fall below + * # threshold + * while pos < N: # <<<<<<<<<<<<<< + * x1 = boxes[pos, 0] + * y1 = boxes[pos, 1] + */ + while (1) { + __pyx_t_11 = ((__pyx_v_pos < __pyx_v_N) != 0); + if (!__pyx_t_11) break; + + /* "soft_nms_cpu.pyx":84 + * # threshold + * while pos < N: + * x1 = boxes[pos, 0] # <<<<<<<<<<<<<< + * y1 = boxes[pos, 1] + * x2 = boxes[pos, 2] + */ + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 84, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 84, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_0); + __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 84, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 84, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_x1 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":85 + * while pos < N: + * x1 = boxes[pos, 0] + * y1 = boxes[pos, 1] # <<<<<<<<<<<<<< + * x2 = boxes[pos, 2] + * y2 = boxes[pos, 3] + */ + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 85, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); + __Pyx_INCREF(__pyx_int_1); + __Pyx_GIVEREF(__pyx_int_1); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_1); + __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 85, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 85, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_y1 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":86 + * x1 = boxes[pos, 0] + * y1 = boxes[pos, 1] + * x2 = boxes[pos, 2] # <<<<<<<<<<<<<< + * y2 = boxes[pos, 3] + * s = boxes[pos, 4] + */ + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); + __Pyx_INCREF(__pyx_int_2); + __Pyx_GIVEREF(__pyx_int_2); + PyTuple_SET_ITEM(__pyx_t_1, 1, 
__pyx_int_2); + __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 86, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_x2 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":87 + * y1 = boxes[pos, 1] + * x2 = boxes[pos, 2] + * y2 = boxes[pos, 3] # <<<<<<<<<<<<<< + * s = boxes[pos, 4] + * + */ + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 87, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 87, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); + __Pyx_INCREF(__pyx_int_3); + __Pyx_GIVEREF(__pyx_int_3); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_3); + __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 87, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 87, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_y2 = __pyx_t_9; + + /* "soft_nms_cpu.pyx":88 + * x2 = boxes[pos, 2] + * y2 = boxes[pos, 3] + * s = boxes[pos, 4] # <<<<<<<<<<<<<< + * + * area = (x2 - x1 + 1) * (y2 - y1 + 1) + */ + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); + __Pyx_INCREF(__pyx_int_4); + __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_4); + __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF_SET(__pyx_v_s, __pyx_t_3); + __pyx_t_3 = 0; + + /* "soft_nms_cpu.pyx":90 + * s = boxes[pos, 4] + * + * area = (x2 - x1 + 1) * (y2 - y1 + 1) # <<<<<<<<<<<<<< + * iw = (min(tx2, x2) - max(tx1, x1) + 1) + * if iw > 0: + */ + __pyx_v_area = (((__pyx_v_x2 - __pyx_v_x1) + 1.0) * ((__pyx_v_y2 - __pyx_v_y1) + 1.0)); + + /* "soft_nms_cpu.pyx":91 + * + * area = (x2 - x1 + 1) * (y2 - y1 + 1) + * iw = (min(tx2, x2) - max(tx1, x1) + 1) # <<<<<<<<<<<<<< + * if iw > 0: + * ih = (min(ty2, y2) - max(ty1, y1) + 1) + */ + __pyx_v_iw = ((__pyx_f_12soft_nms_cpu_min(__pyx_v_tx2, __pyx_v_x2) - __pyx_f_12soft_nms_cpu_max(__pyx_v_tx1, __pyx_v_x1)) + 1.0); + + /* "soft_nms_cpu.pyx":92 + * area = (x2 - x1 + 1) * (y2 - y1 + 1) + * iw = (min(tx2, x2) - max(tx1, x1) + 1) + * if iw > 0: # <<<<<<<<<<<<<< + * ih = (min(ty2, y2) - max(ty1, y1) + 1) + * if ih > 0: + */ + __pyx_t_11 = ((__pyx_v_iw > 0.0) != 0); + if (__pyx_t_11) { + + /* "soft_nms_cpu.pyx":93 + * iw = (min(tx2, x2) - max(tx1, x1) + 1) + * if iw > 0: + * ih = (min(ty2, y2) - max(ty1, y1) + 1) # <<<<<<<<<<<<<< + * if ih > 0: + * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) + */ + __pyx_v_ih = ((__pyx_f_12soft_nms_cpu_min(__pyx_v_ty2, __pyx_v_y2) - __pyx_f_12soft_nms_cpu_max(__pyx_v_ty1, __pyx_v_y1)) + 1.0); + + /* "soft_nms_cpu.pyx":94 + * 
if iw > 0: + * ih = (min(ty2, y2) - max(ty1, y1) + 1) + * if ih > 0: # <<<<<<<<<<<<<< + * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) + * ov = iw * ih / ua # iou between max box and detection box + */ + __pyx_t_11 = ((__pyx_v_ih > 0.0) != 0); + if (__pyx_t_11) { + + /* "soft_nms_cpu.pyx":95 + * ih = (min(ty2, y2) - max(ty1, y1) + 1) + * if ih > 0: + * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) # <<<<<<<<<<<<<< + * ov = iw * ih / ua # iou between max box and detection box + * + */ + __pyx_v_ua = ((double)(((((__pyx_v_tx2 - __pyx_v_tx1) + 1.0) * ((__pyx_v_ty2 - __pyx_v_ty1) + 1.0)) + __pyx_v_area) - (__pyx_v_iw * __pyx_v_ih))); + + /* "soft_nms_cpu.pyx":96 + * if ih > 0: + * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) + * ov = iw * ih / ua # iou between max box and detection box # <<<<<<<<<<<<<< + * + * if method == 1: # linear + */ + __pyx_t_9 = (__pyx_v_iw * __pyx_v_ih); + if (unlikely(__pyx_v_ua == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 96, __pyx_L1_error) + } + __pyx_v_ov = (__pyx_t_9 / __pyx_v_ua); + + /* "soft_nms_cpu.pyx":98 + * ov = iw * ih / ua # iou between max box and detection box + * + * if method == 1: # linear # <<<<<<<<<<<<<< + * if ov > iou_thr: + * weight = 1 - ov + */ + switch (__pyx_v_method) { + case 1: + + /* "soft_nms_cpu.pyx":99 + * + * if method == 1: # linear + * if ov > iou_thr: # <<<<<<<<<<<<<< + * weight = 1 - ov + * else: + */ + __pyx_t_11 = ((__pyx_v_ov > __pyx_v_iou_thr) != 0); + if (__pyx_t_11) { + + /* "soft_nms_cpu.pyx":100 + * if method == 1: # linear + * if ov > iou_thr: + * weight = 1 - ov # <<<<<<<<<<<<<< + * else: + * weight = 1 + */ + __pyx_v_weight = (1.0 - __pyx_v_ov); + + /* "soft_nms_cpu.pyx":99 + * + * if method == 1: # linear + * if ov > iou_thr: # <<<<<<<<<<<<<< + * weight = 1 - ov + * else: + */ + goto __pyx_L12; + } + + /* "soft_nms_cpu.pyx":102 + * weight = 1 - ov + * else: + * weight = 1 # <<<<<<<<<<<<<< + * elif method == 2: # gaussian + * weight = np.exp(-(ov * ov) / sigma) + */ + /*else*/ { + __pyx_v_weight = 1.0; + } + __pyx_L12:; + + /* "soft_nms_cpu.pyx":98 + * ov = iw * ih / ua # iou between max box and detection box + * + * if method == 1: # linear # <<<<<<<<<<<<<< + * if ov > iou_thr: + * weight = 1 - ov + */ + break; + + /* "soft_nms_cpu.pyx":103 + * else: + * weight = 1 + * elif method == 2: # gaussian # <<<<<<<<<<<<<< + * weight = np.exp(-(ov * ov) / sigma) + * else: # original NMS + */ + case 2: + + /* "soft_nms_cpu.pyx":104 + * weight = 1 + * elif method == 2: # gaussian + * weight = np.exp(-(ov * ov) / sigma) # <<<<<<<<<<<<<< + * else: # original NMS + * if ov > iou_thr: + */ + __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_exp); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_9 = (-(__pyx_v_ov * __pyx_v_ov)); + if (unlikely(__pyx_v_sigma == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 104, __pyx_L1_error) + } + __pyx_t_1 = PyFloat_FromDouble((__pyx_t_9 / __pyx_v_sigma)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_5 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_5)) { + PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + } + } + if (!__pyx_t_5) { + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_GOTREF(__pyx_t_3); + } else { + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_1}; + __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_1}; + __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + { + __pyx_t_12 = PyTuple_New(1+1); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_5); __pyx_t_5 = NULL; + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_12, 0+1, __pyx_t_1); + __pyx_t_1 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_12, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; + } + } + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_9 = __pyx_PyFloat_AsFloat(__pyx_t_3); if (unlikely((__pyx_t_9 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 104, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_weight = __pyx_t_9; + + /* "soft_nms_cpu.pyx":103 + * else: + * weight = 1 + * elif method == 2: # gaussian # <<<<<<<<<<<<<< + * weight = np.exp(-(ov * ov) / sigma) + * else: # original NMS + */ + break; + default: + + /* "soft_nms_cpu.pyx":106 + * weight = np.exp(-(ov * ov) / sigma) + * else: # original NMS + * if ov > iou_thr: # <<<<<<<<<<<<<< + * weight = 0 + * else: + */ + __pyx_t_11 = ((__pyx_v_ov > __pyx_v_iou_thr) != 0); + if (__pyx_t_11) { + + /* "soft_nms_cpu.pyx":107 + * else: # original NMS + * if ov > iou_thr: + * weight = 0 # <<<<<<<<<<<<<< + * else: + * weight = 1 + */ + __pyx_v_weight = 0.0; + + /* "soft_nms_cpu.pyx":106 + * weight = np.exp(-(ov * ov) / sigma) + * else: # original NMS + * if ov > iou_thr: # <<<<<<<<<<<<<< + * weight = 0 + * else: + */ + goto __pyx_L13; + } + + /* "soft_nms_cpu.pyx":109 + * weight = 0 + * else: + * weight = 1 # <<<<<<<<<<<<<< + * + * boxes[pos, 4] = weight * boxes[pos, 4] + */ + /*else*/ { + __pyx_v_weight = 1.0; + } + __pyx_L13:; + break; + } + + /* "soft_nms_cpu.pyx":111 + * weight = 1 + * + * boxes[pos, 4] = weight * boxes[pos, 4] # <<<<<<<<<<<<<< + * + * # if box score falls below threshold, discard the box by + */ + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_weight); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_6); + __Pyx_INCREF(__pyx_int_4); 
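+ /* A minimal Python sketch of the score-decay step implemented by the switch
+  * above, written directly from the soft_nms_cpu.pyx lines quoted in these
+  * comments; the helper name soft_nms_weight is illustrative, not part of the
+  * pyx source:
+  *
+  *     import numpy as np
+  *
+  *     def soft_nms_weight(ov, iou_thr, sigma, method):
+  *         # decay applied to a candidate score, given its IoU `ov`
+  *         # with the current maximum box
+  *         if method == 1:                    # linear
+  *             return 1.0 - ov if ov > iou_thr else 1.0
+  *         elif method == 2:                  # gaussian
+  *             return float(np.exp(-(ov * ov) / sigma))
+  *         else:                              # original (hard) NMS
+  *             return 0.0 if ov > iou_thr else 1.0
+  *
+  * The decayed score is then written back as boxes[pos, 4] *= weight, which
+  * the generated code below performs through PyObject_SetItem.
+  */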
+ __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_4); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_12); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; + __pyx_t_12 = PyNumber_Multiply(__pyx_t_3, __pyx_t_6); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __Pyx_INCREF(__pyx_int_4); + __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4); + __pyx_t_6 = 0; + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_12) < 0)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; + + /* "soft_nms_cpu.pyx":115 + * # if box score falls below threshold, discard the box by + * # swapping with last box update N + * if boxes[pos, 4] < min_score: # <<<<<<<<<<<<<< + * boxes[pos, 0] = boxes[N-1, 0] + * boxes[pos, 1] = boxes[N-1, 1] + */ + __pyx_t_12 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_12); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_12); + __Pyx_INCREF(__pyx_int_4); + __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4); + __pyx_t_12 = 0; + __pyx_t_12 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyFloat_FromDouble(__pyx_v_min_score); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_6 = PyObject_RichCompare(__pyx_t_12, __pyx_t_3, Py_LT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_11 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (__pyx_t_11) { + + /* "soft_nms_cpu.pyx":116 + * # swapping with last box update N + * if boxes[pos, 4] < min_score: + * boxes[pos, 0] = boxes[N-1, 0] # <<<<<<<<<<<<<< + * boxes[pos, 1] = boxes[N-1, 1] + * boxes[pos, 2] = boxes[N-1, 2] + */ + __pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_3); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_0); + __pyx_t_3 = 0; + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_12, __pyx_t_6) < 0)) __PYX_ERR(0, 116, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + + /* "soft_nms_cpu.pyx":117 + * if boxes[pos, 4] < min_score: + * boxes[pos, 0] = boxes[N-1, 0] + * boxes[pos, 1] = boxes[N-1, 1] # <<<<<<<<<<<<<< + * boxes[pos, 2] = boxes[N-1, 2] + * boxes[pos, 3] = boxes[N-1, 3] + */ + __pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 117, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 117, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_6); + __Pyx_INCREF(__pyx_int_1); + __Pyx_GIVEREF(__pyx_int_1); + PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_1); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_12); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 117, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; + __pyx_t_12 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 117, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 117, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_12); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_12); + __Pyx_INCREF(__pyx_int_1); + __Pyx_GIVEREF(__pyx_int_1); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_1); + __pyx_t_12 = 0; + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_6) < 0)) __PYX_ERR(0, 117, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + + /* "soft_nms_cpu.pyx":118 + * boxes[pos, 0] = boxes[N-1, 0] + * boxes[pos, 1] = boxes[N-1, 1] + * boxes[pos, 2] = boxes[N-1, 2] # <<<<<<<<<<<<<< + * boxes[pos, 3] = boxes[N-1, 3] + * boxes[pos, 4] = boxes[N-1, 4] + */ + __pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 118, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 118, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __Pyx_INCREF(__pyx_int_2); + __Pyx_GIVEREF(__pyx_int_2); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_2); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 118, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 118, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 118, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_3); + __Pyx_INCREF(__pyx_int_2); + __Pyx_GIVEREF(__pyx_int_2); + PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_2); + __pyx_t_3 = 0; + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_12, __pyx_t_6) < 0)) __PYX_ERR(0, 118, 
__pyx_L1_error) + __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + + /* "soft_nms_cpu.pyx":119 + * boxes[pos, 1] = boxes[N-1, 1] + * boxes[pos, 2] = boxes[N-1, 2] + * boxes[pos, 3] = boxes[N-1, 3] # <<<<<<<<<<<<<< + * boxes[pos, 4] = boxes[N-1, 4] + * inds[pos] = inds[N - 1] + */ + __pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_6); + __Pyx_INCREF(__pyx_int_3); + __Pyx_GIVEREF(__pyx_int_3); + PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_3); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_12); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; + __pyx_t_12 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_12); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_12); + __Pyx_INCREF(__pyx_int_3); + __Pyx_GIVEREF(__pyx_int_3); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_3); + __pyx_t_12 = 0; + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_3, __pyx_t_6) < 0)) __PYX_ERR(0, 119, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + + /* "soft_nms_cpu.pyx":120 + * boxes[pos, 2] = boxes[N-1, 2] + * boxes[pos, 3] = boxes[N-1, 3] + * boxes[pos, 4] = boxes[N-1, 4] # <<<<<<<<<<<<<< + * inds[pos] = inds[N - 1] + * N = N - 1 + */ + __pyx_t_6 = __Pyx_PyInt_From_long((__pyx_v_N - 1)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 120, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); + __Pyx_INCREF(__pyx_int_4); + __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_4); + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 120, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_pos); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 120, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_3); + __Pyx_INCREF(__pyx_int_4); + __Pyx_GIVEREF(__pyx_int_4); + PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_int_4); + __pyx_t_3 = 0; + if (unlikely(PyObject_SetItem(__pyx_v_boxes, __pyx_t_12, __pyx_t_6) < 0)) __PYX_ERR(0, 120, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + + /* "soft_nms_cpu.pyx":121 + * boxes[pos, 3] = boxes[N-1, 3] + * boxes[pos, 4] = boxes[N-1, 4] + * inds[pos] = inds[N - 1] # <<<<<<<<<<<<<< + * N = N - 1 + * pos = pos - 1 + */ + __pyx_t_13 = (__pyx_v_N - 1); + __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_inds, __pyx_t_13, long, 1, __Pyx_PyInt_From_long, 0, 1, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 121, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if 
(unlikely(__Pyx_SetItemInt(__pyx_v_inds, __pyx_v_pos, __pyx_t_6, int, 1, __Pyx_PyInt_From_int, 0, 1, 0) < 0)) __PYX_ERR(0, 121, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + + /* "soft_nms_cpu.pyx":122 + * boxes[pos, 4] = boxes[N-1, 4] + * inds[pos] = inds[N - 1] + * N = N - 1 # <<<<<<<<<<<<<< + * pos = pos - 1 + * + */ + __pyx_v_N = (__pyx_v_N - 1); + + /* "soft_nms_cpu.pyx":123 + * inds[pos] = inds[N - 1] + * N = N - 1 + * pos = pos - 1 # <<<<<<<<<<<<<< + * + * pos = pos + 1 + */ + __pyx_v_pos = (__pyx_v_pos - 1); + + /* "soft_nms_cpu.pyx":115 + * # if box score falls below threshold, discard the box by + * # swapping with last box update N + * if boxes[pos, 4] < min_score: # <<<<<<<<<<<<<< + * boxes[pos, 0] = boxes[N-1, 0] + * boxes[pos, 1] = boxes[N-1, 1] + */ + } + + /* "soft_nms_cpu.pyx":94 + * if iw > 0: + * ih = (min(ty2, y2) - max(ty1, y1) + 1) + * if ih > 0: # <<<<<<<<<<<<<< + * ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih) + * ov = iw * ih / ua # iou between max box and detection box + */ + } + + /* "soft_nms_cpu.pyx":92 + * area = (x2 - x1 + 1) * (y2 - y1 + 1) + * iw = (min(tx2, x2) - max(tx1, x1) + 1) + * if iw > 0: # <<<<<<<<<<<<<< + * ih = (min(ty2, y2) - max(ty1, y1) + 1) + * if ih > 0: + */ + } + + /* "soft_nms_cpu.pyx":125 + * pos = pos - 1 + * + * pos = pos + 1 # <<<<<<<<<<<<<< + * + * return boxes[:N], inds[:N] + */ + __pyx_v_pos = (__pyx_v_pos + 1); + } + + /* "soft_nms_cpu.pyx":39 + * inds = np.arange(N) + * + * for i in range(N): # <<<<<<<<<<<<<< + * maxscore = boxes[i, 4] + * maxpos = i + */ + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "soft_nms_cpu.pyx":127 + * pos = pos + 1 + * + * return boxes[:N], inds[:N] # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_v_boxes, 0, __pyx_v_N, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 127, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_6 = __Pyx_PyObject_GetSlice(__pyx_v_inds, 0, __pyx_v_N, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_12 = PyTuple_New(2); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 127, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_12); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_2); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_6); + __pyx_t_2 = 0; + __pyx_t_6 = 0; + __pyx_r = __pyx_t_12; + __pyx_t_12 = 0; + goto __pyx_L0; + + /* "soft_nms_cpu.pyx":22 + * + * + * def soft_nms_cpu( # <<<<<<<<<<<<<< + * np.ndarray[float, ndim=2] boxes_in, + * float iou_thr, + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_12); + { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes_in.rcbuffer->pybuffer); + __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} + __Pyx_AddTraceback("soft_nms_cpu.soft_nms_cpu", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + goto __pyx_L2; + __pyx_L0:; + __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_boxes_in.rcbuffer->pybuffer); + __pyx_L2:; + __Pyx_XDECREF(__pyx_v_boxes); + __Pyx_XDECREF(__pyx_v_inds); + __Pyx_XDECREF(__pyx_v_i); + __Pyx_XDECREF(__pyx_v_ti); + __Pyx_XDECREF(__pyx_v_s); + __Pyx_XGIVEREF(__pyx_r); + 
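+ /* A minimal Python sketch of the pruning loop this function runs for each
+  * anchor box i, per the soft_nms_cpu.pyx lines quoted above; the function
+  * name soft_nms_prune is illustrative, and soft_nms_weight is the sketch
+  * given earlier:
+  *
+  *     def soft_nms_prune(boxes, inds, i, N, iou_thr, sigma, min_score, method):
+  *         # ts is read by the full pyx source but unused in this inner loop
+  *         tx1, ty1, tx2, ty2, ts = boxes[i, :5]
+  *         pos = i + 1
+  *         while pos < N:
+  *             x1, y1, x2, y2, s = boxes[pos, :5]
+  *             area = (x2 - x1 + 1) * (y2 - y1 + 1)
+  *             iw = min(tx2, x2) - max(tx1, x1) + 1
+  *             if iw > 0:
+  *                 ih = min(ty2, y2) - max(ty1, y1) + 1
+  *                 if ih > 0:
+  *                     ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
+  *                     ov = iw * ih / ua   # IoU with the current max box
+  *                     boxes[pos, 4] *= soft_nms_weight(ov, iou_thr, sigma, method)
+  *                     # discard a low-scoring box by overwriting it with the
+  *                     # last live box and shrinking N
+  *                     if boxes[pos, 4] < min_score:
+  *                         boxes[pos, :5] = boxes[N - 1, :5]
+  *                         inds[pos] = inds[N - 1]
+  *                         N -= 1
+  *                         pos -= 1
+  *             pos += 1
+  *         return N   # the caller finally returns boxes[:N], inds[:N]
+  */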
__Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":215 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fulfill the PEP. + */ + +/* Python wrapper */ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_v_i; + int __pyx_v_ndim; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + int __pyx_v_t; + char *__pyx_v_f; + PyArray_Descr *__pyx_v_descr = 0; + int __pyx_v_offset; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + char *__pyx_t_8; + if (__pyx_v_info == NULL) { + PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); + return -1; + } + __Pyx_RefNannySetupContext("__getbuffer__", 0); + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":222 + * + * cdef int i, ndim + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + */ + __pyx_v_endian_detector = 1; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":223 + * cdef int i, ndim + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * + * ndim = PyArray_NDIM(self) + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":225 + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * + * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + */ + __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":227 + * ndim = PyArray_NDIM(self) + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":228 + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * 
and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not C contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":227 + * ndim = PyArray_NDIM(self) + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + if (unlikely(__pyx_t_1)) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 229, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 229, __pyx_L1_error) + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":227 + * ndim = PyArray_NDIM(self) + * + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") + */ + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L7_bool_binop_done; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":232 + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< + * raise ValueError(u"ndarray is not Fortran contiguous") + * + */ + __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L7_bool_binop_done:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + if (unlikely(__pyx_t_1)) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 233, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 
0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 233, __pyx_L1_error) + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":231 + * raise ValueError(u"ndarray is not C contiguous") + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") + */ + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":235 + * raise ValueError(u"ndarray is not Fortran contiguous") + * + * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< + * info.ndim = ndim + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":236 + * + * info.buf = PyArray_DATA(self) + * info.ndim = ndim # <<<<<<<<<<<<<< + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * # Allocate new buffer for strides and shape info. + */ + __pyx_v_info->ndim = __pyx_v_ndim; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":240 + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) # <<<<<<<<<<<<<< + * info.shape = info.strides + ndim + * for i in range(ndim): + */ + __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":241 + * # This is allocated as one block, strides first. 
+ * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim # <<<<<<<<<<<<<< + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + */ + __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":242 + * info.strides = PyObject_Malloc(sizeof(Py_ssize_t) * 2 * ndim) + * info.shape = info.strides + ndim + * for i in range(ndim): # <<<<<<<<<<<<<< + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] + */ + __pyx_t_4 = __pyx_v_ndim; + __pyx_t_5 = __pyx_t_4; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":243 + * info.shape = info.strides + ndim + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + */ + (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":244 + * for i in range(ndim): + * info.strides[i] = PyArray_STRIDES(self)[i] + * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< + * else: + * info.strides = PyArray_STRIDES(self) + */ + (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":237 + * info.buf = PyArray_DATA(self) + * info.ndim = ndim + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * # Allocate new buffer for strides and shape info. + * # This is allocated as one block, strides first. 
+ */ + goto __pyx_L9; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":246 + * info.shape[i] = PyArray_DIMS(self)[i] + * else: + * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<< + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + */ + /*else*/ { + __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":247 + * else: + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<< + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + */ + __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); + } + __pyx_L9:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":248 + * info.strides = PyArray_STRIDES(self) + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) + */ + __pyx_v_info->suboffsets = NULL; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":249 + * info.shape = PyArray_DIMS(self) + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< + * info.readonly = not PyArray_ISWRITEABLE(self) + * + */ + __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":250 + * info.suboffsets = NULL + * info.itemsize = PyArray_ITEMSIZE(self) + * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< + * + * cdef int t + */ + __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":253 + * + * cdef int t + * cdef char* f = NULL # <<<<<<<<<<<<<< + * cdef dtype descr = self.descr + * cdef int offset + */ + __pyx_v_f = NULL; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":254 + * cdef int t + * cdef char* f = NULL + * cdef dtype descr = self.descr # <<<<<<<<<<<<<< + * cdef int offset + * + */ + __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":257 + * cdef int offset + * + * info.obj = self # <<<<<<<<<<<<<< + * + * if not PyDataType_HASFIELDS(descr): + */ + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259 + * info.obj = self + * + * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + __pyx_t_1 = ((!(PyDataType_HASFIELDS(__pyx_v_descr) != 0)) != 0); + if (__pyx_t_1) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":260 + * + * if not PyDataType_HASFIELDS(descr): + * t = descr.type_num # <<<<<<<<<<<<<< + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + */ + __pyx_t_4 = __pyx_v_descr->type_num; + __pyx_v_t = __pyx_t_4; + + /* 
"../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":261 + * if not PyDataType_HASFIELDS(descr): + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); + if (!__pyx_t_2) { + goto __pyx_L15_next_or; + } else { + } + __pyx_t_2 = (__pyx_v_little_endian != 0); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L14_bool_binop_done; + } + __pyx_L15_next_or:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":262 + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + */ + __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L14_bool_binop_done; + } + __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L14_bool_binop_done:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":261 + * if not PyDataType_HASFIELDS(descr): + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (unlikely(__pyx_t_1)) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 263, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 263, __pyx_L1_error) + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":261 + * if not PyDataType_HASFIELDS(descr): + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":264 + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + */ + switch (__pyx_v_t) { + case NPY_BYTE: + __pyx_v_f = ((char *)"b"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":265 + * raise ValueError(u"Non-native byte order not supported") + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + */ + case NPY_UBYTE: + __pyx_v_f = ((char *)"B"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":266 + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f 
= "B" + * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + */ + case NPY_SHORT: + __pyx_v_f = ((char *)"h"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":267 + * elif t == NPY_UBYTE: f = "B" + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + */ + case NPY_USHORT: + __pyx_v_f = ((char *)"H"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":268 + * elif t == NPY_SHORT: f = "h" + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + */ + case NPY_INT: + __pyx_v_f = ((char *)"i"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":269 + * elif t == NPY_USHORT: f = "H" + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + */ + case NPY_UINT: + __pyx_v_f = ((char *)"I"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":270 + * elif t == NPY_INT: f = "i" + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + */ + case NPY_LONG: + __pyx_v_f = ((char *)"l"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":271 + * elif t == NPY_UINT: f = "I" + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + */ + case NPY_ULONG: + __pyx_v_f = ((char *)"L"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":272 + * elif t == NPY_LONG: f = "l" + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + */ + case NPY_LONGLONG: + __pyx_v_f = ((char *)"q"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":273 + * elif t == NPY_ULONG: f = "L" + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + */ + case NPY_ULONGLONG: + __pyx_v_f = ((char *)"Q"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":274 + * elif t == NPY_LONGLONG: f = "q" + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + */ + case NPY_FLOAT: + __pyx_v_f = ((char *)"f"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":275 + * elif t == NPY_ULONGLONG: f = "Q" + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + */ + case NPY_DOUBLE: + __pyx_v_f = ((char *)"d"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":276 + * elif t == NPY_FLOAT: f = "f" + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< + * elif t == 
NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + */ + case NPY_LONGDOUBLE: + __pyx_v_f = ((char *)"g"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":277 + * elif t == NPY_DOUBLE: f = "d" + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + */ + case NPY_CFLOAT: + __pyx_v_f = ((char *)"Zf"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":278 + * elif t == NPY_LONGDOUBLE: f = "g" + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" + */ + case NPY_CDOUBLE: + __pyx_v_f = ((char *)"Zd"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":279 + * elif t == NPY_CFLOAT: f = "Zf" + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f = "O" + * else: + */ + case NPY_CLONGDOUBLE: + __pyx_v_f = ((char *)"Zg"); + break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":280 + * elif t == NPY_CDOUBLE: f = "Zd" + * elif t == NPY_CLONGDOUBLE: f = "Zg" + * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + case NPY_OBJECT: + __pyx_v_f = ((char *)"O"); + break; + default: + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":282 + * elif t == NPY_OBJECT: f = "O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * info.format = f + * return + */ + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 282, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_7 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 282, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 282, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 282, __pyx_L1_error) + break; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":283 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f # <<<<<<<<<<<<<< + * return + * else: + */ + __pyx_v_info->format = __pyx_v_f; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":284 + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * info.format = f + * return # <<<<<<<<<<<<<< + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":259 + * info.obj = self + * + * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< + * t = descr.type_num + * if ((descr.byteorder == c'>' and little_endian) or + */ + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":286 + * return + * else: + * info.format = 
PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + */ + /*else*/ { + __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":287 + * else: + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, + */ + (__pyx_v_info->format[0]) = '^'; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":288 + * info.format = PyObject_Malloc(_buffer_format_string_len) + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 # <<<<<<<<<<<<<< + * f = _util_dtypestring(descr, info.format + 1, + * info.format + _buffer_format_string_len, + */ + __pyx_v_offset = 0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":289 + * info.format[0] = c'^' # Native data types, manual alignment + * offset = 0 + * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< + * info.format + _buffer_format_string_len, + * &offset) + */ + __pyx_t_8 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_8 == ((char *)NULL))) __PYX_ERR(1, 289, __pyx_L1_error) + __pyx_v_f = __pyx_t_8; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":292 + * info.format + _buffer_format_string_len, + * &offset) + * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + */ + (__pyx_v_f[0]) = '\x00'; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":215 + * # experimental exception made for __getbuffer__ and __releasebuffer__ + * # -- the details of this may change. + * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< + * # This implementation of getbuffer is geared towards Cython + * # requirements, and does not yet fulfill the PEP. 
+ */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + __pyx_L2:; + __Pyx_XDECREF((PyObject *)__pyx_v_descr); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":294 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + +/* Python wrapper */ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ +static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); + __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__releasebuffer__", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); + if (__pyx_t_1) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":296 + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) # <<<<<<<<<<<<<< + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) + */ + PyObject_Free(__pyx_v_info->format); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":295 + * + * def __releasebuffer__(ndarray self, Py_buffer* info): + * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + */ + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); + if (__pyx_t_1) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":298 + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): + * PyObject_Free(info.strides) # <<<<<<<<<<<<<< + * # info.shape was stored after info.strides in the same block + * + */ + PyObject_Free(__pyx_v_info->strides); + + /* 
"../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":297 + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< + * PyObject_Free(info.strides) + * # info.shape was stored after info.strides in the same block + */ + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":294 + * f[0] = c'\0' # Terminate format string + * + * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< + * if PyArray_HASFIELDS(self): + * PyObject_Free(info.format) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":776 + * + * cdef inline object PyArray_MultiIterNew1(a): + * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew2(a, b): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 776, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":775 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":779 + * + * cdef inline object PyArray_MultiIterNew2(a, b): + * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 779, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":778 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + + /* function exit code */ + 
__pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":781 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":782 + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 782, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":781 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":785 + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 785, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":784 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 
0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":787 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":788 + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 788, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":787 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":790 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); + if (__pyx_t_1) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":792 + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape # <<<<<<<<<<<<<< + * else: + * return () + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); + __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); + goto __pyx_L0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":791 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":794 + * return d.subarray.shape + * else: + * return () # <<<<<<<<<<<<<< + * + 
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_empty_tuple); + __pyx_r = __pyx_empty_tuple; + goto __pyx_L0; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":790 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":796 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. + */ + +static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { + PyArray_Descr *__pyx_v_child = 0; + int __pyx_v_endian_detector; + int __pyx_v_little_endian; + PyObject *__pyx_v_fields = 0; + PyObject *__pyx_v_childname = NULL; + PyObject *__pyx_v_new_offset = NULL; + PyObject *__pyx_v_t = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_t_7; + long __pyx_t_8; + char *__pyx_t_9; + __Pyx_RefNannySetupContext("_util_dtypestring", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":801 + * + * cdef dtype child + * cdef int endian_detector = 1 # <<<<<<<<<<<<<< + * cdef bint little_endian = ((&endian_detector)[0] != 0) + * cdef tuple fields + */ + __pyx_v_endian_detector = 1; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":802 + * cdef dtype child + * cdef int endian_detector = 1 + * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<< + * cdef tuple fields + * + */ + __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + if (unlikely(__pyx_v_descr->names == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); + __PYX_ERR(1, 805, __pyx_L1_error) + } + __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; + for (;;) { + if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 805, __pyx_L1_error) + #else + __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 805, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); + __pyx_t_3 = 0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":806 + * + * for childname in descr.names: + * fields = descr.fields[childname] # <<<<<<<<<<<<<< + * child, new_offset 
= fields + * + */ + if (unlikely(__pyx_v_descr->fields == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 806, __pyx_L1_error) + } + __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 806, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 806, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); + __pyx_t_3 = 0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":807 + * for childname in descr.names: + * fields = descr.fields[childname] + * child, new_offset = fields # <<<<<<<<<<<<<< + * + * if (end - f) - (new_offset - offset[0]) < 15: + */ + if (likely(__pyx_v_fields != Py_None)) { + PyObject* sequence = __pyx_v_fields; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 807, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 807, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 807, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 807, __pyx_L1_error) + } + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 807, __pyx_L1_error) + __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); + __pyx_t_3 = 0; + __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 809, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 809, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 809, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); + if (unlikely(__pyx_t_6)) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":810 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 810, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + 
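+ /* Annotation (sketch, not generated output): __PYX_ERR(f, l, label) is how
+  * Cython lowers `raise` into C. It records the file-table index, the
+  * source line, and the current C line for the traceback machinery, then
+  * jumps to the function's error label -- roughly:
+  *
+  *     #define __PYX_ERR(f_index, lineno, Lbl) \
+  *         { __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; \
+  *           __pyx_clineno = __LINE__; goto Lbl; }
+  *
+  * which is why every error path here lands on __pyx_L1_error with the
+  * Python exception already set. */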
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 810, __pyx_L1_error) + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":809 + * child, new_offset = fields + * + * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + */ + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":812 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); + if (!__pyx_t_7) { + goto __pyx_L8_next_or; + } else { + } + __pyx_t_7 = (__pyx_v_little_endian != 0); + if (!__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_L8_next_or:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":813 + * + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< + * raise ValueError(u"Non-native byte order not supported") + * # One could encode it in the format string and have Cython + */ + __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); + if (__pyx_t_7) { + } else { + __pyx_t_6 = __pyx_t_7; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); + __pyx_t_6 = __pyx_t_7; + __pyx_L7_bool_binop_done:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":812 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + if (unlikely(__pyx_t_6)) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 814, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 814, __pyx_L1_error) + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":812 + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") + * + * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") + */ + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":824 + * + * # Output padding bytes + * while offset[0] < new_offset: # <<<<<<<<<<<<<< + * f[0] = 120 # "x"; pad byte + * f += 1 + */ + while (1) { + __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 824, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + 
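+ /* Annotation: this loop emits one 'x' (ASCII 120, the struct-module pad
+  * code) per byte of gap between the running offset and the next field's
+  * offset, keeping the buffer format string byte-accurate for padded
+  * dtypes. E.g., assuming np.dtype([('a', 'i1'), ('b', 'f8')], align=True),
+  * the gap between offsets 1 and 8 shows up as "xxxxxxx" in the format. */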
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 824, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 824, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (!__pyx_t_6) break; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":825 + * # Output padding bytes + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< + * f += 1 + * offset[0] += 1 + */ + (__pyx_v_f[0]) = 0x78; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":826 + * while offset[0] < new_offset: + * f[0] = 120 # "x"; pad byte + * f += 1 # <<<<<<<<<<<<<< + * offset[0] += 1 + * + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":827 + * f[0] = 120 # "x"; pad byte + * f += 1 + * offset[0] += 1 # <<<<<<<<<<<<<< + * + * offset[0] += child.itemsize + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":829 + * offset[0] += 1 + * + * offset[0] += child.itemsize # <<<<<<<<<<<<<< + * + * if not PyDataType_HASFIELDS(child): + */ + __pyx_t_8 = 0; + (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":831 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); + if (__pyx_t_6) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":832 + * + * if not PyDataType_HASFIELDS(child): + * t = child.type_num # <<<<<<<<<<<<<< + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") + */ + __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); + __pyx_t_4 = 0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":833 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); + if (unlikely(__pyx_t_6)) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":834 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 834, __pyx_L1_error) + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":833 + * if not PyDataType_HASFIELDS(child): + * t = child.type_num + * if end - f < 5: # <<<<<<<<<<<<<< + * raise RuntimeError(u"Format string allocated too short.") + * + */ + 
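+ /* Annotation: 5 bytes is a conservative bound on what one leaf field can
+  * append below -- at worst a two-character complex code plus the cursor
+  * advance, e.g.
+  *
+  *     f[0] = 'Z'; f[1] = 'd'; f += 2;   // NPY_CDOUBLE -> "Zd"
+  *
+  * leaving headroom for the terminating NUL that __getbuffer__ writes once
+  * the whole string is built (the `f[0] = c'\0'` quoted earlier). */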
} + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":837 + * + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 98; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":838 + * # Until ticket #99 is fixed, use integers to avoid warnings + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 66; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":839 + * if t == NPY_BYTE: f[0] = 98 #"b" + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x68; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":840 + * elif t == NPY_UBYTE: f[0] = 66 #"B" + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 72; + goto __pyx_L15; + } + + /* 
"../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":841 + * elif t == NPY_SHORT: f[0] = 104 #"h" + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x69; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":842 + * elif t == NPY_USHORT: f[0] = 72 #"H" + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 73; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":843 + * elif t == NPY_INT: f[0] = 105 #"i" + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 843, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 843, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 843, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x6C; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":844 + * elif t == NPY_UINT: f[0] = 73 #"I" + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 844, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 76; + goto __pyx_L15; + } + + /* 
"../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":845 + * elif t == NPY_LONG: f[0] = 108 #"l" + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 845, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 845, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 845, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x71; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":846 + * elif t == NPY_ULONG: f[0] = 76 #"L" + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 846, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 846, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 846, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 81; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":847 + * elif t == NPY_LONGLONG: f[0] = 113 #"q" + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 847, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x66; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":848 + * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 848, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 848, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 848, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + 
(__pyx_v_f[0]) = 0x64; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":849 + * elif t == NPY_FLOAT: f[0] = 102 #"f" + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 849, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 849, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 849, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 0x67; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":850 + * elif t == NPY_DOUBLE: f[0] = 100 #"d" + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 850, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x66; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":851 + * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 851, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x64; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":852 + * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + */ + __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 852, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + (__pyx_v_f[0]) = 90; + (__pyx_v_f[1]) = 0x67; + __pyx_v_f = (__pyx_v_f + 1); + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":853 + * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd + * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg + * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + */ + __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 853, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (likely(__pyx_t_6)) { + (__pyx_v_f[0]) = 79; + goto __pyx_L15; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":855 + * elif t == NPY_OBJECT: f[0] = 79 #"O" + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< + * f += 1 + * else: + */ + /*else*/ { + __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 855, __pyx_L1_error) + } + __pyx_L15:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":856 + * else: + * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) + * f += 1 # <<<<<<<<<<<<<< + * else: + * # Cython ignores struct boundary information ("T{...}"), + */ + __pyx_v_f = (__pyx_v_f + 1); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":831 + * offset[0] += child.itemsize + * + * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< + * t = child.type_num + * if end - f < 5: + */ + goto __pyx_L13; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":860 + * # Cython ignores struct boundary information ("T{...}"), + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< + * return f + * + */ + /*else*/ { + __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 860, __pyx_L1_error) + __pyx_v_f = __pyx_t_9; + } + __pyx_L13:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":805 + * cdef tuple fields + * + * for childname in descr.names: # <<<<<<<<<<<<<< + * fields = descr.fields[childname] + * child, new_offset = fields + */ + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; 
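+ /* Annotation (sketch): stripped of refcounting, the loop that just ended
+  * is roughly this logic from the quoted .pxd source:
+  *
+  *     for name in descr.names:
+  *         child, new_offset = descr.fields[name]
+  *         # emit b'x' padding up to new_offset, then advance offset
+  *         if not PyDataType_HASFIELDS(child):
+  *             # write the one/two-char code for child.type_num
+  *         else:
+  *             f = _util_dtypestring(child, f, end, offset)   # recurse
+  *
+  * e.g., assuming a packed np.dtype([('a', 'i4'), ('b', 'f8')]), the walk
+  * emits "id" with no pad bytes and returns the new write position. */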
+ + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":861 + * # so don't output it + * f = _util_dtypestring(child, f, end, offset) + * return f # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_f; + goto __pyx_L0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":796 + * return () + * + * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< + * # Recursive utility function used in __getbuffer__ to get format + * # string. The new location in the format string is returned. + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_child); + __Pyx_XDECREF(__pyx_v_fields); + __Pyx_XDECREF(__pyx_v_childname); + __Pyx_XDECREF(__pyx_v_new_offset); + __Pyx_XDECREF(__pyx_v_t); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":977 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + +static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { + PyObject *__pyx_v_baseptr; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + __Pyx_RefNannySetupContext("set_array_base", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":979 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + __pyx_t_1 = (__pyx_v_base == Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":980 + * cdef PyObject* baseptr + * if base is None: + * baseptr = NULL # <<<<<<<<<<<<<< + * else: + * Py_INCREF(base) # important to do this before decref below! + */ + __pyx_v_baseptr = NULL; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":979 + * cdef inline void set_array_base(ndarray arr, object base): + * cdef PyObject* baseptr + * if base is None: # <<<<<<<<<<<<<< + * baseptr = NULL + * else: + */ + goto __pyx_L3; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":982 + * baseptr = NULL + * else: + * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< + * baseptr = base + * Py_XDECREF(arr.base) + */ + /*else*/ { + Py_INCREF(__pyx_v_base); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":983 + * else: + * Py_INCREF(base) # important to do this before decref below! + * baseptr = base # <<<<<<<<<<<<<< + * Py_XDECREF(arr.base) + * arr.base = baseptr + */ + __pyx_v_baseptr = ((PyObject *)__pyx_v_base); + } + __pyx_L3:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":984 + * Py_INCREF(base) # important to do this before decref below! 
+ * baseptr = base + * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< + * arr.base = baseptr + * + */ + Py_XDECREF(__pyx_v_arr->base); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":985 + * baseptr = base + * Py_XDECREF(arr.base) + * arr.base = baseptr # <<<<<<<<<<<<<< + * + * cdef inline object get_array_base(ndarray arr): + */ + __pyx_v_arr->base = __pyx_v_baseptr; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":977 + * + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * cdef PyObject* baseptr + * if base is None: + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":987 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("get_array_base", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":988 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); + if (__pyx_t_1) { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":989 + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: + * return None # <<<<<<<<<<<<<< + * else: + * return arr.base + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":988 + * + * cdef inline object get_array_base(ndarray arr): + * if arr.base is NULL: # <<<<<<<<<<<<<< + * return None + * else: + */ + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":991 + * return None + * else: + * return arr.base # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); + __pyx_r = ((PyObject *)__pyx_v_arr->base); + goto __pyx_L0; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":987 + * arr.base = baseptr + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * if arr.base is NULL: + * return None + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996 + * # Versions of the import_* functions which are more suitable for + * # Cython code. + * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_array", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 + * # Cython code. 
+ * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":998 + * cdef inline int import_array() except -1: + * try: + * _import_array() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") + */ + __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 998, __pyx_L3_error) + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":999 + * try: + * _import_array() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.multiarray failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 999, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1000, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1000, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":997 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * _import_array() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":996 + * # Versions of the import_* functions which are more suitable for + * # Cython code. 
+ * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * _import_array() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_umath", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1003 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1004 + * cdef inline int import_umath() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1004, __pyx_L3_error) + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1003 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1005 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1005, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1006 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1006, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1006, __pyx_L5_except_error) + } + 
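+ /* Annotation: `except -1` on the cdef declaration is why these wrappers
+  * report failure through their C return value: the exit code sets
+  * __pyx_r = -1 on the error path and 0 on success, so a caller checks
+  * roughly
+  *
+  *     if (unlikely(__pyx_f_5numpy_import_umath() == -1)) goto __pyx_L1_error;
+  *
+  * instead of inspecting PyErr_Occurred() after every call. */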
goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1003 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1002 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1008 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + __Pyx_RefNannySetupContext("import_ufunc", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1010 + * cdef inline int import_ufunc() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1010, __pyx_L3_error) + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1011 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1011, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_5); + 
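+ /* Annotation: __Pyx_GOTREF / __Pyx_XGOTREF / __Pyx_GIVEREF and the
+  * RefNanny setup/finish calls are diagnostics only; unless the module is
+  * compiled with CYTHON_REFNANNY they expand to nothing, roughly:
+  *
+  *     #if !CYTHON_REFNANNY
+  *       #define __Pyx_GOTREF(r)
+  *       #define __Pyx_GIVEREF(r)
+  *     #endif
+  *
+  * With CYTHON_REFNANNY set they audit each reference-count transfer. */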
__Pyx_GOTREF(__pyx_t_6); + __Pyx_GOTREF(__pyx_t_7); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1012, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(1, 1012, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1009 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1008 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_soft_nms_cpu(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_soft_nms_cpu}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "soft_nms_cpu", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, + {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, + {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, + {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1}, + {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, + {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_n_s_arange, __pyx_k_arange, sizeof(__pyx_k_arange), 0, 0, 1, 1}, + {&__pyx_n_s_area, __pyx_k_area, sizeof(__pyx_k_area), 0, 0, 
1, 1}, + {&__pyx_n_s_box_area, __pyx_k_box_area, sizeof(__pyx_k_box_area), 0, 0, 1, 1}, + {&__pyx_n_s_boxes, __pyx_k_boxes, sizeof(__pyx_k_boxes), 0, 0, 1, 1}, + {&__pyx_n_s_boxes_in, __pyx_k_boxes_in, sizeof(__pyx_k_boxes_in), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_copy, __pyx_k_copy, sizeof(__pyx_k_copy), 0, 0, 1, 1}, + {&__pyx_n_s_exp, __pyx_k_exp, sizeof(__pyx_k_exp), 0, 0, 1, 1}, + {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, + {&__pyx_n_s_ih, __pyx_k_ih, sizeof(__pyx_k_ih), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_inds, __pyx_k_inds, sizeof(__pyx_k_inds), 0, 0, 1, 1}, + {&__pyx_n_s_iou_thr, __pyx_k_iou_thr, sizeof(__pyx_k_iou_thr), 0, 0, 1, 1}, + {&__pyx_n_s_iw, __pyx_k_iw, sizeof(__pyx_k_iw), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_maxpos, __pyx_k_maxpos, sizeof(__pyx_k_maxpos), 0, 0, 1, 1}, + {&__pyx_n_s_maxscore, __pyx_k_maxscore, sizeof(__pyx_k_maxscore), 0, 0, 1, 1}, + {&__pyx_n_s_method, __pyx_k_method, sizeof(__pyx_k_method), 0, 0, 1, 1}, + {&__pyx_n_s_min_score, __pyx_k_min_score, sizeof(__pyx_k_min_score), 0, 0, 1, 1}, + {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, + {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, + {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, + {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, + {&__pyx_kp_u_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 1, 0, 0}, + {&__pyx_kp_u_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 1, 0, 0}, + {&__pyx_n_s_ov, __pyx_k_ov, sizeof(__pyx_k_ov), 0, 0, 1, 1}, + {&__pyx_n_s_pos, __pyx_k_pos, sizeof(__pyx_k_pos), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 1}, + {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, + {&__pyx_n_s_sigma, __pyx_k_sigma, sizeof(__pyx_k_sigma), 0, 0, 1, 1}, + {&__pyx_n_s_soft_nms_cpu, __pyx_k_soft_nms_cpu, sizeof(__pyx_k_soft_nms_cpu), 0, 0, 1, 1}, + {&__pyx_kp_s_src_soft_nms_cpu_pyx, __pyx_k_src_soft_nms_cpu_pyx, sizeof(__pyx_k_src_soft_nms_cpu_pyx), 0, 0, 1, 0}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_ti, __pyx_k_ti, sizeof(__pyx_k_ti), 0, 0, 1, 1}, + {&__pyx_n_s_ts, __pyx_k_ts, sizeof(__pyx_k_ts), 0, 0, 1, 1}, + {&__pyx_n_s_tx1, __pyx_k_tx1, sizeof(__pyx_k_tx1), 0, 0, 1, 1}, + {&__pyx_n_s_tx2, __pyx_k_tx2, sizeof(__pyx_k_tx2), 0, 0, 1, 1}, + {&__pyx_n_s_ty1, __pyx_k_ty1, sizeof(__pyx_k_ty1), 0, 0, 1, 1}, + {&__pyx_n_s_ty2, __pyx_k_ty2, sizeof(__pyx_k_ty2), 0, 0, 1, 1}, + {&__pyx_n_s_ua, __pyx_k_ua, sizeof(__pyx_k_ua), 0, 0, 1, 1}, + {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, + {&__pyx_n_s_weight, __pyx_k_weight, sizeof(__pyx_k_weight), 0, 0, 1, 1}, + {&__pyx_n_s_x1, __pyx_k_x1, sizeof(__pyx_k_x1), 0, 0, 1, 1}, + {&__pyx_n_s_x2, __pyx_k_x2, sizeof(__pyx_k_x2), 0, 0, 1, 1}, + {&__pyx_n_s_y1, __pyx_k_y1, sizeof(__pyx_k_y1), 0, 
0, 1, 1}, + {&__pyx_n_s_y2, __pyx_k_y2, sizeof(__pyx_k_y2), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 39, __pyx_L1_error) + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 229, __pyx_L1_error) + __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 810, __pyx_L1_error) + __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1000, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":229 + * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): + * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< + * + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 229, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":233 + * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) + * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): + * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< + * + * info.buf = PyArray_DATA(self) + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 233, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":263 + * if ((descr.byteorder == c'>' and little_endian) or + * (descr.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * if t == NPY_BYTE: f = "b" + * elif t == NPY_UBYTE: f = "B" + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 263, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":810 + * + * if (end - f) - (new_offset - offset[0]) < 15: + * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< + * + * if ((child.byteorder == c'>' and little_endian) or + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 810, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":814 + * if ((child.byteorder == c'>' and little_endian) or + * (child.byteorder == c'<' and not little_endian)): + * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< + * # One could encode it in the format string and have Cython + * # complain instead, BUT: < and > in format strings also imply + */ + __pyx_tuple__5 = 
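+/* This function pre-packs each constant argument tuple exactly once --
+ * mostly the 1-tuples wrapping the error messages of the numpy buffer
+ * protocol -- so later raises of ValueError/RuntimeError/ImportError reuse
+ * them without allocating. The same pass freezes the code object for
+ * soft_nms_cpu (5 arguments, 30 locals) that tracebacks refer to.
+ * In spirit (sketch):
+ *
+ *     PyObject *msg  = PyUnicode_InternFromString("ndarray is not C contiguous");
+ *     PyObject *args = PyTuple_Pack(1, msg);   // cached for the module lifetime
+ */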
PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 814, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":834 + * t = child.type_num + * if end - f < 5: + * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< + * + * # Until ticket #99 is fixed, use integers to avoid warnings + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 834, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1000 + * _import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 1000, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1006 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 1006, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1012 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 1012, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "soft_nms_cpu.pyx":22 + * + * + * def soft_nms_cpu( # <<<<<<<<<<<<<< + * np.ndarray[float, ndim=2] boxes_in, + * float iou_thr, + */ + __pyx_tuple__10 = PyTuple_Pack(30, __pyx_n_s_boxes_in, __pyx_n_s_iou_thr, __pyx_n_s_method, __pyx_n_s_sigma, __pyx_n_s_min_score, __pyx_n_s_boxes, __pyx_n_s_N, __pyx_n_s_iw, __pyx_n_s_ih, __pyx_n_s_box_area, __pyx_n_s_ua, __pyx_n_s_pos, __pyx_n_s_maxscore, __pyx_n_s_maxpos, __pyx_n_s_x1, __pyx_n_s_x2, __pyx_n_s_y1, __pyx_n_s_y2, __pyx_n_s_tx1, __pyx_n_s_tx2, __pyx_n_s_ty1, __pyx_n_s_ty2, __pyx_n_s_ts, __pyx_n_s_area, __pyx_n_s_weight, __pyx_n_s_ov, __pyx_n_s_inds, __pyx_n_s_i, __pyx_n_s_ti, __pyx_n_s_s); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + __pyx_codeobj__11 = (PyObject*)__Pyx_PyCode_New(5, 0, 30, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__10, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_src_soft_nms_cpu_pyx, __pyx_n_s_soft_nms_cpu, 22, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__11)) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) 
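+/* __Pyx_InitGlobals() interns the string table and caches the small int
+ * objects 0-4 once at import. These presumably back the constant
+ * Python-level subscripts in the soft_nms_cpu body -- the (N, 5) boxes
+ * array keeps x1, y1, x2, y2, score in columns 0-4 -- so the loops never
+ * rebuild int objects. In spirit (sketch):
+ *
+ *     static PyObject *small_ints[5];
+ *     for (long v = 0; v < 5; v++) small_ints[v] = PyLong_FromLong(v);
+ */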
__PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static int __Pyx_modinit_global_init_code(void); /*proto*/ +static int __Pyx_modinit_variable_export_code(void); /*proto*/ +static int __Pyx_modinit_function_export_code(void); /*proto*/ +static int __Pyx_modinit_type_init_code(void); /*proto*/ +static int __Pyx_modinit_type_import_code(void); /*proto*/ +static int __Pyx_modinit_variable_import_code(void); /*proto*/ +static int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", + #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) + __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 164, __pyx_L1_error) + __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 186, __pyx_L1_error) + __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 190, __pyx_L1_error) + __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 199, __pyx_L1_error) + __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 872, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int 
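+/* The __Pyx_modinit_* family splits import-time setup into fixed phases;
+ * for this module only the type-import phase does real work: it fetches
+ * numpy's C-level types (dtype, flatiter, broadcast, ndarray, ufunc) and
+ * checks their struct sizes, failing the import on an incompatible binary
+ * layout instead of corrupting memory later. Conceptually (sketch, not the
+ * real helper):
+ *
+ *     PyObject *mod = PyImport_ImportModule("numpy");
+ *     PyTypeObject *tp = (PyTypeObject *)PyObject_GetAttrString(mod, "ndarray");
+ *     if (tp && (size_t)tp->tp_basicsize < sizeof(PyArrayObject))
+ *         PyErr_SetString(PyExc_RuntimeError, "numpy ABI mismatch");
+ */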
__Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#if PY_MAJOR_VERSION < 3 +#ifdef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC void +#else +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#endif +#else +#ifdef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#endif +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (!(defined(__cplusplus)) || (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 4))) + #define CYTHON_SMALL_CODE __attribute__((optimize("Os"))) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC initsoft_nms_cpu(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initsoft_nms_cpu(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_soft_nms_cpu(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_soft_nms_cpu(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + result = PyDict_SetItemString(moddict, to_name, value); + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static int __pyx_pymod_exec_soft_nms_cpu(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_soft_nms_cpu(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = 
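+/* PyInit_soft_nms_cpu() supports both init protocols: under PEP 489
+ * multi-phase init it merely returns PyModuleDef_Init(&__pyx_moduledef) and
+ * the interpreter drives the two slots -- __pyx_pymod_create builds the
+ * module object (copying loader/origin/parent off the ModuleSpec into
+ * __loader__/__file__/__package__) and __pyx_pymod_exec_soft_nms_cpu runs
+ * the module body. A stripped-down equivalent of the create slot (sketch):
+ *
+ *     static PyObject *create(PyObject *spec, PyModuleDef *def) {
+ *         PyObject *name = PyObject_GetAttrString(spec, "name");
+ *         PyObject *mod  = name ? PyModule_NewObject(name) : NULL;
+ *         Py_XDECREF(name);
+ *         return mod;
+ *     }
+ */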
PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? */ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("soft_nms_cpu", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + #if CYTHON_COMPILING_IN_PYPY + Py_INCREF(__pyx_b); + #endif + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. 
---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_soft_nms_cpu) { + if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "soft_nms_cpu")) { + if (unlikely(PyDict_SetItemString(modules, "soft_nms_cpu", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + (void)__Pyx_modinit_type_init_code(); + if (unlikely(__Pyx_modinit_type_import_code() != 0)) goto __pyx_L1_error; + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "soft_nms_cpu.pyx":11 + * # cython: language_level=3, boundscheck=False + * + * import numpy as np # <<<<<<<<<<<<<< + * cimport numpy as np + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":22 + * + * + * def soft_nms_cpu( # <<<<<<<<<<<<<< + * np.ndarray[float, ndim=2] boxes_in, + * float iou_thr, + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_12soft_nms_cpu_1soft_nms_cpu, NULL, __pyx_n_s_soft_nms_cpu); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_soft_nms_cpu, __pyx_t_1) < 0) __PYX_ERR(0, 22, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "soft_nms_cpu.pyx":1 + * # ---------------------------------------------------------- # <<<<<<<<<<<<<< + * # Soft-NMS: Improving Object Detection With One Line of Code + * # Copyright (c) University of Maryland, College Park + */ + __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "../../../../../anaconda3/lib/python3.6/site-packages/Cython/Includes/numpy/__init__.pxd":1008 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init soft_nms_cpu", 0, __pyx_lineno, __pyx_filename); + } + Py_DECREF(__pyx_m); __pyx_m = 0; + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init soft_nms_cpu"); + } + __pyx_L0:; + 
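+/* Success path: at this point the module body has run -- numpy is imported
+ * and bound to np, the soft_nms_cpu PyCFunction is published in the module
+ * dict, and an empty __test__ dict is set for doctest collection. From C
+ * the function can then be driven through the abstract API; a sketch,
+ * assuming `boxes` is an already-built float32 (N, 5) ndarray:
+ *
+ *     PyObject *m   = PyImport_ImportModule("soft_nms_cpu");
+ *     PyObject *ret = PyObject_CallMethod(m, "soft_nms_cpu", "(Od)",
+ *                                         boxes, 0.3);  // iou_thr = 0.3
+ *     // expected: rescored boxes plus surviving indices; NULL on error
+ */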
__Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule((char *)modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 :
+                    #endif
+                    PyUnicode_Compare(**argname, key);
+                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+                if (cmp == 0) goto arg_passed_twice;
+                argname++;
+            }
+        }
+    } else
+        goto invalid_keyword_type;
+    if (kwds2) {
+        if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+    } else {
+        goto invalid_keyword;
+    }
+    }
+    return 0;
+arg_passed_twice:
+    __Pyx_RaiseDoubleKeywordsError(function_name, key);
+    goto bad;
+invalid_keyword_type:
+    PyErr_Format(PyExc_TypeError,
+        "%.200s() keywords must be strings", function_name);
+    goto bad;
+invalid_keyword:
+    PyErr_Format(PyExc_TypeError,
+    #if PY_MAJOR_VERSION < 3
+        "%.200s() got an unexpected keyword argument '%.200s'",
+        function_name, PyString_AsString(key));
+    #else
+        "%s() got an unexpected keyword argument '%U'",
+        function_name, key);
+    #endif
+bad:
+    return -1;
+}
+
+/* ArgTypeTest */
+static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
+{
+    if (unlikely(!type)) {
+        PyErr_SetString(PyExc_SystemError, "Missing type object");
+        return 0;
+    }
+    else if (exact) {
+        #if PY_MAJOR_VERSION == 2
+        if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
+        #endif
+    }
+    else {
+        if (likely(__Pyx_TypeCheck(obj, type))) return 1;
+    }
+    PyErr_Format(PyExc_TypeError,
+        "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
+        name, type->tp_name, Py_TYPE(obj)->tp_name);
+    return 0;
+}
+
+/* IsLittleEndian */
+static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
+{
+    union {
+        uint32_t u32;
+        uint8_t u8[4];
+    } S;
+    S.u32 = 0x01020304;
+    return S.u8[0] == 4;
+}
+
+/* BufferFormatCheck */
+static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
+                              __Pyx_BufFmt_StackElem* stack,
+                              __Pyx_TypeInfo* type) {
+    stack[0].field = &ctx->root;
+    stack[0].parent_offset = 0;
+    ctx->root.type = type;
+    ctx->root.name = "buffer dtype";
+    ctx->root.offset = 0;
+    ctx->head = stack;
+    ctx->head->field = &ctx->root;
+    ctx->fmt_offset = 0;
+    ctx->head->parent_offset = 0;
+    ctx->new_packmode = '@';
+    ctx->enc_packmode = '@';
+    ctx->new_count = 1;
+    ctx->enc_count = 0;
+    ctx->enc_type = 0;
+    ctx->is_complex = 0;
+    ctx->is_valid_array = 0;
+    ctx->struct_alignment = 0;
+    while (type->typegroup == 'S') {
+        ++ctx->head;
+        ctx->head->field = type->fields;
+        ctx->head->parent_offset = 0;
+        type = type->fields->type;
+    }
+}
+static int __Pyx_BufFmt_ParseNumber(const char** ts) {
+    int count;
+    const char* t = *ts;
+    if (*t < '0' || *t > '9') {
+        return -1;
+    } else {
+        count = *t++ - '0';
+        while (*t >= '0' && *t <= '9') {
+            count *= 10;
+            count += *t++ - '0';
+        }
+    }
+    *ts = t;
+    return count;
+}
+static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
+    int number = __Pyx_BufFmt_ParseNumber(ts);
+    if (number == -1)
+        PyErr_Format(PyExc_ValueError,\
+                     "Does not understand character buffer dtype format string ('%c')", **ts);
+    return number;
+}
+static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
+    PyErr_Format(PyExc_ValueError,
+        "Unexpected format string character: '%c'", ch);
+}
+static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
+    switch (ch) {
+    case 'c': return "'char'";
+    case 'b': return "'signed char'";
+    case 'B': return "'unsigned char'";
+    case 'h': return "'short'";
+    case 'H': return "'unsigned short'";
+    case 'i': return "'int'";
+    case 'I': return "'unsigned int'";
+    case 'l': return "'long'";
+    case 'L': return "'unsigned long'";
+    case 'q': return "'long long'";
+    case 'Q': return "'unsigned long long'";
+    case 'f': return (is_complex ?
"'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparseable format string"; + } +} +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 2 : 1); + case 'O': case 'P': return sizeof(void*); + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +typedef struct { char c; short x; } __Pyx_st_short; +typedef struct { char c; int x; } __Pyx_st_int; +typedef struct { char c; long x; } __Pyx_st_long; +typedef struct { char c; float x; } __Pyx_st_float; +typedef struct { char c; double x; } __Pyx_st_double; +typedef struct { char c; long double x; } __Pyx_st_longdouble; +typedef struct { char c; void *x; } __Pyx_st_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_st_float) - sizeof(float); + case 'd': return sizeof(__Pyx_st_double) - sizeof(double); + case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +/* These are for computing the padding at the end of the struct to align + on the first member of the struct. This will probably the same as above, + but we don't have any guarantees. 
+ */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, 
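+/* The __Pyx_st_* and __Pyx_pad_* helper structs just above implement the
+ * classic portable alignment probe: for a type T,
+ *
+ *     alignment(T) == sizeof(struct { char c; T x; }) - sizeof(T)
+ *
+ * (the st_* variant), while the pad_* variant with the trailing char
+ * measures the padding required after the last member. TypeCharToAlignment
+ * and TypeCharToPadding feed these into the '@' (native packing) branch
+ * below, which rounds fmt_offset up to each member's alignment.
+ */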
ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if (field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; +} +static PyObject * +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number; + int ndim = ctx->head->field->type->ndim; +; + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + while (*ts && *ts != ')') { + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; + } + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) + return PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + if (*ts != ',' && *ts != ')') + return PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + if (*ts == ',') ts++; + i++; + } + if (i != ndim) + return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return NULL; + } + 
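+/* This parser family enforces the buffer contract declared in the .pyx
+ * signature (np.ndarray[float, ndim=2] boxes_in): the ndarray's PEP 3118
+ * format string is walked and checked against the declared C type, so
+ * passing e.g. a float64 array fails with "Buffer dtype mismatch, expected
+ * 'float' but got 'double'" instead of reinterpreting memory. Acquisition
+ * reduces to roughly (sketch):
+ *
+ *     Py_buffer view;
+ *     if (PyObject_GetBuffer(obj, &view, PyBUF_FORMAT | PyBUF_STRIDES) == 0
+ *         && view.ndim == 2 && view.itemsize == sizeof(float)) {
+ *         ... format-string walk as above, then index view.buf directly ...
+ *     }
+ */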
ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return Py_None; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if (struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + CYTHON_FALLTHROUGH; + case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if (ctx->enc_type == *ts && got_Z == ctx->is_complex && + ctx->enc_packmode == ctx->new_packmode) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + CYTHON_FALLTHROUGH; + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* BufferGetAndValidate */ + static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { + if (unlikely(info->buf == NULL)) return; + if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; + 
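+/* Release mirrors acquisition: __Pyx__GetBufferAndValidate() below either
+ * fills the Py_buffer or parks it on the shared __Pyx_zeros/__Pyx_minusones
+ * sentinels via __Pyx_ZeroBuffer(), so the release call here is safe on
+ * every exit path of soft_nms_cpu, including error paths. The generated
+ * body follows the pattern (sketch, simplified names):
+ *
+ *     if (__Pyx_GetBufferAndValidate(&view, obj, &dtype_float, flags, 2, 0, stack) < 0)
+ *         goto fail;                     // view already zeroed, safe to release
+ *     ... NMS loop over view.buf ...
+ *     __Pyx_SafeReleaseBuffer(&view);
+ */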
__Pyx_ReleaseBuffer(info); +} +static void __Pyx_ZeroBuffer(Py_buffer* buf) { + buf->buf = NULL; + buf->obj = NULL; + buf->strides = __Pyx_zeros; + buf->shape = __Pyx_zeros; + buf->suboffsets = __Pyx_minusones; +} +static int __Pyx__GetBufferAndValidate( + Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, + int nd, int cast, __Pyx_BufFmt_StackElem* stack) +{ + buf->buf = NULL; + if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) { + __Pyx_ZeroBuffer(buf); + return -1; + } + if (unlikely(buf->ndim != nd)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + nd, buf->ndim); + goto fail; + } + if (!cast) { + __Pyx_BufFmt_Context ctx; + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; + } + if (unlikely((unsigned)buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", + buf->itemsize, (buf->itemsize > 1) ? "s" : "", + dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); + goto fail; + } + if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; + return 0; +fail:; + __Pyx_SafeReleaseBuffer(buf); + return -1; +} + +/* PyCFunctionFastCall */ + #if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); + int flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs); + } +} +#endif + +/* PyFunctionFastCall */ + #if CYTHON_FAST_PYCALL +#include "frameobject.h" +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. 
+ */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = f->f_localsplus; + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyObjectCall */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallMethO */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = 
PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallOneArg */ + #if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, &arg, 1); + } +#endif + if (likely(PyCFunction_Check(func))) { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, arg); +#if CYTHON_FAST_PYCCALL + } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif + } + } + return __Pyx__PyObject_CallOneArg(func, arg); +} +#else +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +#endif + +/* PyObjectCallNoArg */ + #if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, NULL, 0); + } +#endif +#ifdef __Pyx_CyFunction_USED + if (likely(PyCFunction_Check(func) || __Pyx_TypeCheck(func, __pyx_CyFunctionType))) { +#else + if (likely(PyCFunction_Check(func))) { +#endif + if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { + return __Pyx_PyObject_CallMethO(func, NULL); + } + } + return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); +} +#endif + +/* GetItemInt */ + static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + PyObject *r; + if (!j) return NULL; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyList_GET_SIZE(o); + } + if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyList_GET_SIZE(o)))) { + PyObject *r = PyList_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyTuple_GET_SIZE(o); + } + if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else 
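+/* Fallback branch: without CPython's unsafe macros the helper defers to
+ * PySequence_GetItem below. On CPython proper, each c[i] call site is
+ * compiled with its wraparound/boundscheck flags baked in, and because
+ * soft_nms_cpu.pyx declares boundscheck=False (see the directive quoted in
+ * the module body above), hot subscripts reduce to (sketch):
+ *
+ *     PyObject *r = PyList_GET_ITEM(o, i);   // no bounds check, borrowed ref
+ *     Py_INCREF(r);                          // promoted to a new reference
+ *     return r;
+ */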
+ return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); + if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { + PyObject *r = PyList_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } + else if (PyTuple_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); + if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } else { + PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; + if (likely(m && m->sq_item)) { + if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { + Py_ssize_t l = m->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return NULL; + PyErr_Clear(); + } + } + return m->sq_item(o, i); + } + } +#else + if (is_list || PySequence_Check(o)) { + return PySequence_GetItem(o, i); + } +#endif + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +} + +/* GetModuleGlobalName */ + static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + if (likely(result)) { + Py_INCREF(result); + } else if (unlikely(PyErr_Occurred())) { + result = NULL; + } else { +#else + result = PyDict_GetItem(__pyx_d, name); + if (likely(result)) { + Py_INCREF(result); + } else { +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + if (!result) { + PyErr_Clear(); +#endif + result = __Pyx_GetBuiltinName(name); + } + return result; +} + +/* ObjectGetItem */ + #if CYTHON_USE_TYPE_SLOTS +static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { + PyObject *runerr; + Py_ssize_t key_value; + PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; + if (unlikely(!(m && m->sq_item))) { + PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); + return NULL; + } + key_value = __Pyx_PyIndex_AsSsize_t(index); + if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { + return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); + } + if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { + PyErr_Clear(); + PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); + } + return NULL; +} +static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { + PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; + if (likely(m && m->mp_subscript)) { + return m->mp_subscript(obj, key); + } + return __Pyx_PyObject_GetIndex(obj, key); +} +#endif + +/* PyIntBinop */ + #if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(op1))) { + const long b = intval; + long x; + long a = PyInt_AS_LONG(op1); + x = (long)((unsigned long)a + b); + if (likely((x^a) >= 0 || (x^b) >= 0)) + return PyInt_FromLong(x); + return PyLong_Type.tp_as_number->nb_add(op1, op2); + } + #endif + 
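+/* __Pyx_PyInt_AddObjC() inlines "x + C" for a compile-time-constant C
+ * (e.g. the pos + 1 steps of the NMS loops). The Python 2 branch above adds
+ * in unsigned arithmetic to sidestep signed-overflow UB, then tests the
+ * result sign against both operands -- overflow happened only if it differs
+ * from both. As a standalone sketch:
+ *
+ *     long a = ..., b = ...;
+ *     long x = (long)((unsigned long)a + (unsigned long)b);
+ *     int overflowed = ((x ^ a) < 0) && ((x ^ b) < 0);
+ *
+ * The PyLong branch below unrolls ob_digit for |Py_SIZE| <= 4 so small ints
+ * never round-trip through the generic nb_add slot.
+ */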
#if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact(op1))) { + const long b = intval; + long a, x; +#ifdef HAVE_LONG_LONG + const PY_LONG_LONG llb = intval; + PY_LONG_LONG lla, llx; +#endif + const digit* digits = ((PyLongObject*)op1)->ob_digit; + const Py_ssize_t size = Py_SIZE(op1); + if (likely(__Pyx_sst_abs(size) <= 1)) { + a = likely(size) ? digits[0] : 0; + if (size == -1) a = -a; + } else { + switch (size) { + case -2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case -3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case -4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + default: return 
PyLong_Type.tp_as_number->nb_add(op1, op2); + } + } + x = a + b; + return PyLong_FromLong(x); +#ifdef HAVE_LONG_LONG + long_long: + llx = lla + llb; + return PyLong_FromLongLong(llx); +#endif + + + } + #endif + if (PyFloat_CheckExact(op1)) { + const long b = intval; + double a = PyFloat_AS_DOUBLE(op1); + double result; + PyFPE_START_PROTECT("add", return NULL) + result = ((double)a) + (double)b; + PyFPE_END_PROTECT(result) + return PyFloat_FromDouble(result); + } + return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); +} +#endif + +/* SetItemInt */ + static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { + int r; + if (!j) return -1; + r = PyObject_SetItem(o, j, v); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, + CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o)); + if ((!boundscheck) || likely((n >= 0) & (n < PyList_GET_SIZE(o)))) { + PyObject* old = PyList_GET_ITEM(o, n); + Py_INCREF(v); + PyList_SET_ITEM(o, n, v); + Py_DECREF(old); + return 1; + } + } else { + PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; + if (likely(m && m->sq_ass_item)) { + if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { + Py_ssize_t l = m->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return -1; + PyErr_Clear(); + } + } + return m->sq_ass_item(o, i, v); + } + } +#else +#if CYTHON_COMPILING_IN_PYPY + if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) { +#else + if (is_list || PySequence_Check(o)) { +#endif + return PySequence_SetItem(o, i, v); + } +#endif + return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); +} + +/* SliceObject */ + static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, + Py_ssize_t cstart, Py_ssize_t cstop, + PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, + int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { +#if CYTHON_USE_TYPE_SLOTS + PyMappingMethods* mp; +#if PY_MAJOR_VERSION < 3 + PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; + if (likely(ms && ms->sq_slice)) { + if (!has_cstart) { + if (_py_start && (*_py_start != Py_None)) { + cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); + if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; + } else + cstart = 0; + } + if (!has_cstop) { + if (_py_stop && (*_py_stop != Py_None)) { + cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); + if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; + } else + cstop = PY_SSIZE_T_MAX; + } + if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { + Py_ssize_t l = ms->sq_length(obj); + if (likely(l >= 0)) { + if (cstop < 0) { + cstop += l; + if (cstop < 0) cstop = 0; + } + if (cstart < 0) { + cstart += l; + if (cstart < 0) cstart = 0; + } + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + goto bad; + PyErr_Clear(); + } + } + return ms->sq_slice(obj, cstart, cstop); + } +#endif + mp = Py_TYPE(obj)->tp_as_mapping; + if (likely(mp && mp->mp_subscript)) +#endif + { + PyObject* result; + PyObject *py_slice, *py_start, *py_stop; + if (_py_slice) { + py_slice = *_py_slice; + } else { + PyObject* owned_start = NULL; + PyObject* owned_stop = NULL; + if (_py_start) { + py_start = 
*_py_start; + } else { + if (has_cstart) { + owned_start = py_start = PyInt_FromSsize_t(cstart); + if (unlikely(!py_start)) goto bad; + } else + py_start = Py_None; + } + if (_py_stop) { + py_stop = *_py_stop; + } else { + if (has_cstop) { + owned_stop = py_stop = PyInt_FromSsize_t(cstop); + if (unlikely(!py_stop)) { + Py_XDECREF(owned_start); + goto bad; + } + } else + py_stop = Py_None; + } + py_slice = PySlice_New(py_start, py_stop, Py_None); + Py_XDECREF(owned_start); + Py_XDECREF(owned_stop); + if (unlikely(!py_slice)) goto bad; + } +#if CYTHON_USE_TYPE_SLOTS + result = mp->mp_subscript(obj, py_slice); +#else + result = PyObject_GetItem(obj, py_slice); +#endif + if (!_py_slice) { + Py_DECREF(py_slice); + } + return result; + } + PyErr_Format(PyExc_TypeError, + "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name); +bad: + return NULL; +} + +/* PyErrFetchRestore */ + #if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* RaiseException */ + #if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject 
*instance_class = NULL;
+        if (value && PyExceptionInstance_Check(value)) {
+            instance_class = (PyObject*) Py_TYPE(value);
+            if (instance_class != type) {
+                int is_subclass = PyObject_IsSubclass(instance_class, type);
+                if (!is_subclass) {
+                    instance_class = NULL;
+                } else if (unlikely(is_subclass == -1)) {
+                    goto bad;
+                } else {
+                    type = instance_class;
+                }
+            }
+        }
+        if (!instance_class) {
+            PyObject *args;
+            if (!value)
+                args = PyTuple_New(0);
+            else if (PyTuple_Check(value)) {
+                Py_INCREF(value);
+                args = value;
+            } else
+                args = PyTuple_Pack(1, value);
+            if (!args)
+                goto bad;
+            owned_instance = PyObject_Call(type, args, NULL);
+            Py_DECREF(args);
+            if (!owned_instance)
+                goto bad;
+            value = owned_instance;
+            if (!PyExceptionInstance_Check(value)) {
+                PyErr_Format(PyExc_TypeError,
+                             "calling %R should have returned an instance of "
+                             "BaseException, not %R",
+                             type, Py_TYPE(value));
+                goto bad;
+            }
+        }
+    } else {
+        PyErr_SetString(PyExc_TypeError,
+            "raise: exception class must be a subclass of BaseException");
+        goto bad;
+    }
+    if (cause) {
+        PyObject *fixed_cause;
+        if (cause == Py_None) {
+            fixed_cause = NULL;
+        } else if (PyExceptionClass_Check(cause)) {
+            fixed_cause = PyObject_CallObject(cause, NULL);
+            if (fixed_cause == NULL)
+                goto bad;
+        } else if (PyExceptionInstance_Check(cause)) {
+            fixed_cause = cause;
+            Py_INCREF(fixed_cause);
+        } else {
+            PyErr_SetString(PyExc_TypeError,
+                            "exception causes must derive from "
+                            "BaseException");
+            goto bad;
+        }
+        PyException_SetCause(value, fixed_cause);
+    }
+    PyErr_SetObject(type, value);
+    if (tb) {
+#if CYTHON_COMPILING_IN_PYPY
+        PyObject *tmp_type, *tmp_value, *tmp_tb;
+        PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
+        Py_INCREF(tb);
+        PyErr_Restore(tmp_type, tmp_value, tb);
+        Py_XDECREF(tmp_tb);
+#else
+        PyThreadState *tstate = __Pyx_PyThreadState_Current;
+        PyObject* tmp_tb = tstate->curexc_traceback;
+        if (tb != tmp_tb) {
+            Py_INCREF(tb);
+            tstate->curexc_traceback = tb;
+            Py_XDECREF(tmp_tb);
+        }
+#endif
+    }
+bad:
+    Py_XDECREF(owned_instance);
+    return;
+}
+#endif
+
+/* DictGetItem */
+ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
+static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
+    PyObject *value;
+    value = PyDict_GetItemWithError(d, key);
+    if (unlikely(!value)) {
+        if (!PyErr_Occurred()) {
+            PyObject* args = PyTuple_Pack(1, key);
+            if (likely(args))
+                PyErr_SetObject(PyExc_KeyError, args);
+            Py_XDECREF(args);
+        }
+        return NULL;
+    }
+    Py_INCREF(value);
+    return value;
+}
+#endif
+
+/* RaiseTooManyValuesToUnpack */
+ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
+    PyErr_Format(PyExc_ValueError,
+                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
+}
+
+/* RaiseNeedMoreValuesToUnpack */
+ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
+    PyErr_Format(PyExc_ValueError,
+                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
+                 index, (index == 1) ? "" : "s");
+}
+
+/* RaiseNoneIterError */
+ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
+    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+}
+
+/* ExtTypeTest */
+ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
+    if (unlikely(!type)) {
+        PyErr_SetString(PyExc_SystemError, "Missing type object");
+        return 0;
+    }
+    if (likely(__Pyx_TypeCheck(obj, type)))
+        return 1;
+    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
+                 Py_TYPE(obj)->tp_name, type->tp_name);
+    return 0;
+}
+
+/* SaveResetException */
+ #if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+    #if PY_VERSION_HEX >= 0x030700A2
+    *type = tstate->exc_state.exc_type;
+    *value = tstate->exc_state.exc_value;
+    *tb = tstate->exc_state.exc_traceback;
+    #else
+    *type = tstate->exc_type;
+    *value = tstate->exc_value;
+    *tb = tstate->exc_traceback;
+    #endif
+    Py_XINCREF(*type);
+    Py_XINCREF(*value);
+    Py_XINCREF(*tb);
+}
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+    PyObject *tmp_type, *tmp_value, *tmp_tb;
+    #if PY_VERSION_HEX >= 0x030700A2
+    tmp_type = tstate->exc_state.exc_type;
+    tmp_value = tstate->exc_state.exc_value;
+    tmp_tb = tstate->exc_state.exc_traceback;
+    tstate->exc_state.exc_type = type;
+    tstate->exc_state.exc_value = value;
+    tstate->exc_state.exc_traceback = tb;
+    #else
+    tmp_type = tstate->exc_type;
+    tmp_value = tstate->exc_value;
+    tmp_tb = tstate->exc_traceback;
+    tstate->exc_type = type;
+    tstate->exc_value = value;
+    tstate->exc_traceback = tb;
+    #endif
+    Py_XDECREF(tmp_type);
+    Py_XDECREF(tmp_value);
+    Py_XDECREF(tmp_tb);
+}
+#endif
+
+/* PyErrExceptionMatches */
+ #if CYTHON_FAST_THREAD_STATE
+static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+    Py_ssize_t i, n;
+    n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+    for (i=0; i<n; i++) {
+        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+    }
+#endif
+    for (i=0; i<n; i++) {
+        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
+    }
+    return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatches(PyObject* err) {
+    PyThreadState *tstate = __Pyx_PyThreadState_Current;
+    PyObject *exc_type = tstate->curexc_type;
+    if (exc_type == err) return 1;
+    if (unlikely(!exc_type)) return 0;
+    if (unlikely(PyTuple_Check(err)))
+        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
+    return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
+}
+#endif
+
+/* GetException */
+ #if CYTHON_FAST_THREAD_STATE
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
+#endif
+    PyObject *local_type, *local_value, *local_tb;
+#if CYTHON_FAST_THREAD_STATE
+    PyObject *tmp_type, *tmp_value, *tmp_tb;
+    local_type = tstate->curexc_type;
+    local_value = tstate->curexc_value;
+    local_tb = tstate->curexc_traceback;
+    tstate->curexc_type = 0;
+    tstate->curexc_value = 0;
+    tstate->curexc_traceback = 0;
+#else
+    PyErr_Fetch(&local_type, &local_value, &local_tb);
+#endif
+    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
+#if CYTHON_FAST_THREAD_STATE
+    if (unlikely(tstate->curexc_type))
+#else
+    if (unlikely(PyErr_Occurred()))
+#endif
+        goto bad;
+    #if PY_MAJOR_VERSION >= 3
+    if (local_tb) {
+        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
+            goto bad;
+    }
+    #endif
+    Py_XINCREF(local_tb);
+    Py_XINCREF(local_type);
+    Py_XINCREF(local_value);
+    *type = local_type;
+    *value = local_value;
+    *tb = local_tb;
+#if CYTHON_FAST_THREAD_STATE
+    #if PY_VERSION_HEX >= 0x030700A2
+    tmp_type = tstate->exc_state.exc_type;
+    tmp_value = tstate->exc_state.exc_value;
+ tmp_tb = tstate->exc_state.exc_traceback; + tstate->exc_state.exc_type = local_type; + tstate->exc_state.exc_value = local_value; + tstate->exc_state.exc_traceback = local_tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* Import */ + static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if (strchr(__Pyx_MODULE_NAME, '.')) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* CLineInTraceback */ + #ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + use_cline = __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (PyObject_Not(use_cline) != 0) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ + static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ + #include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, 
c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + + /* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if 
(is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) { + const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(unsigned int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(unsigned int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(unsigned int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(unsigned int), + little, !is_unsigned); + } +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return ::std::complex< float >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return x + y*(__pyx_t_float_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + __pyx_t_float_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, 
__pyx_t_float_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabsf(b.real) >= fabsf(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + float r = b.imag / b.real; + float s = 1.0 / (b.real + b.imag * r); + return __pyx_t_float_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + float r = b.real / b.imag; + float s = 1.0 / (b.imag + b.real * r); + return __pyx_t_float_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + float denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_float_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrtf(z.real*z.real + z.imag*z.imag); + #else + return hypotf(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + float r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + float denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(a, a); + case 3: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, a); + case 4: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = powf(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + 
theta = 0; + } else { + r = -a.real; + theta = atan2f(0, -1); + } + } else { + r = __Pyx_c_abs_float(a); + theta = atan2f(a.imag, a.real); + } + lnr = logf(r); + z_r = expf(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cosf(z_theta); + z.imag = z_r * sinf(z_theta); + return z; + } + #endif +#endif + +/* Declarations */ + #if CYTHON_CCOMPLEX + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return ::std::complex< double >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return x + y*(__pyx_t_double_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + __pyx_t_double_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs(b.real) >= fabs(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + double r = b.imag / b.real; + double s = 1.0 / (b.real + b.imag * r); + return __pyx_t_double_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + double r = b.real / b.imag; + double s = 1.0 / (b.imag + b.real * r); + return __pyx_t_double_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + double denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_double_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE double 
__Pyx_c_abs_double(__pyx_t_double_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt(z.real*z.real + z.imag*z.imag); + #else + return hypot(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + double r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + double denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(a, a); + case 3: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, a); + case 4: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if (b.imag == 0) { + z.real = pow(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2(0, -1); + } + } else { + r = __Pyx_c_abs_double(a); + theta = atan2(a.imag, a.real); + } + lnr = log(r); + z_r = exp(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos(z_theta); + z.imag = z_r * sin(z_theta); + return z; + } + #endif +#endif + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { + const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(enum NPY_TYPES) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(enum NPY_TYPES) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), + little, !is_unsigned); + } +} + +/* CIntFromPy */ + static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { + const unsigned int neg_one = (unsigned int) -1, const_zero = (unsigned int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(unsigned int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (unsigned int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, digits[0]) + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, 
(((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 2 * PyLong_SHIFT) { + return (unsigned int) (((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 3 * PyLong_SHIFT) { + return (unsigned int) (((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) >= 4 * PyLong_SHIFT) { + return (unsigned int) (((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (unsigned int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(unsigned int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned int) 0; + case -1: __PYX_VERIFY_RETURN_INT(unsigned int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(unsigned int, digit, +digits[0]) + case -2: + if (8 * sizeof(unsigned int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(unsigned int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + return (unsigned int) ((((((unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(unsigned int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * 
sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(unsigned int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + return (unsigned int) ((((((((unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(unsigned int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) (((unsigned int)-1)*(((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(unsigned int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned int) - 1 > 4 * PyLong_SHIFT) { + return (unsigned int) ((((((((((unsigned int)digits[3]) << PyLong_SHIFT) | (unsigned int)digits[2]) << PyLong_SHIFT) | (unsigned int)digits[1]) << PyLong_SHIFT) | (unsigned int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(unsigned int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + unsigned int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (unsigned int) -1; + } + } else { + unsigned int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (unsigned int) -1; + val = __Pyx_PyInt_As_unsigned_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to unsigned int"); + return (unsigned int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to unsigned int"); + return (unsigned int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE 
int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) -1, const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value 
to int"); + return (int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) -1, const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * 
PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + 
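+    /* Shared error exits for the conversion paths above: raise_overflow
+       reports a magnitude that cannot be represented in a C long,
+       raise_neg_overflow a negative value where an unsigned result was
+       required. */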
PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* FastTypeChecks */ + #if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { + if (likely(err == exc_type)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } + return PyErr_GivenExceptionMatches(err, exc_type); +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { + if (likely(err == exc_type1 || err == exc_type2)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); + } + return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); +} +#endif + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* ModuleImport */ + #ifndef __PYX_HAVE_RT_ImportModule +#define __PYX_HAVE_RT_ImportModule +static PyObject *__Pyx_ImportModule(const char *name) { + PyObject *py_name = 0; + PyObject *py_module = 0; + py_name = __Pyx_PyIdentifier_FromString(name); + if (!py_name) + goto bad; + py_module = PyImport_Import(py_name); + Py_DECREF(py_name); + return py_module; +bad: + Py_XDECREF(py_name); + return 0; +} +#endif + +/* TypeImport */ + #ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType 
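+/* __Pyx_ImportType fetches `class_name` from `module_name`, verifies it is a
+   type object, and compares its runtime tp_basicsize against the size this
+   module was compiled against: a mismatch fails the import (or only warns,
+   when `strict` is off and the runtime type merely grew), so binary
+   incompatibilities are caught at import time rather than as corruption. */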
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, + size_t size, int strict) +{ + PyObject *py_module = 0; + PyObject *result = 0; + PyObject *py_name = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + py_module = __Pyx_ImportModule(module_name); + if (!py_module) + goto bad; + py_name = __Pyx_PyIdentifier_FromString(class_name); + if (!py_name) + goto bad; + result = PyObject_GetAttr(py_module, py_name); + Py_DECREF(py_name); + py_name = 0; + Py_DECREF(py_module); + py_module = 0; + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if (!strict && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", + module_name, class_name, basicsize, size); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + else if ((size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s has the wrong size, try recompiling. Expected %zd, got %zd", + module_name, class_name, basicsize, size); + goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(py_module); + Py_XDECREF(result); + return NULL; +} +#endif + +/* InitStrings */ + static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + return -1; + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, 
Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(x); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = 
Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/CDARTS_detection/mmdet/ops/nms/src/soft_nms_cpu.pyx b/CDARTS_detection/mmdet/ops/nms/src/soft_nms_cpu.pyx new file mode 100644 index 0000000..c35f8f1 --- /dev/null +++ b/CDARTS_detection/mmdet/ops/nms/src/soft_nms_cpu.pyx @@ -0,0 +1,127 @@ +# ---------------------------------------------------------- +# Soft-NMS: Improving Object Detection With One Line of Code +# Copyright (c) University of Maryland, College Park +# Licensed under The MIT License [see LICENSE for details] +# Written by Navaneeth Bodla and Bharat Singh +# Modified by Kai Chen +# ---------------------------------------------------------- + +# cython: language_level=3, boundscheck=False + +import numpy as np +cimport numpy as np + + +cdef inline np.float32_t max(np.float32_t a, np.float32_t b): + return a if a >= b else b + +cdef inline np.float32_t min(np.float32_t a, np.float32_t b): + return a if a <= b else b + + +def soft_nms_cpu( + np.ndarray[float, ndim=2] boxes_in, + float iou_thr, + unsigned int method=1, + float sigma=0.5, + float min_score=0.001, +): + boxes = boxes_in.copy() + cdef unsigned int N = boxes.shape[0] + cdef float iw, ih, box_area + cdef float ua + cdef int pos = 0 + cdef float maxscore = 0 + cdef int maxpos = 0 + cdef float x1, x2, y1, y2, tx1, tx2, ty1, ty2, ts, area, weight, ov + inds = np.arange(N) + + for i in range(N): + maxscore = boxes[i, 4] + maxpos = i + + tx1 = boxes[i, 0] + ty1 = boxes[i, 1] + tx2 = boxes[i, 2] + ty2 = boxes[i, 3] + ts = boxes[i, 4] + ti = inds[i] + + pos = i + 1 + # get max box + while pos < N: + if maxscore < boxes[pos, 4]: + maxscore = boxes[pos, 4] + maxpos = pos + pos = pos + 1 + + # add max box as a detection + boxes[i, 0] = boxes[maxpos, 0] + boxes[i, 1] = boxes[maxpos, 1] + boxes[i, 2] = 
boxes[maxpos, 2]
+        boxes[i, 3] = boxes[maxpos, 3]
+        boxes[i, 4] = boxes[maxpos, 4]
+        inds[i] = inds[maxpos]
+
+        # swap ith box with position of max box
+        boxes[maxpos, 0] = tx1
+        boxes[maxpos, 1] = ty1
+        boxes[maxpos, 2] = tx2
+        boxes[maxpos, 3] = ty2
+        boxes[maxpos, 4] = ts
+        inds[maxpos] = ti
+
+        tx1 = boxes[i, 0]
+        ty1 = boxes[i, 1]
+        tx2 = boxes[i, 2]
+        ty2 = boxes[i, 3]
+        ts = boxes[i, 4]
+
+        pos = i + 1
+        # NMS iterations, note that N changes if detection boxes fall below
+        # threshold
+        while pos < N:
+            x1 = boxes[pos, 0]
+            y1 = boxes[pos, 1]
+            x2 = boxes[pos, 2]
+            y2 = boxes[pos, 3]
+            s = boxes[pos, 4]
+
+            area = (x2 - x1 + 1) * (y2 - y1 + 1)
+            iw = (min(tx2, x2) - max(tx1, x1) + 1)
+            if iw > 0:
+                ih = (min(ty2, y2) - max(ty1, y1) + 1)
+                if ih > 0:
+                    ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
+                    ov = iw * ih / ua  # iou between max box and detection box
+
+                    if method == 1:  # linear
+                        if ov > iou_thr:
+                            weight = 1 - ov
+                        else:
+                            weight = 1
+                    elif method == 2:  # gaussian
+                        weight = np.exp(-(ov * ov) / sigma)
+                    else:  # original NMS
+                        if ov > iou_thr:
+                            weight = 0
+                        else:
+                            weight = 1
+
+                    boxes[pos, 4] = weight * boxes[pos, 4]
+
+                    # if box score falls below threshold, discard the box by
+                    # swapping with last box, and update N
+                    if boxes[pos, 4] < min_score:
+                        boxes[pos, 0] = boxes[N-1, 0]
+                        boxes[pos, 1] = boxes[N-1, 1]
+                        boxes[pos, 2] = boxes[N-1, 2]
+                        boxes[pos, 3] = boxes[N-1, 3]
+                        boxes[pos, 4] = boxes[N-1, 4]
+                        inds[pos] = inds[N - 1]
+                        N = N - 1
+                        pos = pos - 1
+
+            pos = pos + 1
+
+    return boxes[:N], inds[:N]
diff --git a/CDARTS_detection/mmdet/ops/roi_align/__init__.py b/CDARTS_detection/mmdet/ops/roi_align/__init__.py
new file mode 100644
index 0000000..6da9829
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/roi_align/__init__.py
@@ -0,0 +1,3 @@
+from .roi_align import RoIAlign, roi_align
+
+__all__ = ['roi_align', 'RoIAlign']
diff --git a/CDARTS_detection/mmdet/ops/roi_align/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/roi_align/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f4c212a814be57a42872b9dd5cf1e4aba7b45c52
GIT binary patch
diff --git a/CDARTS_detection/mmdet/ops/roi_align/src/roi_align_cuda.cpp b/CDARTS_detection/mmdet/ops/roi_align/src/roi_align_cuda.cpp
new file mode 100644
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/roi_align/src/roi_align_cuda.cpp
+#include <torch/extension.h>
+
+#include <cmath>
+#include <vector>
+
+int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois,
+                           const float spatial_scale, const int sample_num,
+                           const int channels, const int height,
+                           const int width, const int num_rois,
+                           const int pooled_height, const int pooled_width,
+                           at::Tensor output);
+
+int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
+                            const float spatial_scale, const int sample_num,
+                            const int channels, const int height,
+                            const int width, const int num_rois,
+                            const int pooled_height, const int pooled_width,
+                            at::Tensor bottom_grad);
+
+#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ")
+#define CHECK_CONTIGUOUS(x) \
+  TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ")
+#define CHECK_INPUT(x) \
+  CHECK_CUDA(x);       \
+  CHECK_CONTIGUOUS(x)
+
+int roi_align_forward_cuda(at::Tensor features, at::Tensor rois,
+                           int pooled_height, int pooled_width,
+                           float spatial_scale, int sample_num,
+                           at::Tensor output) {
+  CHECK_INPUT(features);
+  CHECK_INPUT(rois);
+  CHECK_INPUT(output);
+
+  // Number of ROIs
+  int num_rois = rois.size(0);
+  int size_rois = rois.size(1);
+
+  if (size_rois != 5) {
+    printf("wrong roi size\n");
+    return 0;
+  }
+
+  int num_channels = features.size(1);
+  int data_height = features.size(2);
+  int data_width = features.size(3);
+
+  ROIAlignForwardLaucher(features, rois, spatial_scale, sample_num,
+                         num_channels, data_height, data_width, num_rois,
+                         pooled_height, pooled_width, output);
+
+  return 1;
+}
+
+int roi_align_backward_cuda(at::Tensor top_grad, at::Tensor rois,
+                            int pooled_height, int pooled_width,
+                            float spatial_scale, int sample_num,
+                            at::Tensor bottom_grad) {
+  CHECK_INPUT(top_grad);
+  CHECK_INPUT(rois);
+  CHECK_INPUT(bottom_grad);
+
+  // Number of ROIs
+  int num_rois = rois.size(0);
+  int size_rois = rois.size(1);
+  if (size_rois != 5) {
+    printf("wrong roi size\n");
+    return 0;
+  }
+
+  int num_channels = bottom_grad.size(1);
+  int data_height = bottom_grad.size(2);
+  int data_width = bottom_grad.size(3);
+
+  ROIAlignBackwardLaucher(top_grad, rois, spatial_scale, sample_num,
+                          num_channels, data_height, data_width, num_rois,
+                          pooled_height, pooled_width, bottom_grad);
+
+  return 1;
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("forward", &roi_align_forward_cuda, "Roi_Align forward (CUDA)");
+  m.def("backward", &roi_align_backward_cuda, "Roi_Align backward (CUDA)");
+}
diff --git a/CDARTS_detection/mmdet/ops/roi_align/src/roi_align_kernel.cu b/CDARTS_detection/mmdet/ops/roi_align/src/roi_align_kernel.cu
new file mode 100644
index 0000000..eb7cdaf
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/roi_align/src/roi_align_kernel.cu
@@ -0,0 +1,282 @@
+#include <ATen/ATen.h>
+#include <THC/THCAtomics.cuh>
+
+#define CUDA_1D_KERNEL_LOOP(i, n)                            \
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
+       i += blockDim.x * gridDim.x)
+
+#define THREADS_PER_BLOCK 1024
+
+inline int GET_BLOCKS(const int N) {
+  int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
+  int max_block_num = 65000;
+  return min(optimal_block_num, max_block_num);
+}
+
+template <typename scalar_t>
+__device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data,
+                                         const int height, const int width,
+                                         scalar_t y, scalar_t x) {
+  // deal with cases that inverse elements are out of feature map boundary
+  if (y < -1.0 || y > height || x < -1.0 || x > width) {
+    return 0;
+  }
+
+  if (y <= 0) y = 0;
+  if (x <= 0) x = 0;
+
+  int y_low = (int)y;
+  int x_low = (int)x;
+  int y_high;
+  int x_high;
+
+  if (y_low >= height - 1) {
+    y_high = y_low = height - 1;
+    y = (scalar_t)y_low;
+  } else {
+    y_high = y_low + 1;
+  }
+
+  if (x_low >= width - 1) {
+    x_high = x_low = width - 1;
+    x = (scalar_t)x_low;
+  } else {
+    x_high = x_low + 1;
+  }
+
+  scalar_t ly = y - y_low;
+  scalar_t lx = x - x_low;
+  scalar_t hy = 1. - ly;
+  scalar_t hx = 1. - lx;
+  // do bilinear interpolation
+  scalar_t lt = bottom_data[y_low * width + x_low];
+  scalar_t rt = bottom_data[y_low * width + x_high];
+  scalar_t lb = bottom_data[y_high * width + x_low];
+  scalar_t rb = bottom_data[y_high * width + x_high];
+  scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+  scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb);
+
+  return val;
+}
+
+template <typename scalar_t>
+__global__ void ROIAlignForward(const int nthreads, const scalar_t *bottom_data,
+                                const scalar_t *bottom_rois,
+                                const scalar_t spatial_scale,
+                                const int sample_num, const int channels,
+                                const int height, const int width,
+                                const int pooled_height, const int pooled_width,
+                                scalar_t *top_data) {
+  CUDA_1D_KERNEL_LOOP(index, nthreads) {
+    // (n, c, ph, pw) is an element in the aligned output
+    int pw = index % pooled_width;
+    int ph = (index / pooled_width) % pooled_height;
+    int c = (index / pooled_width / pooled_height) % channels;
+    int n = index / pooled_width / pooled_height / channels;
+
+    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
+    int roi_batch_ind = offset_bottom_rois[0];
+    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
+    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
+    scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
+    scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
+
+    // Force malformed ROIs to be 1x1
+    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
+    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
+
+    scalar_t bin_size_h = roi_height / pooled_height;
+    scalar_t bin_size_w = roi_width / pooled_width;
+
+    const scalar_t *offset_bottom_data =
+        bottom_data + (roi_batch_ind * channels + c) * height * width;
+
+    int sample_num_h = (sample_num > 0)
+                           ? sample_num
+                           : ceil(roi_height / pooled_height);  // e.g., = 2
+    int sample_num_w =
+        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
+
+    scalar_t output_val = 0;
+    for (int iy = 0; iy < sample_num_h; iy++) {
+      const scalar_t y = roi_start_h + ph * bin_size_h +
+                         (scalar_t)(iy + scalar_t(.5f)) * bin_size_h /
+                             (scalar_t)(sample_num_h);
+      for (int ix = 0; ix < sample_num_w; ix++) {
+        const scalar_t x = roi_start_w + pw * bin_size_w +
+                           (scalar_t)(ix + scalar_t(.5f)) * bin_size_w /
+                               (scalar_t)(sample_num_w);
+        scalar_t val = bilinear_interpolate<scalar_t>(offset_bottom_data,
+                                                      height, width, y, x);
+        output_val += val;
+      }
+    }
+    output_val /= (sample_num_h * sample_num_w);
+    top_data[index] = output_val;
+  }
+}
+
+int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois,
+                           const float spatial_scale, const int sample_num,
+                           const int channels, const int height,
+                           const int width, const int num_rois,
+                           const int pooled_height, const int pooled_width,
+                           at::Tensor output) {
+  const int output_size = num_rois * pooled_height * pooled_width * channels;
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      features.scalar_type(), "ROIAlignLaucherForward", ([&] {
+        const scalar_t *bottom_data = features.data<scalar_t>();
+        const scalar_t *rois_data = rois.data<scalar_t>();
+        scalar_t *top_data = output.data<scalar_t>();
+
+        ROIAlignForward<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
+                output_size, bottom_data, rois_data, scalar_t(spatial_scale),
+                sample_num, channels, height, width, pooled_height,
+                pooled_width, top_data);
+      }));
+  THCudaCheck(cudaGetLastError());
+  return 1;
+}
+
+template <typename scalar_t>
+__device__ void bilinear_interpolate_gradient(const int height, const int width,
+                                              scalar_t y, scalar_t x,
+                                              scalar_t &w1, scalar_t &w2,
+                                              scalar_t &w3, scalar_t &w4,
+                                              int &x_low, int &x_high,
+                                              int &y_low, int &y_high) {
+  // deal with cases that inverse elements are out of feature map boundary
+  if (y < -1.0 || y > height || x < -1.0 || x > width) {
+    w1 = w2 = w3 = w4 = 0.;
+    x_low = x_high = y_low = y_high = -1;
+    return;
+  }
+
+  if (y <= 0) y = 0;
+  if (x <= 0) x = 0;
+
+  y_low = (int)y;
+  x_low = (int)x;
+
+  if (y_low >= height - 1) {
+    y_high = y_low = height - 1;
+    y = (scalar_t)y_low;
+  } else {
+    y_high = y_low + 1;
+  }
+
+  if (x_low >= width - 1) {
+    x_high = x_low = width - 1;
+    x = (scalar_t)x_low;
+  } else {
+    x_high = x_low + 1;
+  }
+
+  scalar_t ly = y - y_low;
+  scalar_t lx = x - x_low;
+  scalar_t hy = 1. - ly;
+  scalar_t hx = 1. - lx;
+
+  w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+  return;
+}
+
+template <typename scalar_t>
+__global__ void ROIAlignBackward(
+    const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois,
+    const scalar_t spatial_scale, const int sample_num, const int channels,
+    const int height, const int width, const int pooled_height,
+    const int pooled_width, scalar_t *bottom_diff) {
+  CUDA_1D_KERNEL_LOOP(index, nthreads) {
+    // (n, c, ph, pw) is an element in the aligned output
+    int pw = index % pooled_width;
+    int ph = (index / pooled_width) % pooled_height;
+    int c = (index / pooled_width / pooled_height) % channels;
+    int n = index / pooled_width / pooled_height / channels;
+
+    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
+    int roi_batch_ind = offset_bottom_rois[0];
+    scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale;
+    scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale;
+    scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale;
+    scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale;
+
+    // Force malformed ROIs to be 1x1
+    scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.);
+    scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.);
+
+    scalar_t bin_size_h = roi_height / pooled_height;
+    scalar_t bin_size_w = roi_width / pooled_width;
+
+    scalar_t *offset_bottom_diff =
+        bottom_diff + (roi_batch_ind * channels + c) * height * width;
+    int offset_top = (n * channels + c) * pooled_height * pooled_width +
+                     ph * pooled_width + pw;
+    scalar_t offset_top_diff = top_diff[offset_top];
+
+    int sample_num_h = (sample_num > 0)
+                           ? sample_num
+                           : ceil(roi_height / pooled_height);  // e.g., = 2
+    int sample_num_w =
+        (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width);
+
+    const scalar_t count = (scalar_t)(sample_num_h * sample_num_w);
+
+    for (int iy = 0; iy < sample_num_h; iy++) {
+      const scalar_t y =
+          roi_start_h + ph * bin_size_h +
+          (scalar_t)(iy + .5f) * bin_size_h / (scalar_t)(sample_num_h);
+      for (int ix = 0; ix < sample_num_w; ix++) {
+        const scalar_t x =
+            roi_start_w + pw * bin_size_w +
+            (scalar_t)(ix + .5f) * bin_size_w / (scalar_t)(sample_num_w);
+        scalar_t w1, w2, w3, w4;
+        int x_low, x_high, y_low, y_high;
+
+        bilinear_interpolate_gradient<scalar_t>(
+            height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
+        scalar_t g1 = offset_top_diff * w1 / count;
+        scalar_t g2 = offset_top_diff * w2 / count;
+        scalar_t g3 = offset_top_diff * w3 / count;
+        scalar_t g4 = offset_top_diff * w4 / count;
+        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
+          atomicAdd(offset_bottom_diff + y_low * width + x_low, g1);
+          atomicAdd(offset_bottom_diff + y_low * width + x_high, g2);
+          atomicAdd(offset_bottom_diff + y_high * width + x_low, g3);
+          atomicAdd(offset_bottom_diff + y_high * width + x_high, g4);
+        }
+      }
+    }
+  }
+}
+
+int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
+                            const float spatial_scale, const int sample_num,
+                            const int channels, const int height,
+                            const int width, const int num_rois,
+                            const int pooled_height, const int pooled_width,
+                            at::Tensor bottom_grad) {
+  const int output_size = num_rois * pooled_height * pooled_width * channels;
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      top_grad.scalar_type(), "ROIAlignLaucherBackward", ([&] {
+        const scalar_t *top_diff = top_grad.data<scalar_t>();
+        const scalar_t *rois_data = rois.data<scalar_t>();
+        scalar_t *bottom_diff = bottom_grad.data<scalar_t>();
+        if (sizeof(scalar_t) == sizeof(double)) {
+          fprintf(stderr, "double is not supported\n");
+          exit(-1);
+        }
+
+        ROIAlignBackward<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
+                output_size, top_diff, rois_data, spatial_scale, sample_num,
+                channels, height, width, pooled_height, pooled_width,
+                bottom_diff);
+      }));
+  THCudaCheck(cudaGetLastError());
+  return 1;
+}
diff --git a/CDARTS_detection/mmdet/ops/roi_pool/__init__.py b/CDARTS_detection/mmdet/ops/roi_pool/__init__.py
new file mode 100644
index 0000000..eb2c57e
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/roi_pool/__init__.py
@@ -0,0 +1,4 @@
+from .functions.roi_pool import roi_pool
+from .modules.roi_pool import RoIPool
+
+__all__ = ['roi_pool', 'RoIPool']
diff --git a/CDARTS_detection/mmdet/ops/roi_pool/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/roi_pool/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e479222166e85e7fae805e4cb8d7a0613bc9868
GIT binary patch
diff --git a/CDARTS_detection/mmdet/ops/roi_pool/functions/roi_pool.py b/CDARTS_detection/mmdet/ops/roi_pool/functions/roi_pool.py
new file mode 100644
index 0000000..068da60
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/roi_pool/functions/roi_pool.py
@@ -0,0 +1,55 @@
+import torch
+from torch.autograd import Function
+
+from .. import roi_pool_cuda
+
+
+class RoIPoolFunction(Function):
+
+    @staticmethod
+    def forward(ctx, features, rois, out_size, spatial_scale):
+        if isinstance(out_size, int):
+            out_h = out_size
+            out_w = out_size
+        elif isinstance(out_size, tuple):
+            assert len(out_size) == 2
+            assert isinstance(out_size[0], int)
+            assert isinstance(out_size[1], int)
+            out_h, out_w = out_size
+        else:
+            raise TypeError(
+                '"out_size" must be an integer or tuple of integers')
+        assert features.is_cuda
+        ctx.save_for_backward(rois)
+        num_channels = features.size(1)
+        num_rois = rois.size(0)
+        out_size = (num_rois, num_channels, out_h, out_w)
+        output = features.new_zeros(out_size)
+        argmax = features.new_zeros(out_size, dtype=torch.int)
+        roi_pool_cuda.forward(features, rois, out_h, out_w, spatial_scale,
+                              output, argmax)
+        ctx.spatial_scale = spatial_scale
+        ctx.feature_size = features.size()
+        ctx.argmax = argmax
+
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        assert grad_output.is_cuda
+        spatial_scale = ctx.spatial_scale
+        feature_size = ctx.feature_size
+        argmax = ctx.argmax
+        rois = ctx.saved_tensors[0]
+        assert feature_size is not None
+
+        grad_input = grad_rois = None
+        if ctx.needs_input_grad[0]:
+            grad_input = grad_output.new_zeros(feature_size)
+            roi_pool_cuda.backward(grad_output.contiguous(), rois, argmax,
+                                   spatial_scale, grad_input)
+
+        return grad_input, grad_rois, None, None
+
+
+roi_pool = RoIPoolFunction.apply
diff --git a/CDARTS_detection/mmdet/ops/roi_pool/gradcheck.py b/CDARTS_detection/mmdet/ops/roi_pool/gradcheck.py
new file mode 100644
index 0000000..c396160
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/roi_pool/gradcheck.py
@@ -0,0 +1,15 @@
+import torch
+from torch.autograd import gradcheck
+
+import os.path as osp
+import sys
+sys.path.append(osp.abspath(osp.join(__file__, '../../')))
+from roi_pool import RoIPool  # noqa: E402
+
+feat = torch.randn(4, 16, 15, 15, requires_grad=True).cuda()
+rois = torch.Tensor([[0, 0, 0, 50, 50], [0, 10, 30, 43, 55],
+                     [1, 67, 40, 110, 120]]).cuda()
+inputs = (feat, rois)
+print('Gradcheck for roi pooling...')
+test = gradcheck(RoIPool(4, 1.0 / 8), inputs, eps=1e-5, atol=1e-3)
+print(test)
diff --git a/CDARTS_detection/mmdet/ops/roi_pool/modules/__init__.py b/CDARTS_detection/mmdet/ops/roi_pool/modules/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/CDARTS_detection/mmdet/ops/roi_pool/modules/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/roi_pool/modules/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ee6d1a0ab6d7ea3d350cafd038958bdaa83e53c9
GIT binary patch
diff --git a/CDARTS_detection/mmdet/ops/roi_pool/modules/__pycache__/roi_pool.cpython-36.pyc b/CDARTS_detection/mmdet/ops/roi_pool/modules/__pycache__/roi_pool.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..904db2f2047976bfc2b7759337c2c55fbb3e4dd5
GIT binary patch
diff --git a/CDARTS_detection/mmdet/ops/roi_pool/modules/roi_pool.py b/CDARTS_detection/mmdet/ops/roi_pool/modules/roi_pool.py
new file mode 100644
index 0000000..d7fffd0
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/roi_pool/modules/roi_pool.py
@@ -0,0 +1,14 @@
+from torch.nn.modules.module import Module
+from ..functions.roi_pool import roi_pool
+
+
+class RoIPool(Module):
+
+    def __init__(self, out_size, spatial_scale):
+        super(RoIPool, self).__init__()
+
+        self.out_size = out_size
+        self.spatial_scale = float(spatial_scale)
+
+    def forward(self, features, rois):
+        return roi_pool(features, rois, self.out_size, self.spatial_scale)
diff --git a/CDARTS_detection/mmdet/ops/roi_pool/setup.py b/CDARTS_detection/mmdet/ops/roi_pool/setup.py
new file mode 100644
index 0000000..16991b8
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/roi_pool/setup.py
@@ -0,0 +1,12 @@
+from setuptools import setup
+from torch.utils.cpp_extension import BuildExtension, CUDAExtension
+
+setup(
+    name='roi_pool',
+    ext_modules=[
+        CUDAExtension('roi_pool_cuda', [
+            'src/roi_pool_cuda.cpp',
+            'src/roi_pool_kernel.cu',
+        ])
+    ],
+    cmdclass={'build_ext': BuildExtension})
diff --git a/CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_cuda.cpp b/CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_cuda.cpp
new file mode 100644
index 0000000..7818aa7
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_cuda.cpp
@@ -0,0 +1,86 @@
+#include <torch/extension.h>
+
+#include <cmath>
+#include <vector>
+
+int ROIPoolForwardLaucher(const at::Tensor features, const at::Tensor rois,
+                          const float spatial_scale, const int channels,
+                          const int height, const int width, const int num_rois,
+                          const int pooled_h, const int pooled_w,
+                          at::Tensor output, at::Tensor argmax);
+
+int ROIPoolBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
+                           const at::Tensor argmax, const float spatial_scale,
+                           const int batch_size, const int channels,
+                           const int height, const int width,
+                           const int num_rois, const int pooled_h,
+                           const int pooled_w, at::Tensor bottom_grad);
+
+#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ")
+#define CHECK_CONTIGUOUS(x) \
+  TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ")
+#define CHECK_INPUT(x) \
+  CHECK_CUDA(x);       \
+  CHECK_CONTIGUOUS(x)
+
+int roi_pooling_forward_cuda(at::Tensor features, at::Tensor rois,
+                             int pooled_height, int pooled_width,
+                             float spatial_scale, at::Tensor output,
+                             at::Tensor argmax) {
+  CHECK_INPUT(features);
+  CHECK_INPUT(rois);
+  CHECK_INPUT(output);
+  CHECK_INPUT(argmax);
+
+  // Number of ROIs
+  int num_rois = rois.size(0);
+  int size_rois = rois.size(1);
+
+  if (size_rois != 5) {
+    printf("wrong roi size\n");
+    return 0;
+  }
+
+  int channels = features.size(1);
+  int height = features.size(2);
+  int width = features.size(3);
+
+  ROIPoolForwardLaucher(features, rois, spatial_scale, channels, height, width,
+                        num_rois, pooled_height, pooled_width, output, argmax);
+
+  return 1;
+}
+
+int roi_pooling_backward_cuda(at::Tensor top_grad, at::Tensor rois,
+                              at::Tensor argmax, float spatial_scale,
+                              at::Tensor bottom_grad) {
+  CHECK_INPUT(top_grad);
+  CHECK_INPUT(rois);
+  CHECK_INPUT(argmax);
+  CHECK_INPUT(bottom_grad);
+
+  int pooled_height = top_grad.size(2);
+  int pooled_width = top_grad.size(3);
+  int num_rois = rois.size(0);
+  int size_rois = rois.size(1);
+
+  if (size_rois != 5) {
+    printf("wrong roi size\n");
+    return 0;
+  }
+  int batch_size = bottom_grad.size(0);
+  int channels = bottom_grad.size(1);
+  int height = bottom_grad.size(2);
+  int width = bottom_grad.size(3);
+
+  ROIPoolBackwardLaucher(top_grad, rois, argmax, spatial_scale, batch_size,
+                         channels, height, width, num_rois, pooled_height,
+                         pooled_width, bottom_grad);
+
+  return 1;
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("forward", &roi_pooling_forward_cuda, "Roi_Pooling forward (CUDA)");
+  m.def("backward", &roi_pooling_backward_cuda, "Roi_Pooling backward (CUDA)");
+}
diff --git a/CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_kernel.cu b/CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_kernel.cu
new file mode 100644
index 0000000..b51bb04
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/roi_pool/src/roi_pool_kernel.cu
@@ -0,0 +1,156 @@
+#include <ATen/ATen.h>
+#include <THC/THCAtomics.cuh>
+
+#define CUDA_1D_KERNEL_LOOP(i, n)                            \
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
+       i += blockDim.x * gridDim.x)
+
+#define THREADS_PER_BLOCK 1024
+
+inline int GET_BLOCKS(const int N) {
+  int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
+  int max_block_num = 65000;
+  return min(optimal_block_num, max_block_num);
+}
+
+template <typename scalar_t>
+__global__ void ROIPoolForward(const int nthreads, const scalar_t *bottom_data,
+                               const scalar_t *rois,
+                               const scalar_t spatial_scale, const int channels,
+                               const int height, const int width,
+                               const int pooled_h, const int pooled_w,
+                               scalar_t *top_data, int *argmax_data) {
+  CUDA_1D_KERNEL_LOOP(index, nthreads) {
+    // (n, c, ph, pw) is an element in the pooled output
+    int pw = index % pooled_w;
+    int ph = (index / pooled_w) % pooled_h;
+    int c = (index / pooled_w / pooled_h) % channels;
+    int n = index / pooled_w / pooled_h / channels;
+
+    const scalar_t *offset_rois = rois + n * 5;
+    int roi_batch_ind = offset_rois[0];
+    // calculate the roi region on feature maps
+    scalar_t roi_x1 = offset_rois[1] * spatial_scale;
+    scalar_t roi_y1 = offset_rois[2] * spatial_scale;
+    scalar_t roi_x2 = (offset_rois[3] + 1) * spatial_scale;
+    scalar_t roi_y2 = (offset_rois[4] + 1) * spatial_scale;
+
+    // force malformed rois to be 1x1
+    scalar_t roi_w = roi_x2 - roi_x1;
+    scalar_t roi_h = roi_y2 - roi_y1;
+    if (roi_w <= 0 || roi_h <= 0) continue;
+
+    scalar_t bin_size_w = roi_w / static_cast<scalar_t>(pooled_w);
+    scalar_t bin_size_h = roi_h / static_cast<scalar_t>(pooled_h);
+
+    // the corresponding bin region
+    int bin_x1 = floor(static_cast<scalar_t>(pw) * bin_size_w + roi_x1);
+    int bin_y1 = floor(static_cast<scalar_t>(ph) * bin_size_h + roi_y1);
+    int bin_x2 = ceil(static_cast<scalar_t>(pw + 1) * bin_size_w + roi_x1);
+    int bin_y2 = ceil(static_cast<scalar_t>(ph + 1) * bin_size_h + roi_y1);
+
+    // add roi offsets and clip to input boundaries
+    bin_x1 = min(max(bin_x1, 0), width);
+    bin_y1 = min(max(bin_y1, 0), height);
+    bin_x2 = min(max(bin_x2, 0), width);
+    bin_y2 = min(max(bin_y2, 0), height);
+    bool is_empty = (bin_y2 <= bin_y1) || (bin_x2 <= bin_x1);
+
+    // If nothing is pooled, argmax = -1 causes nothing to be backprop'd
+    int max_idx = -1;
+    bottom_data += (roi_batch_ind * channels + c) * height * width;
+
+    // Define an empty pooling region to be zero
+    scalar_t max_val = is_empty ? static_cast<scalar_t>(0)
+                                : bottom_data[bin_y1 * width + bin_x1] - 1;
+
+    for (int h = bin_y1; h < bin_y2; ++h) {
+      for (int w = bin_x1; w < bin_x2; ++w) {
+        int offset = h * width + w;
+        if (bottom_data[offset] > max_val) {
+          max_val = bottom_data[offset];
+          max_idx = offset;
+        }
+      }
+    }
+    top_data[index] = max_val;
+    if (argmax_data != NULL) argmax_data[index] = max_idx;
+  }
+}
+
+int ROIPoolForwardLaucher(const at::Tensor features, const at::Tensor rois,
+                          const float spatial_scale, const int channels,
+                          const int height, const int width, const int num_rois,
+                          const int pooled_h, const int pooled_w,
+                          at::Tensor output, at::Tensor argmax) {
+  const int output_size = num_rois * channels * pooled_h * pooled_w;
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      features.type(), "ROIPoolLaucherForward", ([&] {
+        const scalar_t *bottom_data = features.data<scalar_t>();
+        const scalar_t *rois_data = rois.data<scalar_t>();
+        scalar_t *top_data = output.data<scalar_t>();
+        int *argmax_data = argmax.data<int>();
+
+        ROIPoolForward<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
+                output_size, bottom_data, rois_data, scalar_t(spatial_scale),
+                channels, height, width, pooled_h, pooled_w, top_data,
+                argmax_data);
+      }));
+  THCudaCheck(cudaGetLastError());
+  return 1;
+}
+
+template <typename scalar_t>
+__global__ void ROIPoolBackward(const int nthreads, const scalar_t *top_diff,
+                                const scalar_t *rois, const int *argmax_data,
+                                const scalar_t spatial_scale,
+                                const int channels, const int height,
+                                const int width, const int pooled_h,
+                                const int pooled_w, scalar_t *bottom_diff) {
+  CUDA_1D_KERNEL_LOOP(index, nthreads) {
+    int pw = index % pooled_w;
+    int ph = (index / pooled_w) % pooled_h;
+    int c = (index / pooled_w / pooled_h) % channels;
+    int n = index / pooled_w / pooled_h / channels;
+
+    int roi_batch_ind = rois[n * 5];
+    int bottom_index = argmax_data[(n * channels + c) * pooled_h * pooled_w +
+                                   ph * pooled_w + pw];
+
+    atomicAdd(bottom_diff + (roi_batch_ind * channels + c) * height * width +
+                  bottom_index,
+              top_diff[index]);
+  }
+}
+
+int ROIPoolBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
+                           const at::Tensor argmax, const float spatial_scale,
+                           const int batch_size, const int channels,
+                           const int height, const int width,
+                           const int num_rois, const int pooled_h,
+                           const int pooled_w, at::Tensor bottom_grad) {
+  const int output_size = num_rois * pooled_h * pooled_w * channels;
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      top_grad.type(), "ROIPoolLaucherBackward", ([&] {
+        const scalar_t *top_diff = top_grad.data<scalar_t>();
+        const scalar_t *rois_data = rois.data<scalar_t>();
+        const int *argmax_data = argmax.data<int>();
+        scalar_t *bottom_diff = bottom_grad.data<scalar_t>();
+
+        if (sizeof(scalar_t) == sizeof(double)) {
+          fprintf(stderr, "double is not supported\n");
+          exit(-1);
+        }
+
+        ROIPoolBackward<scalar_t>
+            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
+                output_size, top_diff, rois_data, argmax_data,
+                scalar_t(spatial_scale), channels, height, width, pooled_h,
+                pooled_w, bottom_diff);
+      }));
+  THCudaCheck(cudaGetLastError());
+  return 1;
+}
diff --git a/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/__init__.py b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/__init__.py
new file mode 100644
index 0000000..71058ad
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/__init__.py
@@ -0,0 +1,3 @@
+from .modules.sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss
+
+__all__ = ['SigmoidFocalLoss', 'sigmoid_focal_loss']
diff --git a/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3989b7a53fa5fd03a83b1a2fab3596889cf428b5
GIT binary patch
diff --git a/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/functions/__init__.py b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/functions/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/functions/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/functions/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..78721d4a4dea5dbd6951d802a5a065757b8d5047
GIT binary patch
diff --git a/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/__init__.py b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/__pycache__/__init__.cpython-36.pyc
new file mode 100644
GIT binary patch
diff --git a/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/__pycache__/sigmoid_focal_loss.cpython-36.pyc b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/__pycache__/sigmoid_focal_loss.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40c3297c3b66613e3d13ee9807b91b87a5c8f7ed
GIT binary patch
diff --git a/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp
new file mode 100644
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss.cpp
+#include <torch/extension.h>
+
+at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits,
+                                         const at::Tensor &targets,
+                                         const int num_classes,
+                                         const float gamma, const float alpha);
+
+at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits,
+                                          const at::Tensor &targets,
+                                          const at::Tensor &d_losses,
+                                          const int num_classes,
+                                          const float gamma, const float alpha);
+
+// Interface for Python
+at::Tensor SigmoidFocalLoss_forward(const at::Tensor &logits,
+                                    const at::Tensor &targets,
+                                    const int num_classes, const float gamma,
+                                    const float alpha) {
+  if (logits.type().is_cuda()) {
+    return SigmoidFocalLoss_forward_cuda(logits, targets, num_classes, gamma,
+                                         alpha);
+  }
+  // no CPU implementation: fail loudly rather than fall off a non-void function
+  AT_ERROR("SigmoidFocalLoss is not implemented on the CPU");
+}
+
+at::Tensor SigmoidFocalLoss_backward(const at::Tensor &logits,
+                                     const at::Tensor &targets,
+                                     const at::Tensor &d_losses,
+                                     const int num_classes, const float gamma,
+                                     const float alpha) {
+  if (logits.type().is_cuda()) {
+    return SigmoidFocalLoss_backward_cuda(logits, targets, d_losses,
+                                          num_classes, gamma, alpha);
+  }
+  // no CPU implementation: fail loudly rather than fall off a non-void function
+  AT_ERROR("SigmoidFocalLoss is not implemented on the CPU");
+}
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("forward", &SigmoidFocalLoss_forward,
+        "SigmoidFocalLoss forward (CUDA)");
+  m.def("backward", &SigmoidFocalLoss_backward,
+        "SigmoidFocalLoss backward (CUDA)");
+}
diff --git a/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu
new file mode 100644
index 0000000..aa1e4b9
--- /dev/null
+++ b/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/src/sigmoid_focal_loss_cuda.cu
@@ -0,0 +1,169 @@
+// modify from
+// https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu
+
+// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+// This file is modified from
+// https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu
+// Cheng-Yang Fu
+// cyfu@cs.unc.edu
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+
+#include <THC/THC.h>
+#include <THC/THCAtomics.cuh>
+#include <THC/THCDeviceUtils.cuh>
+
+#include <cfloat>
+
+// TODO make it in a common file
+#define CUDA_1D_KERNEL_LOOP(i, n)                            \
+  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
+       i += blockDim.x * gridDim.x)
+
+template <typename scalar_t>
+__global__ void SigmoidFocalLossForward(const int nthreads,
+                                        const scalar_t *logits,
+                                        const long *targets,
+                                        const int num_classes,
+                                        const float gamma, const float alpha,
+                                        const int num, scalar_t *losses) {
+  CUDA_1D_KERNEL_LOOP(i, nthreads) {
+    int n = i / num_classes;
+    int d = i % num_classes;  // current class[0~79];
+    int t = targets[n];       // target class [1~80];
+
+    // Decide it is positive or negative case.
+    scalar_t c1 = (t == (d + 1));
+    scalar_t c2 = (t >= 0 & t != (d + 1));
+
+    scalar_t zn = (1.0 - alpha);
+    scalar_t zp = (alpha);
+
+    // p = 1. / 1. + expf(-x); p = sigmoid(x)
+    scalar_t p = 1. / (1. + expf(-logits[i]));
+
+    // (1-p)**gamma * log(p) where
+    scalar_t term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN));
+
+    // p**gamma * log(1-p)
+    scalar_t term2 =
+        powf(p, gamma) *
+        (-1. * logits[i] * (logits[i] >= 0) -
+         logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0))));
+
+    losses[i] = 0.0;
+    losses[i] += -c1 * term1 * zp;
+    losses[i] += -c2 * term2 * zn;
+
+  }  // CUDA_1D_KERNEL_LOOP
+}  // SigmoidFocalLossForward
+
+template <typename scalar_t>
+__global__ void SigmoidFocalLossBackward(
+    const int nthreads, const scalar_t *logits, const long *targets,
+    const scalar_t *d_losses, const int num_classes, const float gamma,
+    const float alpha, const int num, scalar_t *d_logits) {
+  CUDA_1D_KERNEL_LOOP(i, nthreads) {
+    int n = i / num_classes;
+    int d = i % num_classes;  // current class[0~79];
+    int t = targets[n];       // target class [1~80], 0 is background;
+
+    // Decide it is positive or negative case.
+    scalar_t c1 = (t == (d + 1));
+    scalar_t c2 = (t >= 0 & t != (d + 1));
+
+    scalar_t zn = (1.0 - alpha);
+    scalar_t zp = (alpha);
+    // p = 1. / 1. + expf(-x); p = sigmoid(x)
+    scalar_t p = 1. / (1. + expf(-logits[i]));
+
+    // (1-p)**g * (1 - p - g*p*log(p)
+    scalar_t term1 =
+        powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN))));
+
+    // (p**g) * (g*(1-p)*log(1-p) - p)
+    scalar_t term2 =
+        powf(p, gamma) *
+        ((-1. * logits[i] * (logits[i] >= 0) -
+          logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) *
+             (1. - p) * gamma -
+         p);
+    d_logits[i] = 0.0;
+    d_logits[i] += -c1 * term1 * zp;
+    d_logits[i] += -c2 * term2 * zn;
+    d_logits[i] = d_logits[i] * d_losses[i];
+
+  }  // CUDA_1D_KERNEL_LOOP
+}  // SigmoidFocalLossBackward
+
+at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits,
+                                         const at::Tensor &targets,
+                                         const int num_classes,
+                                         const float gamma, const float alpha) {
+  AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor");
+  AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor");
+  AT_ASSERTM(logits.dim() == 2, "logits should be NxClass");
+
+  const int num_samples = logits.size(0);
+
+  auto losses = at::empty({num_samples, logits.size(1)}, logits.options());
+  auto losses_size = num_samples * logits.size(1);
+
+  dim3 grid(std::min(THCCeilDiv(losses_size, 512L), 4096L));
+  dim3 block(512);
+
+  if (losses.numel() == 0) {
+    THCudaCheck(cudaGetLastError());
+    return losses;
+  }
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      logits.type(), "SigmoidFocalLoss_forward", [&] {
+        SigmoidFocalLossForward<scalar_t><<<grid, block>>>(
+            losses_size, logits.contiguous().data<scalar_t>(),
+            targets.contiguous().data<long>(), num_classes, gamma, alpha,
+            num_samples, losses.data<scalar_t>());
+      });
+  THCudaCheck(cudaGetLastError());
+  return losses;
+}
+
+at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits,
+                                          const at::Tensor &targets,
+                                          const at::Tensor &d_losses,
+                                          const int num_classes,
+                                          const float gamma,
+                                          const float alpha) {
+  AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor");
+  AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor");
+  AT_ASSERTM(d_losses.type().is_cuda(), "d_losses must be a CUDA tensor");
+
+  AT_ASSERTM(logits.dim() == 2, "logits should be NxClass");
+
+  const int num_samples = logits.size(0);
+  AT_ASSERTM(logits.size(1) == num_classes,
+             "logits.size(1) should be num_classes");
+
+  auto d_logits = at::zeros({num_samples, num_classes}, logits.options());
+  auto d_logits_size = num_samples * logits.size(1);
+
+  dim3 grid(std::min(THCCeilDiv(d_logits_size, 512L), 4096L));
+  dim3 block(512);
+
+  if (d_logits.numel() == 0) {
+    THCudaCheck(cudaGetLastError());
+    return d_logits;
+  }
+
+  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+      logits.type(), "SigmoidFocalLoss_backward", [&] {
+        SigmoidFocalLossBackward<scalar_t><<<grid, block>>>(
+            d_logits_size, logits.contiguous().data<scalar_t>(),
+            targets.contiguous().data<long>(),
+            d_losses.contiguous().data<scalar_t>(), num_classes, gamma, alpha,
+            num_samples, d_logits.data<scalar_t>());
+      });
+
+  THCudaCheck(cudaGetLastError());
+  return d_logits;
+}
diff --git a/CDARTS_detection/mmdet/utils/__init__.py b/CDARTS_detection/mmdet/utils/__init__.py
new file mode 100644
index 0000000..51b5433
--- /dev/null
+++ b/CDARTS_detection/mmdet/utils/__init__.py
@@ -0,0 +1,10 @@
+from .collect_env import collect_env
+from .flops_counter import get_model_complexity_info
+from .logger import get_root_logger, print_log
+from .registry import Registry, build_from_cfg
+
+
+__all__ = [
+    'Registry', 'build_from_cfg', 'get_model_complexity_info',
+    'get_root_logger', 'print_log', 'collect_env'
+]
\ No newline at end of file
diff --git a/CDARTS_detection/mmdet/utils/__pycache__/__init__.cpython-36.pyc b/CDARTS_detection/mmdet/utils/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7cd02b7ec03149b8221e36d6bfe30d081252345
GIT binary patch
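Registry and build_from_cfg, exported above, implement the config-driven construction used throughout mmdet. A quick sketch of the pattern (the DETECTORS registry and ToyDetector class are hypothetical, purely for illustration):

from mmdet.utils import Registry, build_from_cfg

DETECTORS = Registry('detector')

@DETECTORS.register_module  # plain decorator style, as defined in registry.py below
class ToyDetector(object):
    def __init__(self, depth=50):
        self.depth = depth

# 'type' picks the class by name; the remaining keys become constructor kwargs
model = build_from_cfg(dict(type='ToyDetector', depth=101), DETECTORS)
assert isinstance(model, ToyDetector) and model.depth == 101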
diff --git a/CDARTS_detection/mmdet/utils/collect_env.py b/CDARTS_detection/mmdet/utils/collect_env.py
new file mode 100644
index 0000000..dcc1d4e
--- /dev/null
+++ b/CDARTS_detection/mmdet/utils/collect_env.py
@@ -0,0 +1,63 @@
+import os.path as osp
+import subprocess
+import sys
+from collections import defaultdict
+
+import cv2
+import mmcv
+import torch
+import torchvision
+
+import mmdet
+
+
+def collect_env():
+    env_info = {}
+    env_info['sys.platform'] = sys.platform
+    env_info['Python'] = sys.version.replace('\n', '')
+
+    cuda_available = torch.cuda.is_available()
+    env_info['CUDA available'] = cuda_available
+
+    if cuda_available:
+        from torch.utils.cpp_extension import CUDA_HOME
+        env_info['CUDA_HOME'] = CUDA_HOME
+
+        if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
+            try:
+                nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
+                nvcc = subprocess.check_output(
+                    '"{}" -V | tail -n1'.format(nvcc), shell=True)
+                nvcc = nvcc.decode('utf-8').strip()
+            except subprocess.SubprocessError:
+                nvcc = 'Not Available'
+            env_info['NVCC'] = nvcc
+
+        devices = defaultdict(list)
+        for k in range(torch.cuda.device_count()):
+            devices[torch.cuda.get_device_name(k)].append(str(k))
+        for name, devids in devices.items():
+            env_info['GPU ' + ','.join(devids)] = name
+
+    gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
+    gcc = gcc.decode('utf-8').strip()
+    env_info['GCC'] = gcc
+
+    env_info['PyTorch'] = torch.__version__
+    env_info['PyTorch compiling details'] = torch.__config__.show()
+
+    env_info['TorchVision'] = torchvision.__version__
+
+    env_info['OpenCV'] = cv2.__version__
+
+    env_info['MMCV'] = mmcv.__version__
+    env_info['MMDetection'] = mmdet.__version__
+    from mmdet.ops import get_compiler_version, get_compiling_cuda_version
+    env_info['MMDetection Compiler'] = get_compiler_version()
+    env_info['MMDetection CUDA Compiler'] = get_compiling_cuda_version()
+    return env_info
+
+
+if __name__ == "__main__":
+    for name, val in collect_env().items():
+        print('{}: {}'.format(name, val))
\ No newline at end of file
diff --git
a/CDARTS_detection/mmdet/utils/contextmanagers.py b/CDARTS_detection/mmdet/utils/contextmanagers.py new file mode 100644 index 0000000..0363f01 --- /dev/null +++ b/CDARTS_detection/mmdet/utils/contextmanagers.py @@ -0,0 +1,126 @@ +# coding: utf-8 +import asyncio +import contextlib +import logging +import os +import time +from typing import List + +import torch + +logger = logging.getLogger(__name__) + +DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False)) + + +@contextlib.asynccontextmanager +async def completed(trace_name='', + name='', + sleep_interval=0.05, + streams: List[torch.cuda.Stream] = None): + """ + Async context manager that waits for work to complete on + given CUDA streams. + + """ + if not torch.cuda.is_available(): + yield + return + + stream_before_context_switch = torch.cuda.current_stream() + if not streams: + streams = [stream_before_context_switch] + else: + streams = [s if s else stream_before_context_switch for s in streams] + + end_events = [ + torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams + ] + + if DEBUG_COMPLETED_TIME: + start = torch.cuda.Event(enable_timing=True) + stream_before_context_switch.record_event(start) + + cpu_start = time.monotonic() + logger.debug('%s %s starting, streams: %s', trace_name, name, streams) + grad_enabled_before = torch.is_grad_enabled() + try: + yield + finally: + current_stream = torch.cuda.current_stream() + assert current_stream == stream_before_context_switch + + if DEBUG_COMPLETED_TIME: + cpu_end = time.monotonic() + for i, stream in enumerate(streams): + event = end_events[i] + stream.record_event(event) + + grad_enabled_after = torch.is_grad_enabled() + + # observed change of torch.is_grad_enabled() during concurrent run of + # async_test_bboxes code + assert (grad_enabled_before == grad_enabled_after + ), 'Unexpected is_grad_enabled() value change' + + are_done = [e.query() for e in end_events] + logger.debug('%s %s completed: %s streams: %s', trace_name, name, + are_done, streams) + with torch.cuda.stream(stream_before_context_switch): + while not all(are_done): + await asyncio.sleep(sleep_interval) + are_done = [e.query() for e in end_events] + logger.debug( + '%s %s completed: %s streams: %s', + trace_name, + name, + are_done, + streams, + ) + + current_stream = torch.cuda.current_stream() + assert current_stream == stream_before_context_switch + + if DEBUG_COMPLETED_TIME: + cpu_time = (cpu_end - cpu_start) * 1000 + stream_times_ms = '' + for i, stream in enumerate(streams): + elapsed_time = start.elapsed_time(end_events[i]) + stream_times_ms += ' {} {:.2f} ms'.format(stream, elapsed_time) + logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time, + stream_times_ms) + + +@contextlib.asynccontextmanager +async def concurrent(streamqueue: asyncio.Queue, + trace_name='concurrent', + name='stream'): + """Run code concurrently in different streams. + + :param streamqueue: asyncio.Queue instance. + + Queue tasks define the pool of streams used for concurrent execution. 
+ + """ + if not torch.cuda.is_available(): + yield + return + + initial_stream = torch.cuda.current_stream() + + with torch.cuda.stream(initial_stream): + stream = await streamqueue.get() + assert isinstance(stream, torch.cuda.Stream) + + try: + with torch.cuda.stream(stream): + logger.debug('%s %s is starting, stream: %s', trace_name, name, + stream) + yield + current = torch.cuda.current_stream() + assert current == stream + logger.debug('%s %s has finished, stream: %s', trace_name, + name, stream) + finally: + streamqueue.task_done() + streamqueue.put_nowait(stream) diff --git a/CDARTS_detection/mmdet/utils/flops_counter.py b/CDARTS_detection/mmdet/utils/flops_counter.py new file mode 100644 index 0000000..5d9cdfc --- /dev/null +++ b/CDARTS_detection/mmdet/utils/flops_counter.py @@ -0,0 +1,433 @@ +# Modified from flops-counter.pytorch by Vladislav Sovrasov +# original repo: https://github.com/sovrasov/flops-counter.pytorch + +# MIT License + +# Copyright (c) 2018 Vladislav Sovrasov + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import sys + +import numpy as np +import torch +import torch.nn as nn +from torch.nn.modules.batchnorm import _BatchNorm +from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin +from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, + _AvgPoolNd, _MaxPoolNd) + +CONV_TYPES = (_ConvNd, ) +DECONV_TYPES = (_ConvTransposeMixin, ) +LINEAR_TYPES = (nn.Linear, ) +POOLING_TYPES = (_AvgPoolNd, _MaxPoolNd, _AdaptiveAvgPoolNd, + _AdaptiveMaxPoolNd) +RELU_TYPES = (nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6) +BN_TYPES = (_BatchNorm, ) +UPSAMPLE_TYPES = (nn.Upsample, ) + +SUPPORTED_TYPES = ( + CONV_TYPES + DECONV_TYPES + LINEAR_TYPES + POOLING_TYPES + RELU_TYPES + + BN_TYPES + UPSAMPLE_TYPES) + + +def get_model_complexity_info(model, + input_res, + print_per_layer_stat=True, + as_strings=True, + input_constructor=None, + ost=sys.stdout): + assert type(input_res) is tuple + assert len(input_res) >= 2 + flops_model = add_flops_counting_methods(model) + flops_model.eval().start_flops_count() + if input_constructor: + input = input_constructor(input_res) + _ = flops_model(**input) + else: + batch = torch.ones(()).new_empty( + (1, *input_res), + dtype=next(flops_model.parameters()).dtype, + device=next(flops_model.parameters()).device) + flops_model(batch) + + if print_per_layer_stat: + print_model_with_flops(flops_model, ost=ost) + flops_count = flops_model.compute_average_flops_cost() + params_count = get_model_parameters_number(flops_model) + flops_model.stop_flops_count() + + if as_strings: + return flops_to_string(flops_count), params_to_string(params_count) + + return flops_count, params_count + + +def flops_to_string(flops, units='GMac', precision=2): + if units is None: + if flops // 10**9 > 0: + return str(round(flops / 10.**9, precision)) + ' GMac' + elif flops // 10**6 > 0: + return str(round(flops / 10.**6, precision)) + ' MMac' + elif flops // 10**3 > 0: + return str(round(flops / 10.**3, precision)) + ' KMac' + else: + return str(flops) + ' Mac' + else: + if units == 'GMac': + return str(round(flops / 10.**9, precision)) + ' ' + units + elif units == 'MMac': + return str(round(flops / 10.**6, precision)) + ' ' + units + elif units == 'KMac': + return str(round(flops / 10.**3, precision)) + ' ' + units + else: + return str(flops) + ' Mac' + + +def params_to_string(params_num): + """converting number to string + + :param float params_num: number + :returns str: number + + >>> params_to_string(1e9) + '1000.0 M' + >>> params_to_string(2e5) + '200.0 k' + >>> params_to_string(3e-9) + '3e-09' + """ + if params_num // 10**6 > 0: + return str(round(params_num / 10**6, 2)) + ' M' + elif params_num // 10**3: + return str(round(params_num / 10**3, 2)) + ' k' + else: + return str(params_num) + + +def print_model_with_flops(model, units='GMac', precision=3, ost=sys.stdout): + total_flops = model.compute_average_flops_cost() + + def accumulate_flops(self): + if is_supported_instance(self): + return self.__flops__ / model.__batch_counter__ + else: + sum = 0 + for m in self.children(): + sum += m.accumulate_flops() + return sum + + def flops_repr(self): + accumulated_flops_cost = self.accumulate_flops() + return ', '.join([ + flops_to_string( + accumulated_flops_cost, units=units, precision=precision), + '{:.3%} MACs'.format(accumulated_flops_cost / total_flops), + self.original_extra_repr() + ]) + + def add_extra_repr(m): + m.accumulate_flops = accumulate_flops.__get__(m) + flops_extra_repr = flops_repr.__get__(m) + if m.extra_repr != flops_extra_repr: + 
m.original_extra_repr = m.extra_repr + m.extra_repr = flops_extra_repr + assert m.extra_repr != m.original_extra_repr + + def del_extra_repr(m): + if hasattr(m, 'original_extra_repr'): + m.extra_repr = m.original_extra_repr + del m.original_extra_repr + if hasattr(m, 'accumulate_flops'): + del m.accumulate_flops + + model.apply(add_extra_repr) + print(model, file=ost) + model.apply(del_extra_repr) + + +def get_model_parameters_number(model): + params_num = sum(p.numel() for p in model.parameters() if p.requires_grad) + return params_num + + +def add_flops_counting_methods(net_main_module): + # adding additional methods to the existing module object, + # this is done this way so that each function has access to self object + net_main_module.start_flops_count = start_flops_count.__get__( + net_main_module) + net_main_module.stop_flops_count = stop_flops_count.__get__( + net_main_module) + net_main_module.reset_flops_count = reset_flops_count.__get__( + net_main_module) + net_main_module.compute_average_flops_cost = \ + compute_average_flops_cost.__get__(net_main_module) + + net_main_module.reset_flops_count() + + # Adding variables necessary for masked flops computation + net_main_module.apply(add_flops_mask_variable_or_reset) + + return net_main_module + + +def compute_average_flops_cost(self): + """ + A method that will be available after add_flops_counting_methods() is + called on a desired net object. + Returns current mean flops consumption per image. + """ + + batches_count = self.__batch_counter__ + flops_sum = 0 + for module in self.modules(): + if is_supported_instance(module): + flops_sum += module.__flops__ + + return flops_sum / batches_count + + +def start_flops_count(self): + """ + A method that will be available after add_flops_counting_methods() is + called on a desired net object. + Activates the computation of mean flops consumption per image. + Call it before you run the network. + """ + add_batch_counter_hook_function(self) + self.apply(add_flops_counter_hook_function) + + +def stop_flops_count(self): + """ + A method that will be available after add_flops_counting_methods() is + called on a desired net object. + Stops computing the mean flops consumption per image. + Call whenever you want to pause the computation. + """ + remove_batch_counter_hook_function(self) + self.apply(remove_flops_counter_hook_function) + + +def reset_flops_count(self): + """ + A method that will be available after add_flops_counting_methods() is + called on a desired net object. + Resets statistics computed so far. 
+ """ + add_batch_counter_variables_or_reset(self) + self.apply(add_flops_counter_variable_or_reset) + + +def add_flops_mask(module, mask): + + def add_flops_mask_func(module): + if isinstance(module, torch.nn.Conv2d): + module.__mask__ = mask + + module.apply(add_flops_mask_func) + + +def remove_flops_mask(module): + module.apply(add_flops_mask_variable_or_reset) + + +def is_supported_instance(module): + if isinstance(module, SUPPORTED_TYPES): + return True + else: + return False + + +def empty_flops_counter_hook(module, input, output): + module.__flops__ += 0 + + +def upsample_flops_counter_hook(module, input, output): + output_size = output[0] + batch_size = output_size.shape[0] + output_elements_count = batch_size + for val in output_size.shape[1:]: + output_elements_count *= val + module.__flops__ += int(output_elements_count) + + +def relu_flops_counter_hook(module, input, output): + active_elements_count = output.numel() + module.__flops__ += int(active_elements_count) + + +def linear_flops_counter_hook(module, input, output): + input = input[0] + batch_size = input.shape[0] + module.__flops__ += int(batch_size * input.shape[1] * output.shape[1]) + + +def pool_flops_counter_hook(module, input, output): + input = input[0] + module.__flops__ += int(np.prod(input.shape)) + + +def bn_flops_counter_hook(module, input, output): + module.affine + input = input[0] + + batch_flops = np.prod(input.shape) + if module.affine: + batch_flops *= 2 + module.__flops__ += int(batch_flops) + + +def deconv_flops_counter_hook(conv_module, input, output): + # Can have multiple inputs, getting the first one + input = input[0] + + batch_size = input.shape[0] + input_height, input_width = input.shape[2:] + + kernel_height, kernel_width = conv_module.kernel_size + in_channels = conv_module.in_channels + out_channels = conv_module.out_channels + groups = conv_module.groups + + filters_per_channel = out_channels // groups + conv_per_position_flops = ( + kernel_height * kernel_width * in_channels * filters_per_channel) + + active_elements_count = batch_size * input_height * input_width + overall_conv_flops = conv_per_position_flops * active_elements_count + bias_flops = 0 + if conv_module.bias is not None: + output_height, output_width = output.shape[2:] + bias_flops = out_channels * batch_size * output_height * output_height + overall_flops = overall_conv_flops + bias_flops + + conv_module.__flops__ += int(overall_flops) + + +def conv_flops_counter_hook(conv_module, input, output): + # Can have multiple inputs, getting the first one + input = input[0] + + batch_size = input.shape[0] + output_dims = list(output.shape[2:]) + + kernel_dims = list(conv_module.kernel_size) + in_channels = conv_module.in_channels + out_channels = conv_module.out_channels + groups = conv_module.groups + + filters_per_channel = out_channels // groups + conv_per_position_flops = np.prod( + kernel_dims) * in_channels * filters_per_channel + + active_elements_count = batch_size * np.prod(output_dims) + + if conv_module.__mask__ is not None: + # (b, 1, h, w) + output_height, output_width = output.shape[2:] + flops_mask = conv_module.__mask__.expand(batch_size, 1, output_height, + output_width) + active_elements_count = flops_mask.sum() + + overall_conv_flops = conv_per_position_flops * active_elements_count + + bias_flops = 0 + + if conv_module.bias is not None: + + bias_flops = out_channels * active_elements_count + + overall_flops = overall_conv_flops + bias_flops + + conv_module.__flops__ += int(overall_flops) + + +def 
batch_counter_hook(module, input, output): + batch_size = 1 + if len(input) > 0: + # Can have multiple inputs, getting the first one + input = input[0] + batch_size = len(input) + else: + print('Warning! No positional inputs found for a module, ' + 'assuming batch size is 1.') + module.__batch_counter__ += batch_size + + +def add_batch_counter_variables_or_reset(module): + + module.__batch_counter__ = 0 + + +def add_batch_counter_hook_function(module): + if hasattr(module, '__batch_counter_handle__'): + return + + handle = module.register_forward_hook(batch_counter_hook) + module.__batch_counter_handle__ = handle + + +def remove_batch_counter_hook_function(module): + if hasattr(module, '__batch_counter_handle__'): + module.__batch_counter_handle__.remove() + del module.__batch_counter_handle__ + + +def add_flops_counter_variable_or_reset(module): + if is_supported_instance(module): + module.__flops__ = 0 + + +def add_flops_counter_hook_function(module): + if is_supported_instance(module): + if hasattr(module, '__flops_handle__'): + return + + if isinstance(module, CONV_TYPES): + handle = module.register_forward_hook(conv_flops_counter_hook) + elif isinstance(module, RELU_TYPES): + handle = module.register_forward_hook(relu_flops_counter_hook) + elif isinstance(module, LINEAR_TYPES): + handle = module.register_forward_hook(linear_flops_counter_hook) + elif isinstance(module, POOLING_TYPES): + handle = module.register_forward_hook(pool_flops_counter_hook) + elif isinstance(module, BN_TYPES): + handle = module.register_forward_hook(bn_flops_counter_hook) + elif isinstance(module, UPSAMPLE_TYPES): + handle = module.register_forward_hook(upsample_flops_counter_hook) + elif isinstance(module, DECONV_TYPES): + handle = module.register_forward_hook(deconv_flops_counter_hook) + else: + handle = module.register_forward_hook(empty_flops_counter_hook) + module.__flops_handle__ = handle + + +def remove_flops_counter_hook_function(module): + if is_supported_instance(module): + if hasattr(module, '__flops_handle__'): + module.__flops_handle__.remove() + del module.__flops_handle__ + + +# --- Masked flops counting +# Also being run in the initialization +def add_flops_mask_variable_or_reset(module): + if is_supported_instance(module): + module.__mask__ = None diff --git a/CDARTS_detection/mmdet/utils/logger.py b/CDARTS_detection/mmdet/utils/logger.py new file mode 100644 index 0000000..91b086e --- /dev/null +++ b/CDARTS_detection/mmdet/utils/logger.py @@ -0,0 +1,66 @@ +import logging + +from mmcv.runner import get_dist_info + + +def get_root_logger(log_file=None, log_level=logging.INFO): + """Get the root logger. + + The logger will be initialized if it has not been initialized. By default a + StreamHandler will be added. If `log_file` is specified, a FileHandler will + also be added. The name of the root logger is the top-level package name, + e.g., "mmdet". + + Args: + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the root logger. + log_level (int): The root logger level. Note that only the process of + rank 0 is affected, while other processes will set the level to + "Error" and be silent most of the time. + + Returns: + logging.Logger: The root logger. 
+ """ + logger = logging.getLogger(__name__.split('.')[0]) # i.e., mmdet + # if the logger has been initialized, just return it + if logger.hasHandlers(): + return logger + + format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + logging.basicConfig(format=format_str, level=log_level) + rank, _ = get_dist_info() + if rank != 0: + logger.setLevel('ERROR') + elif log_file is not None: + file_handler = logging.FileHandler(log_file, 'w') + file_handler.setFormatter(logging.Formatter(format_str)) + file_handler.setLevel(log_level) + logger.addHandler(file_handler) + + return logger + + +def print_log(msg, logger=None, level=logging.INFO): + """Print a log message. + + Args: + msg (str): The message to be logged. + logger (logging.Logger | str | None): The logger to be used. Some + special loggers are: + - "root": the root logger obtained with `get_root_logger()`. + - "silent": no message will be printed. + - None: The `print()` method will be used to print log messages. + level (int): Logging level. Only available when `logger` is a Logger + object or "root". + """ + if logger is None: + print(msg) + elif logger == 'root': + _logger = get_root_logger() + _logger.log(level, msg) + elif isinstance(logger, logging.Logger): + logger.log(level, msg) + elif logger != 'silent': + raise TypeError( + 'logger should be either a logging.Logger object, "root", ' + '"silent" or None, but got {}'.format(logger)) \ No newline at end of file diff --git a/CDARTS_detection/mmdet/utils/profiling.py b/CDARTS_detection/mmdet/utils/profiling.py new file mode 100644 index 0000000..58b1c87 --- /dev/null +++ b/CDARTS_detection/mmdet/utils/profiling.py @@ -0,0 +1,41 @@ +import contextlib +import sys +import time + +import torch + +if sys.version_info >= (3, 7): + + @contextlib.contextmanager + def profile_time(trace_name, + name, + enabled=True, + stream=None, + end_stream=None): + """Print time spent by CPU and GPU. + + Useful as a temporary context manager to find sweet spots of + code suitable for async implementation. + + """ + if (not enabled) or not torch.cuda.is_available(): + yield + return + stream = stream if stream else torch.cuda.current_stream() + end_stream = end_stream if end_stream else stream + start = torch.cuda.Event(enable_timing=True) + end = torch.cuda.Event(enable_timing=True) + stream.record_event(start) + try: + cpu_start = time.monotonic() + yield + finally: + cpu_end = time.monotonic() + end_stream.record_event(end) + end.synchronize() + cpu_time = (cpu_end - cpu_start) * 1000 + gpu_time = start.elapsed_time(end) + msg = "{} {} cpu_time {:.2f} ms ".format(trace_name, name, + cpu_time) + msg += "gpu_time {:.2f} ms stream {}".format(gpu_time, stream) + print(msg, end_stream) diff --git a/CDARTS_detection/mmdet/utils/registry.py b/CDARTS_detection/mmdet/utils/registry.py new file mode 100644 index 0000000..2bbb7f9 --- /dev/null +++ b/CDARTS_detection/mmdet/utils/registry.py @@ -0,0 +1,76 @@ +import inspect + +import mmcv + + +class Registry(object): + + def __init__(self, name): + self._name = name + self._module_dict = dict() + + def __repr__(self): + format_str = self.__class__.__name__ + '(name={}, items={})'.format( + self._name, list(self._module_dict.keys())) + return format_str + + @property + def name(self): + return self._name + + @property + def module_dict(self): + return self._module_dict + + def get(self, key): + return self._module_dict.get(key, None) + + def _register_module(self, module_class): + """Register a module. 
+
+        Args:
+            module_class (type): Class to be registered.
+        """
+        if not inspect.isclass(module_class):
+            raise TypeError('module must be a class, but got {}'.format(
+                type(module_class)))
+        module_name = module_class.__name__
+        if module_name in self._module_dict:
+            raise KeyError('{} is already registered in {}'.format(
+                module_name, self.name))
+        self._module_dict[module_name] = module_class
+
+    def register_module(self, cls):
+        self._register_module(cls)
+        return cls
+
+
+def build_from_cfg(cfg, registry, default_args=None):
+    """Build a module from config dict.
+
+    Args:
+        cfg (dict): Config dict. It should at least contain the key "type".
+        registry (:obj:`Registry`): The registry to search the type from.
+        default_args (dict, optional): Default initialization arguments.
+
+    Returns:
+        obj: The constructed object.
+    """
+    assert isinstance(cfg, dict) and 'type' in cfg
+    assert isinstance(default_args, dict) or default_args is None
+    args = cfg.copy()
+    obj_type = args.pop('type')
+    if mmcv.is_str(obj_type):
+        obj_cls = registry.get(obj_type)
+        if obj_cls is None:
+            raise KeyError('{} is not in the {} registry'.format(
+                obj_type, registry.name))
+    elif inspect.isclass(obj_type):
+        obj_cls = obj_type
+    else:
+        raise TypeError('type must be a str or valid type, but got {}'.format(
+            type(obj_type)))
+    if default_args is not None:
+        for name, value in default_args.items():
+            args.setdefault(name, value)
+    return obj_cls(**args)
diff --git a/CDARTS_detection/mmdet/utils/util_mixins.py b/CDARTS_detection/mmdet/utils/util_mixins.py
new file mode 100644
index 0000000..5585ac6
--- /dev/null
+++ b/CDARTS_detection/mmdet/utils/util_mixins.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+"""
+This module defines the :class:`NiceRepr` mixin class, which defines a
+``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``
+method, which you must define. This means you only have to overload one
+function instead of two. Furthermore, if the object defines a ``__len__``
+method, then the ``__nice__`` method defaults to something sensible, otherwise
+it is treated as abstract and raises ``NotImplementedError``.
+
+To use, simply have your object inherit from :class:`NiceRepr`
+(multi-inheritance should be ok).
+
+This code was copied from the ubelt library: https://github.com/Erotemic/ubelt
+
+Example:
+    >>> # Objects that define __nice__ have a default __str__ and __repr__
+    >>> class Student(NiceRepr):
+    ...    def __init__(self, name):
+    ...       self.name = name
+    ...    def __nice__(self):
+    ...       return self.name
+    >>> s1 = Student('Alice')
+    >>> s2 = Student('Bob')
+    >>> print('s1 = {}'.format(s1))
+    >>> print('s2 = {}'.format(s2))
+    s1 = <Student(Alice)>
+    s2 = <Student(Bob)>
+
+Example:
+    >>> # Objects that define __len__ have a default __nice__
+    >>> class Group(NiceRepr):
+    ...    def __init__(self, data):
+    ...       self.data = data
+    ...    def __len__(self):
+    ...       return len(self.data)
+    >>> g = Group([1, 2, 3])
+    >>> print('g = {}'.format(g))
+    g = <Group(3)>
+
+"""
+import warnings
+
+
+class NiceRepr(object):
+    """
+    Inherit from this class and define ``__nice__`` to "nicely" print your
+    objects.
+
+    Defines ``__str__`` and ``__repr__`` in terms of the ``__nice__`` function.
+    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
+    If the inheriting class has a ``__len__`` method, then the default
+    ``__nice__`` method will return its length.
+
+    Example:
+        >>> class Foo(NiceRepr):
+        ...    def __nice__(self):
+        ...       
return 'info'
+        >>> foo = Foo()
+        >>> assert str(foo) == '<Foo(info)>'
+        >>> assert repr(foo).startswith('<Foo(info) at ')
+
+    Example:
+        >>> class Bar(NiceRepr):
+        ...    pass
+        >>> bar = Bar()
+        >>> import pytest
+        >>> with pytest.warns(None) as record:
+        >>>     assert 'object at' in str(bar)
+        >>>     assert 'object at' in repr(bar)
+
+    Example:
+        >>> class Baz(NiceRepr):
+        ...    def __len__(self):
+        ...       return 5
+        >>> baz = Baz()
+        >>> assert str(baz) == '<Baz(5)>'
+    """
+
+    def __nice__(self):
+        if hasattr(self, '__len__'):
+            # It is a common pattern for objects to use __len__ in __nice__
+            # As a convenience we define a default __nice__ for these objects
+            return str(len(self))
+        else:
+            # In all other cases force the subclass to overload __nice__
+            raise NotImplementedError(
+                'Define the __nice__ method for {!r}'.format(self.__class__))
+
+    def __repr__(self):
+        try:
+            nice = self.__nice__()
+            classname = self.__class__.__name__
+            return '<{0}({1}) at {2}>'.format(classname, nice, hex(id(self)))
+        except NotImplementedError as ex:
+            warnings.warn(str(ex), category=RuntimeWarning)
+            return object.__repr__(self)
+
+    def __str__(self):
+        try:
+            classname = self.__class__.__name__
+            nice = self.__nice__()
+            return '<{0}({1})>'.format(classname, nice)
+        except NotImplementedError as ex:
+            warnings.warn(str(ex), category=RuntimeWarning)
+            return object.__repr__(self)
diff --git a/CDARTS_detection/mmdet/version.py b/CDARTS_detection/mmdet/version.py
new file mode 100644
index 0000000..6e49834
--- /dev/null
+++ b/CDARTS_detection/mmdet/version.py
@@ -0,0 +1,5 @@
+# GENERATED VERSION FILE
+# TIME: Fri Oct 15 17:01:16 2021
+
+__version__ = '0.6.0+0889383'
+short_version = '0.6.0'
diff --git a/CDARTS_detection/scripts/train_hit_det.sh b/CDARTS_detection/scripts/train_hit_det.sh
new file mode 100644
index 0000000..cac2ec5
--- /dev/null
+++ b/CDARTS_detection/scripts/train_hit_det.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+cd ../
+
+GPUs=8
+CONFIG='configs/nas_trinity/2stage_hitdet.py'
+WORKDIR='./work_dirs/hitdet_1x/'
+
+python -m torch.distributed.launch \
+--nproc_per_node=${GPUs} train.py \
+--validate \
+--gpus ${GPUs} \
+--launcher pytorch \
+--config ${CONFIG} \
+--work_dir ${WORKDIR}
diff --git a/CDARTS_detection/setup.py b/CDARTS_detection/setup.py
new file mode 100644
index 0000000..6a87520
--- /dev/null
+++ b/CDARTS_detection/setup.py
@@ -0,0 +1,112 @@
+import os
+import subprocess
+import time
+from setuptools import find_packages, setup
+
+
+def readme():
+    with open('README.md', encoding='utf-8') as f:
+        content = f.read()
+    return content
+
+
+MAJOR = 0
+MINOR = 6
+PATCH = 0
+SUFFIX = ''
+SHORT_VERSION = '{}.{}.{}{}'.format(MAJOR, MINOR, PATCH, SUFFIX)
+
+version_file = 'mmdet/version.py'
+
+
+def get_git_hash():
+
+    def _minimal_ext_cmd(cmd):
+        # construct minimal environment
+        env = {}
+        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
+            v = os.environ.get(k)
+            if v is not None:
+                env[k] = v
+        # LANGUAGE is used on win32
+        env['LANGUAGE'] = 'C'
+        env['LANG'] = 'C'
+        env['LC_ALL'] = 'C'
+        out = subprocess.Popen(
+            cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
+        return out
+
+    try:
+        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
+        sha = out.strip().decode('ascii')
+    except OSError:
+        sha = 'unknown'
+
+    return sha
+
+
+def get_hash():
+    if os.path.exists('.git'):
+        sha = get_git_hash()[:7]
+    elif os.path.exists(version_file):
+        try:
+            from mmdet.version import __version__
+            sha = __version__.split('+')[-1]
+        except ImportError:
+            raise ImportError('Unable to get git version')
+    else:
+        sha = 'unknown'
+
+    return sha
+
+
+def 
write_version_py(): + content = """# GENERATED VERSION FILE +# TIME: {} + +__version__ = '{}' +short_version = '{}' +""" + sha = get_hash() + VERSION = SHORT_VERSION + '+' + sha + + with open(version_file, 'w') as f: + f.write(content.format(time.asctime(), VERSION, SHORT_VERSION)) + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +if __name__ == '__main__': + write_version_py() + setup( + name='mmdet', + version=get_version(), + description='Open MMLab Detection Toolbox', + long_description=readme(), + keywords='computer vision, object detection', + url='https://github.com/open-mmlab/mmdetection', + packages=find_packages(exclude=('configs', 'tools', 'demo')), + package_data={'mmdet.ops': ['*/*.so']}, + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + ], + license='Apache License 2.0', + setup_requires=['pytest-runner'], + tests_require=['pytest'], + install_requires=[ + 'mmcv>=0.2.6', 'numpy', 'matplotlib', 'six', 'terminaltables', + 'pycocotools' + ], + zip_safe=False) diff --git a/CDARTS_detection/test.py b/CDARTS_detection/test.py new file mode 100644 index 0000000..53d13f1 --- /dev/null +++ b/CDARTS_detection/test.py @@ -0,0 +1,205 @@ +import os +import argparse +import os.path as osp +import torch +import torch.distributed as dist +import shutil +import tempfile + +import mmcv +from mmcv.runner import load_checkpoint, get_dist_info +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel + +from mmdet.apis import init_dist +from mmdet.core import results2json, coco_eval, wrap_fp16_model +from mmdet.datasets import build_dataloader, build_dataset +from mmdet.models import build_detector + + +def single_gpu_test(model, data_loader, show=False): + model.eval() + results = [] + dataset = data_loader.dataset + prog_bar = mmcv.ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=not show, **data) + results.append(result) + + if show: + model.module.show_result(data, result, dataset.img_norm_cfg) + + batch_size = data['img'][0].size(0) + for _ in range(0, batch_size, 100): + prog_bar.update() + return results + + +def multi_gpu_test(model, data_loader, tmpdir=None): + model.eval() + results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + results.append(result) + + if rank == 0: + batch_size = data['img'][0].size(0) + for _ in range(0, batch_size * world_size, 500): + prog_bar.update() + + # collect results from all ranks + results = collect_results(results, len(dataset), tmpdir) + + return results + + +def collect_results(result_part, size, tmpdir=None): + rank, world_size = get_dist_info() + # create a tmp dir if it is not specified + if tmpdir is None: + MAX_LEN = 512 + # 32 is whitespace + dir_tensor = torch.full((MAX_LEN, ), + 32, + dtype=torch.uint8, + device='cuda') + if rank == 0: + tmpdir = tempfile.mkdtemp() + tmpdir = torch.tensor( + 
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') + dir_tensor[:len(tmpdir)] = tmpdir + dist.broadcast(dir_tensor, 0) + tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() + else: + mmcv.mkdir_or_exist(tmpdir) + # dump the part result to the dir + mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank))) + dist.barrier() + # collect all parts + if rank != 0: + return None + else: + # load results of all parts from tmp dir + part_list = [] + for i in range(world_size): + part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i)) + part_list.append(mmcv.load(part_file)) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + # remove tmp dir + shutil.rmtree(tmpdir) + return ordered_results + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMDet test detector') + parser.add_argument('--config', help='test config file path') + parser.add_argument('--checkpoint', help='checkpoint file') + parser.add_argument('--out', help='output result file') + parser.add_argument( + '--eval', + type=str, + nargs='+', + choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'], + help='eval types') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument('--tmpdir', help='tmp dir for writing some results') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + + args, unparsed = parser.parse_known_args() + + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def main(): + args = parse_args() + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = mmcv.Config.fromfile(args.config) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + cfg.model.pretrained = None + cfg.data.test.test_mode = True + + # init distributed env first, since logger depends on the dist info. 
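+    # Distributed testing runs one process per GPU (tools/dist_test.sh wraps
+    # torch.distributed.launch); with launcher 'none' a single process with
+    # MMDataParallel on one GPU is used instead.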
+    if args.launcher == 'none':
+        distributed = False
+    else:
+        distributed = True
+        init_dist(args.launcher, **cfg.dist_params)
+
+    # build the dataloader
+    # TODO: support multiple images per gpu (only minor changes are needed)
+    dataset = build_dataset(cfg.data.test)
+    data_loader = build_dataloader(
+        dataset,
+        imgs_per_gpu=1,
+        workers_per_gpu=cfg.data.workers_per_gpu,
+        dist=distributed,
+        shuffle=False)
+
+    # build the model and load checkpoint
+    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
+    fp16_cfg = cfg.get('fp16', None)
+    if fp16_cfg is not None:
+        wrap_fp16_model(model)
+    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
+    # old versions did not save class info in checkpoint, this workaround is
+    # for backward compatibility
+    if 'CLASSES' in checkpoint['meta']:
+        model.CLASSES = checkpoint['meta']['CLASSES']
+    else:
+        model.CLASSES = dataset.CLASSES
+
+    if not distributed:
+        model = MMDataParallel(model, device_ids=[0])
+        outputs = single_gpu_test(model, data_loader, args.show)
+    else:
+        model = MMDistributedDataParallel(model.cuda())
+        outputs = multi_gpu_test(model, data_loader, args.tmpdir)
+
+    rank, _ = get_dist_info()
+    if args.out and rank == 0:
+        print('\nwriting results to {}'.format(args.out))
+        mmcv.dump(outputs, args.out)
+        eval_types = args.eval
+        if eval_types:
+            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
+            if eval_types == ['proposal_fast']:
+                result_file = args.out
+                coco_eval(result_file, eval_types, dataset.coco)
+            else:
+                if not isinstance(outputs[0], dict):
+                    result_files = results2json(dataset, outputs, args.out)
+                    coco_eval(result_files, eval_types, dataset.coco)
+                else:
+                    for name in outputs[0]:
+                        print('\nEvaluating {}'.format(name))
+                        outputs_ = [out[name] for out in outputs]
+                        result_file = args.out + '.{}'.format(name)
+                        result_files = results2json(dataset, outputs_,
+                                                    result_file)
+                        coco_eval(result_files, eval_types, dataset.coco)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/CDARTS_detection/tools/analyze_logs.py b/CDARTS_detection/tools/analyze_logs.py
new file mode 100644
index 0000000..c9f603f
--- /dev/null
+++ b/CDARTS_detection/tools/analyze_logs.py
@@ -0,0 +1,178 @@
+import argparse
+import json
+from collections import defaultdict
+
+import matplotlib.pyplot as plt
+import numpy as np
+import seaborn as sns
+
+
+def cal_train_time(log_dicts, args):
+    for i, log_dict in enumerate(log_dicts):
+        print('{}Analyze train time of {}{}'.format('-' * 5, args.json_logs[i],
+                                                    '-' * 5))
+        all_times = []
+        for epoch in log_dict.keys():
+            if args.include_outliers:
+                all_times.append(log_dict[epoch]['time'])
+            else:
+                all_times.append(log_dict[epoch]['time'][1:])
+        all_times = np.array(all_times)
+        epoch_ave_time = all_times.mean(-1)
+        slowest_epoch = epoch_ave_time.argmax()
+        fastest_epoch = epoch_ave_time.argmin()
+        std_over_epoch = epoch_ave_time.std()
+        print('slowest epoch {}, average time is {:.4f}'.format(
+            slowest_epoch + 1, epoch_ave_time[slowest_epoch]))
+        print('fastest epoch {}, average time is {:.4f}'.format(
+            fastest_epoch + 1, epoch_ave_time[fastest_epoch]))
+        print('time std over epochs is {:.4f}'.format(std_over_epoch))
+        print('average iter time: {:.4f} s/iter'.format(np.mean(all_times)))
+        print()
+
+
+def plot_curve(log_dicts, args):
+    if args.backend is not None:
+        plt.switch_backend(args.backend)
+    sns.set_style(args.style)
+    # if legend is None, use {filename}_{key} as legend
+    legend = args.legend
+    if legend is None:
+        legend = []
+        for json_log in 
args.json_logs: + for metric in args.keys: + legend.append('{}_{}'.format(json_log, metric)) + assert len(legend) == (len(args.json_logs) * len(args.keys)) + metrics = args.keys + + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + epochs = list(log_dict.keys()) + for j, metric in enumerate(metrics): + print('plot curve of {}, metric is {}'.format( + args.json_logs[i], metric)) + assert metric in log_dict[epochs[ + 0]], '{} does not contain metric {}'.format( + args.json_logs[i], metric) + + if 'mAP' in metric: + xs = np.arange(1, max(epochs) + 1) + ys = [] + for epoch in epochs: + ys += log_dict[epoch][metric] + ax = plt.gca() + ax.set_xticks(xs) + plt.xlabel('epoch') + plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o') + else: + xs = [] + ys = [] + num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1] + for epoch in epochs: + iters = log_dict[epoch]['iter'] + if log_dict[epoch]['mode'][-1] == 'val': + iters = iters[:-1] + xs.append( + np.array(iters) + (epoch - 1) * num_iters_per_epoch) + ys.append(np.array(log_dict[epoch][metric][:len(iters)])) + xs = np.concatenate(xs) + ys = np.concatenate(ys) + plt.xlabel('iter') + plt.plot( + xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) + plt.legend() + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print('save curve to: {}'.format(args.out)) + plt.savefig(args.out) + plt.cla() + + +def add_plot_parser(subparsers): + parser_plt = subparsers.add_parser( + 'plot_curve', help='parser for plotting curves') + parser_plt.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_plt.add_argument( + '--keys', + type=str, + nargs='+', + default=['bbox_mAP'], + help='the metric that you want to plot') + parser_plt.add_argument('--title', type=str, help='title of figure') + parser_plt.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser_plt.add_argument( + '--backend', type=str, default=None, help='backend of plt') + parser_plt.add_argument( + '--style', type=str, default='dark', help='style of plt') + parser_plt.add_argument('--out', type=str, default=None) + + +def add_time_parser(subparsers): + parser_time = subparsers.add_parser( + 'cal_train_time', + help='parser for computing the average time per training iteration') + parser_time.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_time.add_argument( + '--include-outliers', + action='store_true', + help='include the first value of every epoch when computing ' + 'the average time') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + # currently only support plot curve and calculate average train time + subparsers = parser.add_subparsers(dest='task', help='task parser') + add_plot_parser(subparsers) + add_time_parser(subparsers) + args = parser.parse_args() + return args + + +def load_json_logs(json_logs): + # load and convert json_logs to log_dict, key is epoch, value is a sub dict + # keys of sub dict is different metrics, e.g. 
memory, bbox_mAP + # value of sub dict is a list of corresponding values of all iterations + log_dicts = [dict() for _ in json_logs] + for json_log, log_dict in zip(json_logs, log_dicts): + with open(json_log, 'r') as log_file: + for l in log_file: + log = json.loads(l.strip()) + epoch = log.pop('epoch') + if epoch not in log_dict: + log_dict[epoch] = defaultdict(list) + for k, v in log.items(): + log_dict[epoch][k].append(v) + return log_dicts + + +def main(): + args = parse_args() + + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + + log_dicts = load_json_logs(json_logs) + + eval(args.task)(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/CDARTS_detection/tools/coco_eval.py b/CDARTS_detection/tools/coco_eval.py new file mode 100644 index 0000000..65e114c --- /dev/null +++ b/CDARTS_detection/tools/coco_eval.py @@ -0,0 +1,28 @@ +from argparse import ArgumentParser + +from mmdet.core import coco_eval + + +def main(): + parser = ArgumentParser(description='COCO Evaluation') + parser.add_argument('result', help='result file path') + parser.add_argument('--ann', help='annotation file path') + parser.add_argument( + '--types', + type=str, + nargs='+', + choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'], + default=['bbox'], + help='result types') + parser.add_argument( + '--max-dets', + type=int, + nargs='+', + default=[100, 300, 1000], + help='proposal numbers, only used for recall evaluation') + args = parser.parse_args() + coco_eval(args.result, args.types, args.ann, args.max_dets) + + +if __name__ == '__main__': + main() diff --git a/CDARTS_detection/tools/convert_datasets/pascal_voc.py b/CDARTS_detection/tools/convert_datasets/pascal_voc.py new file mode 100644 index 0000000..5fb5cb4 --- /dev/null +++ b/CDARTS_detection/tools/convert_datasets/pascal_voc.py @@ -0,0 +1,140 @@ +import argparse +import os.path as osp +import xml.etree.ElementTree as ET + +import mmcv +import numpy as np + +from mmdet.core import voc_classes + +label_ids = {name: i + 1 for i, name in enumerate(voc_classes())} + + +def parse_xml(args): + xml_path, img_path = args + tree = ET.parse(xml_path) + root = tree.getroot() + size = root.find('size') + w = int(size.find('width').text) + h = int(size.find('height').text) + bboxes = [] + labels = [] + bboxes_ignore = [] + labels_ignore = [] + for obj in root.findall('object'): + name = obj.find('name').text + label = label_ids[name] + difficult = int(obj.find('difficult').text) + bnd_box = obj.find('bndbox') + bbox = [ + int(bnd_box.find('xmin').text), + int(bnd_box.find('ymin').text), + int(bnd_box.find('xmax').text), + int(bnd_box.find('ymax').text) + ] + if difficult: + bboxes_ignore.append(bbox) + labels_ignore.append(label) + else: + bboxes.append(bbox) + labels.append(label) + if not bboxes: + bboxes = np.zeros((0, 4)) + labels = np.zeros((0, )) + else: + bboxes = np.array(bboxes, ndmin=2) - 1 + labels = np.array(labels) + if not bboxes_ignore: + bboxes_ignore = np.zeros((0, 4)) + labels_ignore = np.zeros((0, )) + else: + bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1 + labels_ignore = np.array(labels_ignore) + annotation = { + 'filename': img_path, + 'width': w, + 'height': h, + 'ann': { + 'bboxes': bboxes.astype(np.float32), + 'labels': labels.astype(np.int64), + 'bboxes_ignore': bboxes_ignore.astype(np.float32), + 'labels_ignore': labels_ignore.astype(np.int64) + } + } + return annotation + + +def cvt_annotations(devkit_path, years, split, out_file): + if not isinstance(years, 
list): + years = [years] + annotations = [] + for year in years: + filelist = osp.join(devkit_path, 'VOC{}/ImageSets/Main/{}.txt'.format( + year, split)) + if not osp.isfile(filelist): + print('filelist does not exist: {}, skip voc{} {}'.format( + filelist, year, split)) + return + img_names = mmcv.list_from_file(filelist) + xml_paths = [ + osp.join(devkit_path, 'VOC{}/Annotations/{}.xml'.format( + year, img_name)) for img_name in img_names + ] + img_paths = [ + 'VOC{}/JPEGImages/{}.jpg'.format(year, img_name) + for img_name in img_names + ] + part_annotations = mmcv.track_progress(parse_xml, + list(zip(xml_paths, img_paths))) + annotations.extend(part_annotations) + mmcv.dump(annotations, out_file) + return annotations + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert PASCAL VOC annotations to mmdetection format') + parser.add_argument('devkit_path', help='pascal voc devkit path') + parser.add_argument('-o', '--out-dir', help='output path') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + devkit_path = args.devkit_path + out_dir = args.out_dir if args.out_dir else devkit_path + mmcv.mkdir_or_exist(out_dir) + + years = [] + if osp.isdir(osp.join(devkit_path, 'VOC2007')): + years.append('2007') + if osp.isdir(osp.join(devkit_path, 'VOC2012')): + years.append('2012') + if '2007' in years and '2012' in years: + years.append(['2007', '2012']) + if not years: + raise IOError('The devkit path {} contains neither "VOC2007" nor ' + '"VOC2012" subfolder'.format(devkit_path)) + for year in years: + if year == '2007': + prefix = 'voc07' + elif year == '2012': + prefix = 'voc12' + elif year == ['2007', '2012']: + prefix = 'voc0712' + for split in ['train', 'val', 'trainval']: + dataset_name = prefix + '_' + split + print('processing {} ...'.format(dataset_name)) + cvt_annotations(devkit_path, year, split, + osp.join(out_dir, dataset_name + '.pkl')) + if not isinstance(year, list): + dataset_name = prefix + '_test' + print('processing {} ...'.format(dataset_name)) + cvt_annotations(devkit_path, year, 'test', + osp.join(out_dir, dataset_name + '.pkl')) + print('Done!') + + +if __name__ == '__main__': + main() diff --git a/CDARTS_detection/tools/detectron2pytorch.py b/CDARTS_detection/tools/detectron2pytorch.py new file mode 100644 index 0000000..0a90ad1 --- /dev/null +++ b/CDARTS_detection/tools/detectron2pytorch.py @@ -0,0 +1,88 @@ +import argparse +from collections import OrderedDict + +import mmcv +import torch + +arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)} + + +def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names): + # detectron replace bn with affine channel layer + state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name + + '_b']) + state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name + + '_s']) + bn_size = state_dict[torch_name + '.weight'].size() + state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size) + state_dict[torch_name + '.running_var'] = torch.ones(bn_size) + converted_names.add(caffe_name + '_b') + converted_names.add(caffe_name + '_s') + + +def convert_conv_fc(blobs, state_dict, caffe_name, torch_name, + converted_names): + state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name + + '_w']) + converted_names.add(caffe_name + '_w') + if caffe_name + '_b' in blobs: + state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name + + '_b']) + converted_names.add(caffe_name + '_b') + + +def convert(src, dst, depth): + """Convert keys 
in detectron pretrained ResNet models to pytorch style.""" + # load arch_settings + if depth not in arch_settings: + raise ValueError('Only support ResNet-50 and ResNet-101 currently') + block_nums = arch_settings[depth] + # load caffe model + caffe_model = mmcv.load(src, encoding='latin1') + blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model + # convert to pytorch style + state_dict = OrderedDict() + converted_names = set() + convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names) + convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names) + for i in range(1, len(block_nums) + 1): + for j in range(block_nums[i - 1]): + if j == 0: + convert_conv_fc(blobs, state_dict, + 'res{}_{}_branch1'.format(i + 1, j), + 'layer{}.{}.downsample.0'.format(i, j), + converted_names) + convert_bn(blobs, state_dict, + 'res{}_{}_branch1_bn'.format(i + 1, j), + 'layer{}.{}.downsample.1'.format(i, j), + converted_names) + for k, letter in enumerate(['a', 'b', 'c']): + convert_conv_fc(blobs, state_dict, + 'res{}_{}_branch2{}'.format(i + 1, j, letter), + 'layer{}.{}.conv{}'.format(i, j, k + 1), + converted_names) + convert_bn(blobs, state_dict, + 'res{}_{}_branch2{}_bn'.format(i + 1, j, letter), + 'layer{}.{}.bn{}'.format(i, j, + k + 1), converted_names) + # check if all layers are converted + for key in blobs: + if key not in converted_names: + print('Not Convert: {}'.format(key)) + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + parser.add_argument('depth', type=int, help='ResNet model depth') + args = parser.parse_args() + convert(args.src, args.dst, args.depth) + + +if __name__ == '__main__': + main() diff --git a/CDARTS_detection/tools/dist_test.sh b/CDARTS_detection/tools/dist_test.sh new file mode 100644 index 0000000..5f6abf1 --- /dev/null +++ b/CDARTS_detection/tools/dist_test.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +PYTHON=${PYTHON:-"python"} + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 + +$PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS \ + $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} diff --git a/CDARTS_detection/tools/dist_train.sh b/CDARTS_detection/tools/dist_train.sh new file mode 100644 index 0000000..a6ed485 --- /dev/null +++ b/CDARTS_detection/tools/dist_train.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +PYTHON=${PYTHON:-"python"} + +CONFIG=$1 +GPUS=$2 + +$PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS \ + $(dirname "$0")/train.py $CONFIG --launcher pytorch ${@:3} diff --git a/CDARTS_detection/tools/get_flops.py b/CDARTS_detection/tools/get_flops.py new file mode 100644 index 0000000..e64bac6 --- /dev/null +++ b/CDARTS_detection/tools/get_flops.py @@ -0,0 +1,52 @@ +import argparse + +from mmcv import Config + +from mmdet.models import build_detector +from mmdet.utils import get_model_complexity_info + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a detector') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[1280, 800], + help='input image size') + args = parser.parse_args() + return args + + +def main(): + + args = parse_args() + + if len(args.shape) == 1: + input_shape = (3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + 
input_shape = (3, ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + cfg = Config.fromfile(args.config) + model = build_detector( + cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda() + model.eval() + + if hasattr(model, 'forward_dummy'): + model.forward = model.forward_dummy + else: + raise NotImplementedError( + 'FLOPs counter is currently not currently supported with {}'. + format(model.__class__.__name__)) + + flops, params = get_model_complexity_info(model, input_shape) + split_line = '=' * 30 + print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format( + split_line, input_shape, flops, params)) + + +if __name__ == '__main__': + main() diff --git a/CDARTS_detection/tools/publish_model.py b/CDARTS_detection/tools/publish_model.py new file mode 100644 index 0000000..39795f1 --- /dev/null +++ b/CDARTS_detection/tools/publish_model.py @@ -0,0 +1,34 @@ +import argparse +import subprocess +import torch + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file): + checkpoint = torch.load(in_file, map_location='cpu') + # remove optimizer for smaller file size + if 'optimizer' in checkpoint: + del checkpoint['optimizer'] + # if it is necessary to remove some sensitive data in checkpoint['meta'], + # add the code here. + torch.save(checkpoint, out_file) + sha = subprocess.check_output(['sha256sum', out_file]).decode() + final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) + subprocess.Popen(['mv', out_file, final_file]) + + +def main(): + args = parse_args() + process_checkpoint(args.in_file, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/CDARTS_detection/tools/slurm_test.sh b/CDARTS_detection/tools/slurm_test.sh new file mode 100644 index 0000000..8950bc8 --- /dev/null +++ b/CDARTS_detection/tools/slurm_test.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +PY_ARGS=${@:5} +SRUN_ARGS=${SRUN_ARGS:-""} + +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/CDARTS_detection/tools/slurm_train.sh b/CDARTS_detection/tools/slurm_train.sh new file mode 100644 index 0000000..45474c4 --- /dev/null +++ b/CDARTS_detection/tools/slurm_train.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +WORK_DIR=$4 +GPUS=${5:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${PY_ARGS:-"--validate"} + +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --work_dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} diff --git a/CDARTS_detection/tools/test.py b/CDARTS_detection/tools/test.py new file mode 100644 index 0000000..c7ce14e --- /dev/null +++ 
b/CDARTS_detection/tools/test.py @@ -0,0 +1,313 @@ +import argparse +import os +import os.path as osp +import pickle +import shutil +import tempfile + +import mmcv +import torch +import torch.distributed as dist +from mmcv.parallel import MMDataParallel, MMDistributedDataParallel +from mmcv.runner import get_dist_info, init_dist, load_checkpoint + +from mmdet.core import wrap_fp16_model +from mmdet.datasets import build_dataloader, build_dataset +from mmdet.models import build_detector + + +def single_gpu_test(model, data_loader, show=False): + model.eval() + results = [] + dataset = data_loader.dataset + prog_bar = mmcv.ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=not show, **data) + results.append(result) + + if show: + model.module.show_result(data, result) + + batch_size = data['img'][0].size(0) + for _ in range(batch_size): + prog_bar.update() + return results + + +def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): + """Test model with multiple gpus. + + This method tests model with multiple gpus and collects the results + under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' + it encodes results to gpu tensors and use gpu communication for results + collection. On cpu mode it saves the results on different gpus to 'tmpdir' + and collects them by the rank 0 worker. + + Args: + model (nn.Module): Model to be tested. + data_loader (nn.Dataloader): Pytorch data loader. + tmpdir (str): Path of directory to save the temporary results from + different gpus under cpu mode. + gpu_collect (bool): Option to use either gpu or cpu to collect results. + + Returns: + list: The prediction results. + """ + model.eval() + results = [] + dataset = data_loader.dataset + rank, world_size = get_dist_info() + if rank == 0: + prog_bar = mmcv.ProgressBar(len(dataset)) + for i, data in enumerate(data_loader): + with torch.no_grad(): + result = model(return_loss=False, rescale=True, **data) + results.append(result) + + if rank == 0: + batch_size = data['img'][0].size(0) + for _ in range(batch_size * world_size): + prog_bar.update() + + # collect results from all ranks + if gpu_collect: + results = collect_results_gpu(results, len(dataset)) + else: + results = collect_results_cpu(results, len(dataset), tmpdir) + return results + + +def collect_results_cpu(result_part, size, tmpdir=None): + rank, world_size = get_dist_info() + # create a tmp dir if it is not specified + if tmpdir is None: + MAX_LEN = 512 + # 32 is whitespace + dir_tensor = torch.full((MAX_LEN, ), + 32, + dtype=torch.uint8, + device='cuda') + if rank == 0: + tmpdir = tempfile.mkdtemp() + tmpdir = torch.tensor( + bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') + dir_tensor[:len(tmpdir)] = tmpdir + dist.broadcast(dir_tensor, 0) + tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() + else: + mmcv.mkdir_or_exist(tmpdir) + # dump the part result to the dir + mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank))) + dist.barrier() + # collect all parts + if rank != 0: + return None + else: + # load results of all parts from tmp dir + part_list = [] + for i in range(world_size): + part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i)) + part_list.append(mmcv.load(part_file)) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + # remove tmp dir + 
shutil.rmtree(tmpdir) + return ordered_results + + +def collect_results_gpu(result_part, size): + rank, world_size = get_dist_info() + # dump result part to tensor with pickle + part_tensor = torch.tensor( + bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') + # gather all result part tensor shape + shape_tensor = torch.tensor(part_tensor.shape, device='cuda') + shape_list = [shape_tensor.clone() for _ in range(world_size)] + dist.all_gather(shape_list, shape_tensor) + # padding result part tensor to max length + shape_max = torch.tensor(shape_list).max() + part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') + part_send[:shape_tensor[0]] = part_tensor + part_recv_list = [ + part_tensor.new_zeros(shape_max) for _ in range(world_size) + ] + # gather all result part + dist.all_gather(part_recv_list, part_send) + + if rank == 0: + part_list = [] + for recv, shape in zip(part_recv_list, shape_list): + part_list.append( + pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) + # sort the results + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + # the dataloader may pad some samples + ordered_results = ordered_results[:size] + return ordered_results + + +class MultipleKVAction(argparse.Action): + """ + argparse action to split an argument into KEY=VALUE form + on the first = and append to a dictionary. + """ + + def _is_int(self, val): + try: + _ = int(val) + return True + except Exception: + return False + + def _is_float(self, val): + try: + _ = float(val) + return True + except Exception: + return False + + def _is_bool(self, val): + return val.lower() in ['true', 'false'] + + def __call__(self, parser, namespace, values, option_string=None): + options = {} + for val in values: + parts = val.split('=') + key = parts[0].strip() + if len(parts) > 2: + val = '='.join(parts[1:]) + else: + val = parts[1].strip() + # try parsing val to bool/int/float first + if self._is_bool(val): + import json + val = json.loads(val.lower()) + elif self._is_int(val): + val = int(val) + elif self._is_float(val): + val = float(val) + options[key] = val + setattr(namespace, self.dest, options) + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMDet test (and eval) a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument('--out', help='output result file in pickle format') + parser.add_argument( + '--format_only', + action='store_true', + help='Format the output results without perform evaluation. 
It is' + 'useful when you want to format the result to a specific format and ' + 'submit it to the test server') + parser.add_argument( + '--eval', + type=str, + nargs='+', + help='evaluation metrics, which depends on the dataset, e.g., "bbox",' + ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') + parser.add_argument('--show', action='store_true', help='show results') + parser.add_argument( + '--gpu_collect', + action='store_true', + help='whether to use gpu to collect results.') + parser.add_argument( + '--tmpdir', + help='tmp directory used for collecting results from multiple ' + 'workers, available when gpu_collect is not specified') + parser.add_argument( + '--options', nargs='+', action=MultipleKVAction, help='custom options') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def main(): + args = parse_args() + + assert args.out or args.eval or args.format_only or args.show, \ + ('Please specify at least one operation (save/eval/format/show the ' + 'results) with the argument "--out", "--eval", "--format_only" ' + 'or "--show"') + + if args.eval and args.format_only: + raise ValueError('--eval and --format_only cannot be both specified') + + if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): + raise ValueError('The output file must be a pkl file.') + + cfg = mmcv.Config.fromfile(args.config) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + cfg.model.pretrained = None + cfg.data.test.test_mode = True + + # init distributed env first, since logger depends on the dist info. 
+ if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + # build the dataloader + # TODO: support multiple images per gpu (only minor changes are needed) + dataset = build_dataset(cfg.data.test) + data_loader = build_dataloader( + dataset, + imgs_per_gpu=1, + workers_per_gpu=cfg.data.workers_per_gpu, + dist=distributed, + shuffle=False) + + # build the model and load checkpoint + model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) + fp16_cfg = cfg.get('fp16', None) + if fp16_cfg is not None: + wrap_fp16_model(model) + checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') + # old versions did not save class info in checkpoints, this walkaround is + # for backward compatibility + if 'CLASSES' in checkpoint['meta']: + model.CLASSES = checkpoint['meta']['CLASSES'] + else: + model.CLASSES = dataset.CLASSES + + if not distributed: + model = MMDataParallel(model, device_ids=[0]) + outputs = single_gpu_test(model, data_loader, args.show) + else: + model = MMDistributedDataParallel(model.cuda()) + outputs = multi_gpu_test(model, data_loader, args.tmpdir, + args.gpu_collect) + + rank, _ = get_dist_info() + if rank == 0: + if args.out: + print('\nwriting results to {}'.format(args.out)) + mmcv.dump(outputs, args.out) + kwargs = {} if args.options is None else args.options + if args.format_only: + dataset.format_results(outputs, **kwargs) + if args.eval: + dataset.evaluate(outputs, args.eval, **kwargs) + + +if __name__ == '__main__': + main() diff --git a/CDARTS_detection/tools/upgrade_model_version.py b/CDARTS_detection/tools/upgrade_model_version.py new file mode 100644 index 0000000..00bcdf4 --- /dev/null +++ b/CDARTS_detection/tools/upgrade_model_version.py @@ -0,0 +1,42 @@ +import argparse +import re +from collections import OrderedDict + +import torch + + +def convert(in_file, out_file): + """Convert keys in checkpoints. + + There can be some breaking changes during the development of mmdetection, + and this tool is used for upgrading checkpoints trained with old versions + to the latest one. 
+ """ + checkpoint = torch.load(in_file) + in_state_dict = checkpoint.pop('state_dict') + out_state_dict = OrderedDict() + for key, val in in_state_dict.items(): + # Use ConvModule instead of nn.Conv2d in RetinaNet + # cls_convs.0.weight -> cls_convs.0.conv.weight + m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key) + if m is not None: + param = m.groups()[1] + new_key = key.replace(param, 'conv.{}'.format(param)) + out_state_dict[new_key] = val + continue + + out_state_dict[key] = val + checkpoint['state_dict'] = out_state_dict + torch.save(checkpoint, out_file) + + +def main(): + parser = argparse.ArgumentParser(description='Upgrade model version') + parser.add_argument('in_file', help='input checkpoint file') + parser.add_argument('out_file', help='output checkpoint file') + args = parser.parse_args() + convert(args.in_file, args.out_file) + + +if __name__ == '__main__': + main() diff --git a/CDARTS_detection/tools/voc_eval.py b/CDARTS_detection/tools/voc_eval.py new file mode 100644 index 0000000..478ec3c --- /dev/null +++ b/CDARTS_detection/tools/voc_eval.py @@ -0,0 +1,62 @@ +from argparse import ArgumentParser + +import mmcv +import numpy as np + +from mmdet import datasets +from mmdet.core import eval_map + + +def voc_eval(result_file, dataset, iou_thr=0.5): + det_results = mmcv.load(result_file) + gt_bboxes = [] + gt_labels = [] + gt_ignore = [] + for i in range(len(dataset)): + ann = dataset.get_ann_info(i) + bboxes = ann['bboxes'] + labels = ann['labels'] + if 'bboxes_ignore' in ann: + ignore = np.concatenate([ + np.zeros(bboxes.shape[0], dtype=np.bool), + np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool) + ]) + gt_ignore.append(ignore) + bboxes = np.vstack([bboxes, ann['bboxes_ignore']]) + labels = np.concatenate([labels, ann['labels_ignore']]) + gt_bboxes.append(bboxes) + gt_labels.append(labels) + if not gt_ignore: + gt_ignore = gt_ignore + if hasattr(dataset, 'year') and dataset.year == 2007: + dataset_name = 'voc07' + else: + dataset_name = dataset.CLASSES + eval_map( + det_results, + gt_bboxes, + gt_labels, + gt_ignore=gt_ignore, + scale_ranges=None, + iou_thr=iou_thr, + dataset=dataset_name, + print_summary=True) + + +def main(): + parser = ArgumentParser(description='VOC Evaluation') + parser.add_argument('result', help='result file path') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--iou-thr', + type=float, + default=0.5, + help='IoU threshold for evaluation') + args = parser.parse_args() + cfg = mmcv.Config.fromfile(args.config) + test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets) + voc_eval(args.result, test_dataset, args.iou_thr) + + +if __name__ == '__main__': + main() diff --git a/CDARTS_detection/train.py b/CDARTS_detection/train.py new file mode 100644 index 0000000..73d27c7 --- /dev/null +++ b/CDARTS_detection/train.py @@ -0,0 +1,112 @@ +from __future__ import division + +import argparse +import torch +# torch.multiprocessing.set_sharing_strategy('file_system') +# for file_descriptor, but cause shm leak while nas optimizer +import os + +from mmcv import Config + +from mmdet import __version__ +from mmdet.datasets import build_dataset +from mmdet.apis import (train_detector, init_dist, get_root_logger, + set_random_seed) +from mmdet.models import build_detector + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a detector') + parser.add_argument('--config', help='train config file path') + parser.add_argument('--work_dir', default='/cache/tmp', help='path to save log and 
model') + parser.add_argument( + '--resume_from', help='the checkpoint file to resume from') + parser.add_argument( + '--validate', + action='store_true', + help='whether to evaluate the checkpoint during training') + parser.add_argument( + '--gpus', + type=int, + default=1, + help='number of gpus to use ' + '(only applicable to non-distributed training)') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + + args, unparsed = parser.parse_known_args() + + + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + # Solve SyncBN deadlock + os.environ["NCCL_LL_THRESHOLD"] = '0' + return args + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + # set cudnn_benchmark + if cfg.get('cudnn_benchmark', False): + torch.backends.cudnn.benchmark = True + # update configs according to CLI args + cfg.work_dir = args.work_dir + cfg.gpus = args.gpus + if args.resume_from is not None: + cfg.resume_from = args.resume_from + + # init distributed env first, since logger depends on the dist info. + if args.launcher == 'none': + distributed = False + else: + distributed = True + init_dist(args.launcher, **cfg.dist_params) + + # init logger before other steps + logger = get_root_logger(cfg.log_level) + logger.info('Distributed training: {}'.format(distributed)) + + # set random seeds + if args.seed is not None: + logger.info('Set random seed to {}'.format(args.seed)) + set_random_seed(args.seed) + model = build_detector( + cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) + + train_dataset = build_dataset(cfg.data.train) + + model = torch.nn.parallel.DistributedDataParallel( + model.cuda(), find_unused_parameters=True, device_ids=[args.local_rank], output_device=args.local_rank) + print(model) + print("Model have {} paramerters.".format(sum(x.numel() for x in model.parameters()) / 1e6)) + print("Model have {} backbone.".format(sum(x.numel() for x in model.module.backbone.parameters()) / 1e6)) + print("Model have {} neck.".format(sum(x.numel() for x in model.module.neck.parameters()) / 1e6)) + print("Model have {} head.".format(sum(x.numel() for x in model.module.bbox_head.parameters()) / 1e6)) + + if cfg.checkpoint_config is not None: + # save mmdet version, config file content and class names in + # checkpoints as meta data + cfg.checkpoint_config.meta = dict( + mmdet_version=__version__, + config=cfg.text, + CLASSES=train_dataset.CLASSES) + # add an attribute for visualization convenience + model.CLASSES = train_dataset.CLASSES + train_detector( + model, + train_dataset, + cfg, + distributed=distributed, + validate=args.validate, + logger=logger) + + +if __name__ == '__main__': + main() diff --git a/CDARTS_detection/train.sh b/CDARTS_detection/train.sh new file mode 100644 index 0000000..26b92b9 --- /dev/null +++ b/CDARTS_detection/train.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + + +GPUs=8 +CONFIG='configs/CyDAS_retinanet_1x.py' +WORKDIR='./work_dirs/CyDAS_1x/' + +python -m torch.distributed.launch \ +--nproc_per_node=${GPUs} train.py \ +--validate \ +--gpus ${GPUs} \ +--launcher pytorch \ +--config ${CONFIG} \ +--work_dir ${WORKDIR} diff --git a/CDARTS_segmentation/LICENSE b/CDARTS_segmentation/LICENSE new file mode 100644 index 0000000..d281f89 --- /dev/null +++ b/CDARTS_segmentation/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright 
(c) 2019 Wuyang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/CDARTS_segmentation/README.md b/CDARTS_segmentation/README.md new file mode 100644 index 0000000..0f2644c --- /dev/null +++ b/CDARTS_segmentation/README.md @@ -0,0 +1,48 @@ +## Prerequisites +- Ubuntu 16.04 +- Python 3.7 +- CUDA 11.1 (lower versions may work but were not tested) +- NVIDIA GPU (>= 11G graphic memory) + CuDNN v7.3 + +This repository has been tested on RTX 3090. Configurations (e.g batch size, image patch size) may need to be changed on different platforms. + +## Installation +* Clone this repo: +```bash +cd CyDAS_segmentation +``` +* Install dependencies: +```bash +bash install.sh +``` + +## Usage +### 0. Prepare the dataset +* Download the [leftImg8bit_trainvaltest.zip](https://www.cityscapes-dataset.com/file-handling/?packageID=3) and [gtFine_trainvaltest.zip](https://www.cityscapes-dataset.com/file-handling/?packageID=1) from the Cityscapes. +* Prepare the annotations by using the [createTrainIdLabelImgs.py](https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createTrainIdLabelImgs.py). +* Put the [file of image list](tools/datasets/cityscapes/) into where you save the dataset. + + +### 1. Train from scratch +* `cd HRTNet/train` +* Set the dataset path via `ln -s $YOUR_DATA_PATH ../DATASET` +* Set the output path via `mkdir ../OUTPUT` +* Train from scratch +``` +export DETECTRON2_DATASETS="$Your_DATA_PATH" +NGPUS=8 +python -m torch.distributed.launch --nproc_per_node=$NGPUS train.py --world_size $NGPUS --seed 12367 --config ../configs/cityscapes/cydas.yaml +``` + +### 2. Evaluation +We provide training models and logs, which can be downloaded from [Google Drive](https://drive.google.com/drive/folders/1CkFp24bEDq0wUp504BQ68jn5Vs069qox?usp=sharing). + +```bash +cd train +``` +* Download the pretrained weights of the from [Google Drive](https://drive.google.com/drive/folders/1CkFp24bEDq0wUp504BQ68jn5Vs069qox?usp=sharing). +* Set `config.model_path = $YOUR_MODEL_PATH` in `cydas.yaml`. +* Set `config.json_file = $CYDAS_MODEL` in `cydas.yaml`. 
+* Start the evaluation process: +```bash +CUDA_VISIBLE_DEVICES=0 python test.py \ No newline at end of file diff --git a/CDARTS_segmentation/configs/ade/cydas.yaml b/CDARTS_segmentation/configs/ade/cydas.yaml new file mode 100644 index 0000000..f9d8ac3 --- /dev/null +++ b/CDARTS_segmentation/configs/ade/cydas.yaml @@ -0,0 +1,37 @@ +json_file: "jsons/big4.json" +data_path: "../DATASET/ADEChallengeData2016/" +dataset: "coco" +det2_cfg: "configs/ADE20K/base.yaml" +num_classes: 150 +max_iteration: 160000 +seed: 12345 +random_sample: False +eval_flag: True +opt: "sgd" +opt_eps: 0.001 +sched: "new" #"raw for original" +epochs: 1000 +drop_path_prob: 0.2 +image_height: 640 +image_width: 640 +eval_height: 640 +eval_width: 640 +crop_size: 640 +batch_size: 4 +mode: "poly" +base_lr: 0.05 +Fch: 16 +bn_momentum: 0.01 +warmup_start_lr: 5e-6 +warmup_iters: 1000 +weight_decay: 1e-4 +model_ema: True +model_ema_decay: 0.9998 +clip_grad: 1.0 +lamb: 0.2 +ignore: 255 +topk_percent: 0.2 +semantic_loss_weight: 1.0 +center_loss_weight: 200 +offset_loss_weight: 0.01 +eval_flip: False diff --git a/CDARTS_segmentation/configs/cityscapes/cydas.yaml b/CDARTS_segmentation/configs/cityscapes/cydas.yaml new file mode 100644 index 0000000..f0a0661 --- /dev/null +++ b/CDARTS_segmentation/configs/cityscapes/cydas.yaml @@ -0,0 +1,24 @@ +data_path: "../DATASET/cityscapes/" +det2_cfg: "configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml" +seed: 12345 +random_sample: False +opt: "sgd" +opt_eps: 0.001 +sched: "new" #"raw for original" +epochs: 4000 +drop_path_prob: 0.2 +image_height: 512 +image_width: 1024 +eval_height: 1024 +eval_width: 2048 +batch_size: 4 +mode: "poly" +base_lr: 0.05 +workers: 4 +Fch: 6 +warmup_start_lr: 5e-6 +warmup_iters: 1000 +weight_decay: 1e-4 +model_ema: True +model_ema_decay: 0.9998 +stem_head_width: 1.0 diff --git a/CDARTS_segmentation/dataloaders/__init__.py b/CDARTS_segmentation/dataloaders/__init__.py new file mode 100644 index 0000000..8f80b92 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/__init__.py @@ -0,0 +1,285 @@ +from dataloaders.datasets import cityscapes, kd, coco, combine_dbs, pascal, sbd +from dataloaders.segdatasets import Cityscapes, CityscapesPanoptic, COCOPanoptic +from torch.utils.data import DataLoader +import torch.utils.data.distributed + +def make_data_loader(args, **kwargs): + root = args.data_path + if args.dist: + print("=> Using Distribued Sampler") + if args.dataset == 'cityscapes': + if args.autodeeplab == 'train': + train_set = cityscapes.CityscapesSegmentation(args, root, split='retrain') + num_class = train_set.NUM_CLASSES + train_sampler = torch.utils.data.distributed.DistributedSampler(train_set) + train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=False, sampler=train_sampler, **kwargs) + + val_set = cityscapes.CityscapesSegmentation(args, root, split='val') + test_set = cityscapes.CityscapesSegmentation(args, root, split='test') + val_sampler = torch.utils.data.distributed.DistributedSampler(val_set) + test_sampler = torch.utils.data.distributed.DistributedSampler(test_set) + val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, sampler=val_sampler, **kwargs) + test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, sampler=test_sampler, **kwargs) + + elif args.autodeeplab == 'train_seg': + dataset_cfg = { + 'cityscapes': dict( + root=args.data_path, + split='train', + is_train=True, + crop_size=(args.image_height, args.image_width), + 
mirror=True, + min_scale=0.5, + max_scale=2.0, + scale_step_size=0.1, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225) + )} + train_set = Cityscapes(**dataset_cfg['cityscapes']) + num_class = train_set.num_classes + train_sampler = torch.utils.data.distributed.DistributedSampler(train_set) + train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=False, sampler=train_sampler, **kwargs) + + dataset_val_cfg = { + 'cityscapes': dict( + root=args.data_path, + split='val', + is_train=False, + crop_size=(args.eval_height, args.eval_width), + mirror=True, + min_scale=0.5, + max_scale=2.0, + scale_step_size=0.1, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225) + )} + val_set = Cityscapes(**dataset_val_cfg['cityscapes']) + val_sampler = torch.utils.data.distributed.DistributedSampler(val_set) + val_loader = DataLoader(val_set, batch_size=max(1, args.batch_size//4), shuffle=False, sampler=val_sampler, num_workers=args.workers, pin_memory=True, drop_last=False) + + elif args.autodeeplab == 'train_seg_panoptic': + dataset_cfg = { + 'cityscapes_panoptic': dict( + root=args.data_path, + split='train', + is_train=True, + crop_size=(args.image_height, args.image_width), + mirror=True, + min_scale=0.5, + max_scale=2.0, + scale_step_size=0.1, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + semantic_only=False, + ignore_stuff_in_offset=True, + small_instance_area=4096, + small_instance_weight=3 + )} + train_set = CityscapesPanoptic(**dataset_cfg['cityscapes_panoptic']) + num_class = train_set.num_classes + train_sampler = torch.utils.data.distributed.DistributedSampler(train_set) + train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=False, sampler=train_sampler, **kwargs) + + dataset_val_cfg = { + 'cityscapes_panoptic': dict( + root=args.data_path, + split='val', + is_train=False, + crop_size=(args.eval_height, args.eval_width), + mirror=True, + min_scale=0.5, + max_scale=2.0, + scale_step_size=0.1, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + semantic_only=False, + ignore_stuff_in_offset=True, + small_instance_area=4096, + small_instance_weight=3 + )} + val_set = Cityscapes(**dataset_val_cfg['cityscapes_panoptic']) + val_sampler = torch.utils.data.distributed.DistributedSampler(val_set) + val_loader = DataLoader(val_set, batch_size=max(1, args.batch_size//4), shuffle=False, sampler=val_sampler, num_workers=args.workers, pin_memory=True, drop_last=False) + else: + raise Exception('autodeeplab param not set properly') + + return train_loader, train_sampler, val_loader, val_sampler, num_class + + elif args.dataset == 'coco': + if args.autodeeplab == 'train_seg_panoptic': + dataset_cfg = { + 'coco_panoptic': dict( + root=args.data_path, + split='train2017', + is_train=True, + min_resize_value=args.image_height, + max_resize_value=args.image_height, + resize_factor=32, + crop_size=(args.image_height, args.image_width), + mirror=True, + min_scale=0.5, + max_scale=1.5, + scale_step_size=0.1, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + semantic_only=False, + ignore_stuff_in_offset=True, + small_instance_area=4096, + small_instance_weight=3 + )} + train_set = COCOPanoptic(**dataset_cfg['coco_panoptic']) + num_class = train_set.num_classes + train_sampler = torch.utils.data.distributed.DistributedSampler(train_set) + train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=False, sampler=train_sampler, **kwargs) + + + # train_set = coco.COCOSegmentation(args, root, split='train') + # 
root=args.data_path + # val_set = coco.COCOSegmentation(args, root, split='val') + dataset_val_cfg = { + 'coco_panoptic': dict( + root=args.data_path, + split='val2017', + is_train=True, + min_resize_value=args.image_height, + max_resize_value=args.image_height, + resize_factor=32, + crop_size=(args.eval_height, args.eval_width), + mirror=False, + min_scale=1, + max_scale=1, + scale_step_size=0, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + semantic_only=False, + ignore_stuff_in_offset=True, + small_instance_area=4096, + small_instance_weight=3 + )} + val_set = COCOPanoptic(**dataset_val_cfg['coco_panoptic']) + val_sampler = torch.utils.data.distributed.DistributedSampler(val_set) + val_loader = DataLoader(val_set, batch_size=args.batch_size*4, shuffle=False, sampler=val_sampler, num_workers=args.workers, pin_memory=True, drop_last=False) + + return train_loader, train_sampler, val_loader, val_sampler, num_class + else: + raise NotImplementedError + + else: + if args.dataset == 'pascal': + train_set = pascal.VOCSegmentation(args, root, split='train') + val_set = pascal.VOCSegmentation(args, root, split='val') + if args.use_sbd: + sbd_train = sbd.SBDSegmentation(args, root, split=['train', 'val']) + train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set]) + + num_class = train_set.NUM_CLASSES + train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs) + val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs) + test_loader = None + + return train_loader, train_loader, val_loader, test_loader, num_class + + elif args.dataset == 'cityscapes': + if args.autodeeplab == 'train_seg': + dataset_cfg = { + 'cityscapes': dict( + root=args.data_path, + split='train', + is_train=True, + crop_size=(args.image_height, args.image_width), + mirror=True, + min_scale=0.5, + max_scale=2.0, + scale_step_size=0.1, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225) + )} + train_set = Cityscapes(**dataset_cfg['cityscapes']) + num_class = train_set.num_classes + train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=False, **kwargs) + + dataset_val_cfg = { + 'cityscapes': dict( + root=args.data_path, + split='val', + is_train=False, + crop_size=(args.eval_height, args.eval_width), + mirror=True, + min_scale=0.5, + max_scale=2.0, + scale_step_size=0.1, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225) + )} + val_set = Cityscapes(**dataset_val_cfg['cityscapes']) + val_loader = DataLoader(val_set, batch_size=max(1, args.batch_size//4), shuffle=False, num_workers=args.workers, pin_memory=True, drop_last=False) + + elif args.autodeeplab == 'train_seg_panoptic': + dataset_cfg = { + 'cityscapes_panoptic': dict( + root=args.data_path, + split='train', + is_train=True, + crop_size=(args.image_height, args.image_width), + mirror=True, + min_scale=0.5, + max_scale=2.0, + scale_step_size=0.1, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + semantic_only=False, + ignore_stuff_in_offset=True, + small_instance_area=4096, + small_instance_weight=3 + )} + train_set = CityscapesPanoptic(**dataset_cfg['cityscapes_panoptic']) + num_class = train_set.num_classes + train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=False, **kwargs) + + dataset_val_cfg = { + 'cityscapes_panoptic': dict( + root=args.data_path, + split='val', + is_train=False, + crop_size=(args.eval_height, args.eval_width), + mirror=True, + min_scale=0.5, + max_scale=2.0, + scale_step_size=0.1, + mean=(0.485, 
0.456, 0.406), + std=(0.229, 0.224, 0.225), + semantic_only=False, + ignore_stuff_in_offset=True, + small_instance_area=4096, + small_instance_weight=3 + )} + val_set = Cityscapes(**dataset_val_cfg['cityscapes_panoptic']) + val_loader = DataLoader(val_set, batch_size=max(1, args.batch_size//4), shuffle=False, num_workers=args.workers, pin_memory=True, drop_last=False) + else: + raise Exception('autodeeplab param not set properly') + + return train_loader, val_loader, num_class + + + elif args.dataset == 'coco': + train_set = coco.COCOSegmentation(args, root, split='train') + val_set = coco.COCOSegmentation(args, root, split='val') + num_class = train_set.NUM_CLASSES + train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs) + val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs) + test_loader = None + return train_loader, train_loader, val_loader, test_loader, num_class + + elif args.dataset == 'kd': + train_set = kd.CityscapesSegmentation(args, root, split='train') + val_set = kd.CityscapesSegmentation(args, root, split='val') + test_set = kd.CityscapesSegmentation(args, root, split='test') + num_class = train_set.NUM_CLASSES + train_loader1 = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs) + train_loader2 = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs) + val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs) + test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs) + + return train_loader1, train_loader2, val_loader, test_loader, num_class + else: + raise NotImplementedError diff --git a/CDARTS_segmentation/dataloaders/custom_transforms.py b/CDARTS_segmentation/dataloaders/custom_transforms.py new file mode 100644 index 0000000..e40fa44 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/custom_transforms.py @@ -0,0 +1,326 @@ +import math +import torch +import random +import numpy as np +import torch.nn as nn +from numpy import int64 as int64 +import torchvision.transforms as transforms + +from PIL import Image, ImageOps, ImageFilter + + +class Normalize(object): + """Normalize a tensor image with mean and standard deviation. + Args: + mean (tuple): means for each channel. + std (tuple): standard deviations for each channel. 
+ """ + + def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)): + self.mean = mean + self.std = std + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + img = np.array(img).astype(np.float32) + mask = np.array(mask).astype(np.float32) + img /= 255.0 + img -= self.mean + img /= self.std + + return {'image': img, + 'label': mask} + + +class ToTensor(object): + """Convert ndarrays in sample to Tensors.""" + + def __call__(self, sample): + # swap color axis because + # numpy image: H x W x C + # torch image: C X H X W + img = sample['image'] + mask = sample['label'] + img = np.array(img).astype(np.float32).transpose((2, 0, 1)) + mask = np.array(mask).astype(np.float32) + + img = torch.from_numpy(img).float() + mask = torch.from_numpy(mask).float() + + return {'image': img, + 'label': mask} + + +class RandomHorizontalFlip(object): + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + if random.random() < 0.5: + img = img.transpose(Image.FLIP_LEFT_RIGHT) + mask = mask.transpose(Image.FLIP_LEFT_RIGHT) + + return {'image': img, + 'label': mask} + + +class RandomRotate(object): + def __init__(self, degree): + self.degree = degree + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + rotate_degree = random.uniform(-1 * self.degree, self.degree) + img = img.rotate(rotate_degree, Image.BILINEAR) + mask = mask.rotate(rotate_degree, Image.NEAREST) + + return {'image': img, + 'label': mask} + + +class RandomGaussianBlur(object): + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + if random.random() < 0.5: + img = img.filter(ImageFilter.GaussianBlur( + radius=random.random())) + + return {'image': img, + 'label': mask} + + +class RandomScaleCrop(object): + def __init__(self, base_size, crop_size, fill=0): + self.base_size = base_size + self.crop_size = crop_size + self.fill = fill + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + # random scale (short edge) + short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0)) + w, h = img.size + if h > w: + ow = short_size + oh = int(1.0 * h * ow / w) + else: + oh = short_size + ow = int(1.0 * w * oh / h) + img = img.resize((ow, oh), Image.BILINEAR) + mask = mask.resize((ow, oh), Image.NEAREST) + # pad crop + if short_size < self.crop_size: + padh = self.crop_size - oh if oh < self.crop_size else 0 + padw = self.crop_size - ow if ow < self.crop_size else 0 + img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0) + mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill) + # random crop crop_size + w, h = img.size + x1 = random.randint(0, w - self.crop_size) + y1 = random.randint(0, h - self.crop_size) + img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) + mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) + + return {'image': img, + 'label': mask} + + +class FixScaleCrop(object): + def __init__(self, crop_size): + self.crop_size = crop_size + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + w, h = img.size + if w > h: + oh = self.crop_size + ow = int(1.0 * w * oh / h) + else: + ow = self.crop_size + oh = int(1.0 * h * ow / w) + img = img.resize((ow, oh), Image.BILINEAR) + mask = mask.resize((ow, oh), Image.NEAREST) + # center crop + w, h = img.size + x1 = int(round((w - self.crop_size) / 2.)) + y1 = int(round((h - self.crop_size) / 2.)) + img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) + 
mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) + + return {'image': img, + 'label': mask} + + +# resize to 512*1024 +class FixedResize(object): + """change the short edge length to size""" + + def __init__(self, resize=512): + self.size1 = resize # size= 512 + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + assert img.size == mask.size + + w, h = img.size + if w > h: + oh = self.size1 + ow = int(1.0 * w * oh / h) + else: + ow = self.size1 + oh = int(1.0 * h * ow / w) + img = img.resize((ow, oh), Image.BILINEAR) + mask = mask.resize((ow, oh), Image.NEAREST) + return {'image': img, + 'label': mask} + + +# random crop 321*321 +class RandomCrop(object): + def __init__(self, crop_size=320): + self.crop_size = crop_size + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + w, h = img.size + x1 = random.randint(0, w - self.crop_size) + y1 = random.randint(0, h - self.crop_size) + img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) + mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size)) + return {'image': img, + 'label': mask} + + +class RandomScale(object): + def __init__(self, scales=(1,)): + self.scales = scales + + def __call__(self, sample): + img = sample['image'] + mask = sample['label'] + w, h = img.size + scale = random.choice(self.scales) + w, h = int(w * scale), int(h * scale) + return {'image': img, + 'label': mask} + + +class Retrain_Preprocess(object): + def __init__(self, flip_prob, scale_range, crop, mean, std): + self.flip_prob = flip_prob + self.scale_range = scale_range + self.crop = crop + self.data_transforms = transforms.Compose([transforms.ToTensor(), + transforms.Normalize(mean=mean, std=std)]) + + def __call__(self, sample): + if self.flip_prob is not None and random.random() < self.flip_prob: + sample['image'] = sample['image'].transpose(Image.FLIP_LEFT_RIGHT) + sample['label'] = sample['label'].transpose(Image.FLIP_LEFT_RIGHT) + + if self.scale_range is not None: + w, h = sample['image'].size + rand_log_scale = math.log(self.scale_range[0], 2) + random.random() * \ + (math.log(self.scale_range[1], 2) - math.log(self.scale_range[0], 2)) + random_scale = math.pow(2, rand_log_scale) + new_size = (int(round(w * random_scale)), int(round(h * random_scale))) + sample['image'] = sample['image'].resize(new_size, Image.ANTIALIAS) + sample['label'] = sample['label'].resize(new_size, Image.NEAREST) + sample['image'] = self.data_transforms(sample['image']) + sample['label'] = torch.LongTensor(np.array(sample['label']).astype(int64)) + + if self.crop: + image, mask = sample['image'], sample['label'] + h, w = image.shape[1], image.shape[2] + pad_tb = max(0, self.crop[0] - h) + pad_lr = max(0, self.crop[1] - w) + image = nn.ZeroPad2d((0, pad_lr, 0, pad_tb))(image) + mask = nn.ConstantPad2d((0, pad_lr, 0, pad_tb), 255)(mask) + + h, w = image.shape[1], image.shape[2] + i = random.randint(0, h - self.crop[0]) + j = random.randint(0, w - self.crop[1]) + sample['image'] = image[:, i:i + self.crop[0], j:j + self.crop[1]] + sample['label'] = mask[i:i + self.crop[0], j:j + self.crop[1]] + return sample + + +class transform_tr(object): + def __init__(self, args, mean, std): + if args.multi_scale is None: + self.composed_transforms = transforms.Compose([ + FixedResize(resize=args.resize), + RandomCrop(crop_size=args.crop_size), + # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255), + # tr.RandomGaussianBlur(), + Normalize(mean, std), + 
ToTensor()]) + else: + self.composed_transforms = transforms.Compose([ + FixedResize(resize=args.resize), + RandomScale(scales=args.multi_scale), + RandomCrop(crop_size=args.crop_size), + # tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255), + # tr.RandomGaussianBlur(), + Normalize(mean, std), + ToTensor()]) + + def __call__(self, sample): + return self.composed_transforms(sample) + + +class transform_val(object): + def __init__(self, args, mean, std): + self.composed_transforms = transforms.Compose([ + FixedResize(resize=args.resize), + FixScaleCrop(crop_size=args.crop_size), # TODO:CHECK THIS + Normalize(mean, std), + ToTensor()]) + + def __call__(self, sample): + return self.composed_transforms(sample) + + +class transform_val(object): + def __init__(self, args, mean, std): + self.composed_transforms = transforms.Compose([ + FixedResize(resize=args.crop_size), + Normalize(mean, std), + ToTensor()]) + + def __call__(self, sample): + return self.composed_transforms(sample) + + +class transform_ts(object): + def __init__(self, args, mean, std): + self.composed_transforms = transforms.Compose([ + FixedResize(resize=args.crop_size), + Normalize(mean, std), + ToTensor()]) + + def __call__(self, sample): + return self.composed_transforms(sample) + + +class transform_retr(object): + def __init__(self, args, mean, std): + crop_size = (args.crop_size, args.crop_size) if isinstance(args.crop_size, int) else args.crop_size + self.composed_transforms = Retrain_Preprocess(0.5, (0.5, 2), crop_size, mean, std) + + def __call__(self, sample): + return self.composed_transforms(sample) + + +class transform_reval(object): # we use multi_scale evaluate in evaluate.py so dont need resize in dataset + def __init__(self, args, mean, std): + self.composed_transforms = Retrain_Preprocess(None, None, None, mean, std) + + def __call__(self, sample): + return self.composed_transforms(sample) diff --git a/CDARTS_segmentation/dataloaders/dataloader_utils.py b/CDARTS_segmentation/dataloaders/dataloader_utils.py new file mode 100644 index 0000000..4bb600c --- /dev/null +++ b/CDARTS_segmentation/dataloaders/dataloader_utils.py @@ -0,0 +1,104 @@ +import matplotlib.pyplot as plt +import numpy as np +import torch + +def decode_seg_map_sequence(label_masks, dataset='pascal'): + rgb_masks = [] + for label_mask in label_masks: + rgb_mask = decode_segmap(label_mask, dataset) + rgb_masks.append(rgb_mask) + rgb_masks = torch.from_numpy(np.array(rgb_masks).transpose([0, 3, 1, 2])) + return rgb_masks + + +def decode_segmap(label_mask, dataset, plot=False): + """Decode segmentation class labels into a color image + Args: + label_mask (np.ndarray): an (M,N) array of integer values denoting + the class label at each spatial location. + plot (bool, optional): whether to show the resulting color image + in a figure. + Returns: + (np.ndarray, optional): the resulting decoded color image. 
+ """ + if dataset == 'pascal' or dataset == 'coco': + n_classes = 21 + label_colours = get_pascal_labels() + elif dataset == 'cityscapes': + n_classes = 19 + label_colours = get_cityscapes_labels() + elif dataset == 'kd': + n_classes = 19 + label_colours = get_cityscapes_labels() + else: + raise NotImplementedError + + r = label_mask.copy() + g = label_mask.copy() + b = label_mask.copy() + for ll in range(0, n_classes): + r[label_mask == ll] = label_colours[ll, 0] + g[label_mask == ll] = label_colours[ll, 1] + b[label_mask == ll] = label_colours[ll, 2] + rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3)) + rgb[:, :, 0] = r / 255.0 + rgb[:, :, 1] = g / 255.0 + rgb[:, :, 2] = b / 255.0 + if plot: + plt.imshow(rgb) + plt.show() + else: + return rgb + + +def encode_segmap(mask): + """Encode segmentation label images as pascal classes + Args: + mask (np.ndarray): raw segmentation label image of dimension + (M, N, 3), in which the Pascal classes are encoded as colours. + Returns: + (np.ndarray): class map with dimensions (M,N), where the value at + a given location is the integer denoting the class index. + """ + mask = mask.astype(int) + label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16) + for ii, label in enumerate(get_pascal_labels()): + label_mask[np.where(np.all(mask == label, axis=-1))[:2]] = ii + label_mask = label_mask.astype(int) + return label_mask + + +def get_cityscapes_labels(): + return np.array([ + [128, 64, 128], + [244, 35, 232], + [70, 70, 70], + [102, 102, 156], + [190, 153, 153], + [153, 153, 153], + [250, 170, 30], + [220, 220, 0], + [107, 142, 35], + [152, 251, 152], + [0, 130, 180], + [220, 20, 60], + [255, 0, 0], + [0, 0, 142], + [0, 0, 70], + [0, 60, 100], + [0, 80, 100], + [0, 0, 230], + [119, 11, 32]]) + + +def get_pascal_labels(): + """Load the mapping that associates pascal classes with label colors + Returns: + np.ndarray with dimensions (21, 3) + """ + return np.asarray([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], + [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128], + [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0], + [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128], + [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], + [0, 64, 128]]) \ No newline at end of file diff --git a/CDARTS_segmentation/dataloaders/datasets/__init__.py b/CDARTS_segmentation/dataloaders/datasets/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/CDARTS_segmentation/dataloaders/datasets/cityscapes.py b/CDARTS_segmentation/dataloaders/datasets/cityscapes.py new file mode 100644 index 0000000..ae45e87 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/datasets/cityscapes.py @@ -0,0 +1,153 @@ +import os +import numpy as np +from PIL import Image +from torch.utils import data +from dataloaders import custom_transforms as tr + + +def twoTrainSeg(args, root): + images_base = os.path.join(root, 'leftImg8bit', 'train') + train_files = [os.path.join(looproot, filename) for looproot, _, filenames in os.walk(images_base) + for filename in filenames if filename.endswith('.png')] + number_images = len(train_files) + permuted_indices_ls = np.random.permutation(number_images) + indices_1 = permuted_indices_ls[: int(0.5 * number_images) + 1] + indices_2 = permuted_indices_ls[int(0.5 * number_images):] + if len(indices_1) % 2 != 0 or len(indices_2) % 2 != 0: + raise Exception('indices lists need to be even numbers for batch norm') + return CityscapesSegmentation(args, split='train', indices_for_split=indices_1), 
CityscapesSegmentation(args, + split='train', + indices_for_split=indices_2) + + +class CityscapesSegmentation(data.Dataset): + NUM_CLASSES = 19 + + CLASSES = [ + 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', + 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', + 'truck', 'bus', 'train', 'motorcycle', 'bicycle' + ] + + def __init__(self, args, root, split="train", indices_for_split=None): + self.root = root + self.split = split + self.args = args + self.files = {} + self.mean = (0.485, 0.456, 0.406) + self.std = (0.229, 0.224, 0.225) + self.crop = self.args.crop_size + if split.startswith('re'): + self.images_base = os.path.join(self.root, 'leftImg8bit', self.split[2:]) + self.annotations_base = os.path.join(self.root, 'gtFine', self.split[2:]) + else: + self.images_base = os.path.join(self.root, 'leftImg8bit', self.split) + self.annotations_base = os.path.join(self.root, 'gtFine', self.split) + + self.files[split] = self.recursive_glob(rootdir=self.images_base, suffix='.png') + + if indices_for_split is not None: + self.files[split] = np.array(self.files[split])[indices_for_split].tolist() + + self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1] + self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33] + self.class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence', + 'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain', + 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', + 'motorcycle', 'bicycle'] + + self.ignore_index = 255 + self.class_map = dict(zip(self.valid_classes, range(self.NUM_CLASSES))) + + if not self.files[split]: + raise Exception("No files for split=[%s] found in %s" % (split, self.images_base)) + + print("Found %d %s images" % (len(self.files[split]), split)) + self.transform = self.get_transform() + + def __len__(self): + return len(self.files[self.split]) + + def __getitem__(self, index): + + img_path = self.files[self.split][index].rstrip() + lbl_path = os.path.join(self.annotations_base, + img_path.split(os.sep)[-2], + os.path.basename(img_path)[:-15] + 'gtFine_labelIds.png') + + _img = Image.open(img_path).convert('RGB') + _tmp = np.array(Image.open(lbl_path), dtype=np.uint8) + _tmp = self.encode_segmap(_tmp) + _target = Image.fromarray(_tmp) + + sample = {'image': _img, 'label': _target} + return self.transform(sample) + + def encode_segmap(self, mask): + # Put all void classes to zero + for _voidc in self.void_classes: + mask[mask == _voidc] = self.ignore_index + for _validc in self.valid_classes: + mask[mask == _validc] = self.class_map[_validc] + return mask + + def recursive_glob(self, rootdir='.', suffix=''): + """Performs recursive glob with given suffix and rootdir + :param rootdir is the root directory + :param suffix is the suffix to be searched + """ + return [os.path.join(looproot, filename) + for looproot, _, filenames in os.walk(rootdir) + for filename in filenames if filename.endswith(suffix)] + + def get_transform(self): + if self.split == 'train': + return tr.transform_tr(self.args, self.mean, self.std) + elif self.split == 'val': + return tr.transform_val(self.args, self.mean, self.std) + elif self.split == 'test': + return tr.transform_ts(self.args, self.mean, self.std) + elif self.split == 'retrain': + return tr.transform_retr(self.args, self.mean, self.std) + elif self.split == 'reval': + return tr.transform_reval(self.args, self.mean, self.std) + + +if __name__ == '__main__': + from 
dataloaders.dataloader_utils import decode_segmap + from torch.utils.data import DataLoader + import matplotlib.pyplot as plt + import argparse + + parser = argparse.ArgumentParser() + args = parser.parse_args() + args.resize = 513 + args.base_size = 513 + args.crop_size = 513 + + cityscapes_train = CityscapesSegmentation(args, split='retrain') + + dataloader = DataLoader(cityscapes_train, batch_size=2, shuffle=True, num_workers=2) + + for ii, sample in enumerate(dataloader): + for jj in range(sample["image"].size()[0]): + img = sample['image'].numpy() + gt = sample['label'].numpy() + tmp = np.array(gt[jj]).astype(np.uint8) + segmap = decode_segmap(tmp, dataset='cityscapes') + img_tmp = np.transpose(img[jj], axes=[1, 2, 0]) + img_tmp *= (0.229, 0.224, 0.225) + img_tmp += (0.485, 0.456, 0.406) + img_tmp *= 255.0 + img_tmp = img_tmp.astype(np.uint8) + plt.figure() + plt.title('display') + plt.subplot(211) + plt.imshow(img_tmp) + plt.subplot(212) + plt.imshow(segmap) + + if ii == 1: + break + + plt.show(block=True) diff --git a/CDARTS_segmentation/dataloaders/datasets/coco.py b/CDARTS_segmentation/dataloaders/datasets/coco.py new file mode 100644 index 0000000..5827425 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/datasets/coco.py @@ -0,0 +1,160 @@ +import numpy as np +import torch +from torch.utils.data import Dataset +from tqdm import trange +import os +from pycocotools.coco import COCO +from pycocotools import mask +from torchvision import transforms +from dataloaders import custom_transforms as tr +from PIL import Image, ImageFile +ImageFile.LOAD_TRUNCATED_IMAGES = True + + +class COCOSegmentation(Dataset): + NUM_CLASSES = 21 + CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, + 1, 64, 20, 63, 7, 72] + + def __init__(self, + args, + base_dir, + split='train', + year='2017'): + super().__init__() + ann_file = os.path.join(base_dir, 'annotations/instances_{}{}.json'.format(split, year)) + ids_file = os.path.join(base_dir, 'annotations/{}_ids_{}.pth'.format(split, year)) + # self.img_dir = os.path.join(base_dir, 'images/{}{}'.format(split, year)) + self.img_dir = os.path.join(base_dir, '{}{}'.format(split, year)) + self.split = split + self.coco = COCO(ann_file) + self.coco_mask = mask + if os.path.exists(ids_file): + self.ids = torch.load(ids_file) + else: + ids = list(self.coco.imgs.keys()) + self.ids = self._preprocess(ids, ids_file) + self.args = args + + def __getitem__(self, index): + _img, _target = self._make_img_gt_point_pair(index) + sample = {'image': _img, 'label': _target} + + if self.split == "train": + return self.transform_tr(sample) + elif self.split == 'val': + return self.transform_val(sample) + + def _make_img_gt_point_pair(self, index): + coco = self.coco + img_id = self.ids[index] + img_metadata = coco.loadImgs(img_id)[0] + path = img_metadata['file_name'] + _img = Image.open(os.path.join(self.img_dir, path)).convert('RGB') + cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id)) + _target = Image.fromarray(self._gen_seg_mask( + cocotarget, img_metadata['height'], img_metadata['width'])) + + return _img, _target + + def _preprocess(self, ids, ids_file): + print("Preprocessing mask, this will take a while. 
" + \ + "But don't worry, it only run once for each split.") + tbar = trange(len(ids)) + new_ids = [] + for i in tbar: + img_id = ids[i] + cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id)) + img_metadata = self.coco.loadImgs(img_id)[0] + mask = self._gen_seg_mask(cocotarget, img_metadata['height'], + img_metadata['width']) + # more than 1k pixels + if (mask > 0).sum() > 1000: + new_ids.append(img_id) + tbar.set_description('Doing: {}/{}, got {} qualified images'. \ + format(i, len(ids), len(new_ids))) + print('Found number of qualified images: ', len(new_ids)) + torch.save(new_ids, ids_file) + return new_ids + + def _gen_seg_mask(self, target, h, w): + mask = np.zeros((h, w), dtype=np.uint8) + coco_mask = self.coco_mask + for instance in target: + rle = coco_mask.frPyObjects(instance['segmentation'], h, w) + m = coco_mask.decode(rle) + cat = instance['category_id'] + if cat in self.CAT_LIST: + c = self.CAT_LIST.index(cat) + else: + continue + if len(m.shape) < 3: + mask[:, :] += (mask == 0) * (m * c) + else: + mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8) + return mask + + def transform_tr(self, sample): + composed_transforms = transforms.Compose([ + tr.RandomHorizontalFlip(), + tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size), + tr.RandomGaussianBlur(), + tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + tr.ToTensor()]) + + return composed_transforms(sample) + + def transform_val(self, sample): + + composed_transforms = transforms.Compose([ + tr.FixScaleCrop(crop_size=self.args.crop_size), + tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + tr.ToTensor()]) + + return composed_transforms(sample) + + + def __len__(self): + return len(self.ids) + + + +if __name__ == "__main__": + from dataloaders import custom_transforms as tr + from dataloaders.dataloader_utils import decode_segmap + from torch.utils.data import DataLoader + from torchvision import transforms + import matplotlib.pyplot as plt + import argparse + + parser = argparse.ArgumentParser() + args = parser.parse_args() + args.base_size = 513 + args.crop_size = 513 + + coco_val = COCOSegmentation(args, split='val', year='2017') + + dataloader = DataLoader(coco_val, batch_size=4, shuffle=True, num_workers=0) + + for ii, sample in enumerate(dataloader): + for jj in range(sample["image"].size()[0]): + img = sample['image'].numpy() + gt = sample['label'].numpy() + tmp = np.array(gt[jj]).astype(np.uint8) + segmap = decode_segmap(tmp, dataset='coco') + img_tmp = np.transpose(img[jj], axes=[1, 2, 0]) + img_tmp *= (0.229, 0.224, 0.225) + img_tmp += (0.485, 0.456, 0.406) + img_tmp *= 255.0 + img_tmp = img_tmp.astype(np.uint8) + plt.figure() + plt.title('display') + plt.subplot(211) + plt.imshow(img_tmp) + plt.subplot(212) + plt.imshow(segmap) + + if ii == 1: + break + + plt.show(block=True) \ No newline at end of file diff --git a/CDARTS_segmentation/dataloaders/datasets/combine_dbs.py b/CDARTS_segmentation/dataloaders/datasets/combine_dbs.py new file mode 100644 index 0000000..b251a73 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/datasets/combine_dbs.py @@ -0,0 +1,100 @@ +import torch.utils.data as data + + +class CombineDBs(data.Dataset): + NUM_CLASSES = 21 + def __init__(self, dataloaders, excluded=None): + self.dataloaders = dataloaders + self.excluded = excluded + self.im_ids = [] + + # Combine object lists + for dl in dataloaders: + for elem in dl.im_ids: + if elem not in self.im_ids: + 
self.im_ids.append(elem) + + # Exclude + if excluded: + for dl in excluded: + for elem in dl.im_ids: + if elem in self.im_ids: + self.im_ids.remove(elem) + + # Get object pointers + self.cat_list = [] + self.im_list = [] + new_im_ids = [] + num_images = 0 + for ii, dl in enumerate(dataloaders): + for jj, curr_im_id in enumerate(dl.im_ids): + if (curr_im_id in self.im_ids) and (curr_im_id not in new_im_ids): + num_images += 1 + new_im_ids.append(curr_im_id) + self.cat_list.append({'db_ii': ii, 'cat_ii': jj}) + + self.im_ids = new_im_ids + print('Combined number of images: {:d}'.format(num_images)) + + def __getitem__(self, index): + + _db_ii = self.cat_list[index]["db_ii"] + _cat_ii = self.cat_list[index]['cat_ii'] + sample = self.dataloaders[_db_ii].__getitem__(_cat_ii) + + if 'meta' in sample.keys(): + sample['meta']['db'] = str(self.dataloaders[_db_ii]) + + return sample + + def __len__(self): + return len(self.cat_list) + + def __str__(self): + include_db = [str(db) for db in self.dataloaders] + exclude_db = [str(db) for db in self.excluded] + return 'Included datasets:'+str(include_db)+'\n'+'Excluded datasets:'+str(exclude_db) + + +if __name__ == "__main__": + import matplotlib.pyplot as plt + from dataloaders.datasets import pascal, sbd + from dataloaders import sbd + import torch + import numpy as np + from dataloaders.dataloader_utils import decode_segmap + import argparse + + parser = argparse.ArgumentParser() + args = parser.parse_args() + args.base_size = 513 + args.crop_size = 513 + + pascal_voc_val = pascal.VOCSegmentation(args, split='val') + sbd = sbd.SBDSegmentation(args, split=['train', 'val']) + pascal_voc_train = pascal.VOCSegmentation(args, split='train') + + dataset = CombineDBs([pascal_voc_train, sbd], excluded=[pascal_voc_val]) + dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, num_workers=0) + + for ii, sample in enumerate(dataloader): + for jj in range(sample["image"].size()[0]): + img = sample['image'].numpy() + gt = sample['label'].numpy() + tmp = np.array(gt[jj]).astype(np.uint8) + segmap = decode_segmap(tmp, dataset='pascal') + img_tmp = np.transpose(img[jj], axes=[1, 2, 0]) + img_tmp *= (0.229, 0.224, 0.225) + img_tmp += (0.485, 0.456, 0.406) + img_tmp *= 255.0 + img_tmp = img_tmp.astype(np.uint8) + plt.figure() + plt.title('display') + plt.subplot(211) + plt.imshow(img_tmp) + plt.subplot(212) + plt.imshow(segmap) + + if ii == 1: + break + plt.show(block=True) \ No newline at end of file diff --git a/CDARTS_segmentation/dataloaders/datasets/kd.py b/CDARTS_segmentation/dataloaders/datasets/kd.py new file mode 100644 index 0000000..ef7820c --- /dev/null +++ b/CDARTS_segmentation/dataloaders/datasets/kd.py @@ -0,0 +1,139 @@ +import os +import numpy as np +import scipy.misc as m +from PIL import Image +from torch.utils import data +from torchvision import transforms +from dataloaders import custom_transforms as tr +import pandas as pd + +class CityscapesSegmentation(data.Dataset): + NUM_CLASSES = 7 + + def __init__(self, args, root, split="train"): + + self.root = root + self.split = split + self.args = args + self.files = {} + + self.images_base = os.path.join(self.root, 'kd-cityscapes-sources', self.split) + self.annotations_base = os.path.join(self.root, 'kd-cityscapes-gt', self.split) + + self.files[split] = self.recursive_glob(rootdir=self.images_base, suffix='.png') + self.map = pd.read_csv('label_map.txt', header=0, sep='\t') + self.map['#id'] = self.map['#id'] + 6 + + self.dict_map = 
dict(zip(self.map['#id'],self.map['categoryId'])) + if not self.files[split]: + raise Exception("No files for split=[%s] found in %s" % (split, self.images_base)) + + print("Found %d %s images" % (len(self.files[split]), split)) + + def __len__(self): + return len(self.files[self.split]) + + def __getitem__(self, index): + + img_path = self.files[self.split][index].rstrip() + lbl_path = os.path.join(self.annotations_base, + img_path.split(os.sep)[-2], + os.path.basename(img_path)) + + _img = Image.open(img_path).convert('RGB') + _tmp = np.array(Image.open(lbl_path), dtype=np.uint8) + _tmp = self.encode_segmap(_tmp) + _target = Image.fromarray(_tmp) + + sample = {'image': _img, 'label': _target} + + if self.split == 'train': + return self.transform_tr(sample) + elif self.split == 'val': + return self.transform_val(sample) + elif self.split == 'test': + return self.transform_ts(sample) + + def encode_segmap(self, mask): + + mask = mask + 6 + for label_id, cl in self.dict_map.items(): + mask[mask == label_id] = cl + + return mask + + def recursive_glob(self, rootdir='.', suffix=''): + """Performs recursive glob with given suffix and rootdir + :param rootdir is the root directory + :param suffix is the suffix to be searched + """ + return [os.path.join(looproot, filename) + for looproot, _, filenames in os.walk(rootdir) + for filename in filenames if filename.endswith(suffix)] + + def transform_tr(self, sample): + composed_transforms = transforms.Compose([ + tr.RandomHorizontalFlip(), + tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255), + tr.RandomGaussianBlur(), + tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + tr.ToTensor()]) + + return composed_transforms(sample) + + def transform_val(self, sample): + + composed_transforms = transforms.Compose([ + tr.FixScaleCrop(crop_size=self.args.crop_size), + tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + tr.ToTensor()]) + + return composed_transforms(sample) + + def transform_ts(self, sample): + + composed_transforms = transforms.Compose([ + tr.FixedResize(size=self.args.crop_size), + tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + tr.ToTensor()]) + + return composed_transforms(sample) + +if __name__ == '__main__': + from dataloaders.dataloader_utils import decode_segmap + from torch.utils.data import DataLoader + import matplotlib.pyplot as plt + import argparse + + parser = argparse.ArgumentParser() + args = parser.parse_args() + args.base_size = 513 + args.crop_size = 513 + + cityscapes_train = CityscapesSegmentation(args, split='train') + + dataloader = DataLoader(cityscapes_train, batch_size=2, shuffle=True, num_workers=2) + + for ii, sample in enumerate(dataloader): + for jj in range(sample["image"].size()[0]): + img = sample['image'].numpy() + gt = sample['label'].numpy() + tmp = np.array(gt[jj]).astype(np.uint8) + segmap = decode_segmap(tmp, dataset='cityscapes') + img_tmp = np.transpose(img[jj], axes=[1, 2, 0]) + img_tmp *= (0.229, 0.224, 0.225) + img_tmp += (0.485, 0.456, 0.406) + img_tmp *= 255.0 + img_tmp = img_tmp.astype(np.uint8) + plt.figure() + plt.title('display') + plt.subplot(211) + plt.imshow(img_tmp) + plt.subplot(212) + plt.imshow(segmap) + + if ii == 1: + break + + plt.show(block=True) + diff --git a/CDARTS_segmentation/dataloaders/datasets/pascal.py b/CDARTS_segmentation/dataloaders/datasets/pascal.py new file mode 100644 index 0000000..e28015d --- /dev/null +++ b/CDARTS_segmentation/dataloaders/datasets/pascal.py @@ 
-0,0 +1,144 @@ +from __future__ import print_function, division +import os +from PIL import Image +import numpy as np +from torch.utils.data import Dataset +from torchvision import transforms +from dataloaders import custom_transforms as tr + +class VOCSegmentation(Dataset): + """ + PascalVoc dataset + """ + NUM_CLASSES = 21 + + def __init__(self, + args, + base_dir, + split='train', + ): + """ + :param base_dir: path to VOC dataset directory + :param split: train/val + :param transform: transform to apply + """ + super().__init__() + self._base_dir = base_dir + self._image_dir = os.path.join(self._base_dir, 'JPEGImages') + self._cat_dir = os.path.join(self._base_dir, 'SegmentationClass') + + if isinstance(split, str): + self.split = [split] + else: + split.sort() + self.split = split + + self.args = args + + _splits_dir = os.path.join(self._base_dir, 'ImageSets', 'Segmentation') + + self.im_ids = [] + self.images = [] + self.categories = [] + + for splt in self.split: + with open(os.path.join(os.path.join(_splits_dir, splt + '.txt')), "r") as f: + lines = f.read().splitlines() + + for ii, line in enumerate(lines): + _image = os.path.join(self._image_dir, line + ".jpg") + _cat = os.path.join(self._cat_dir, line + ".png") + assert os.path.isfile(_image) + assert os.path.isfile(_cat) + self.im_ids.append(line) + self.images.append(_image) + self.categories.append(_cat) + + assert (len(self.images) == len(self.categories)) + + # Display stats + print('Number of images in {}: {:d}'.format(split, len(self.images))) + + def __len__(self): + return len(self.images) + + + def __getitem__(self, index): + _img, _target = self._make_img_gt_point_pair(index) + sample = {'image': _img, 'label': _target} + + for split in self.split: + if split == "train": + return self.transform_tr(sample) + elif split == 'val': + return self.transform_val(sample) + + + def _make_img_gt_point_pair(self, index): + _img = Image.open(self.images[index]).convert('RGB') + _target = Image.open(self.categories[index]) + + return _img, _target + + def transform_tr(self, sample): + composed_transforms = transforms.Compose([ + tr.RandomHorizontalFlip(), + tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size), + tr.RandomGaussianBlur(), + tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + tr.ToTensor()]) + + return composed_transforms(sample) + + def transform_val(self, sample): + + composed_transforms = transforms.Compose([ + tr.FixScaleCrop(crop_size=self.args.crop_size), + tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + tr.ToTensor()]) + + return composed_transforms(sample) + + def __str__(self): + return 'VOC2012(split=' + str(self.split) + ')' + + +if __name__ == '__main__': + from dataloaders.dataloader_utils import decode_segmap + from torch.utils.data import DataLoader + import matplotlib.pyplot as plt + import argparse + + parser = argparse.ArgumentParser() + args = parser.parse_args() + args.base_size = 513 + args.crop_size = 513 + + voc_train = VOCSegmentation(args, split='train') + + dataloader = DataLoader(voc_train, batch_size=5, shuffle=True, num_workers=0) + + for ii, sample in enumerate(dataloader): + for jj in range(sample["image"].size()[0]): + img = sample['image'].numpy() + gt = sample['label'].numpy() + tmp = np.array(gt[jj]).astype(np.uint8) + segmap = decode_segmap(tmp, dataset='pascal') + img_tmp = np.transpose(img[jj], axes=[1, 2, 0]) + img_tmp *= (0.229, 0.224, 0.225) + img_tmp += (0.485, 0.456, 0.406) + img_tmp *= 255.0 + img_tmp = 
img_tmp.astype(np.uint8) + plt.figure() + plt.title('display') + plt.subplot(211) + plt.imshow(img_tmp) + plt.subplot(212) + plt.imshow(segmap) + + if ii == 1: + break + + plt.show(block=True) + + diff --git a/CDARTS_segmentation/dataloaders/datasets/sbd.py b/CDARTS_segmentation/dataloaders/datasets/sbd.py new file mode 100644 index 0000000..e197743 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/datasets/sbd.py @@ -0,0 +1,128 @@ +from __future__ import print_function, division +import os + +import numpy as np +import scipy.io +import torch.utils.data as data +from PIL import Image + +from torchvision import transforms +from dataloaders import custom_transforms as tr + +class SBDSegmentation(data.Dataset): + NUM_CLASSES = 21 + + def __init__(self, + args, + base_dir, + split='train', + ): + """ + :param base_dir: path to VOC dataset directory + :param split: train/val + :param transform: transform to apply + """ + super().__init__() + self._base_dir = base_dir + self._dataset_dir = os.path.join(self._base_dir, 'dataset') + self._image_dir = os.path.join(self._dataset_dir, 'img') + self._cat_dir = os.path.join(self._dataset_dir, 'cls') + + + if isinstance(split, str): + self.split = [split] + else: + split.sort() + self.split = split + + self.args = args + + # Get list of all images from the split and check that the files exist + self.im_ids = [] + self.images = [] + self.categories = [] + for splt in self.split: + with open(os.path.join(self._dataset_dir, splt + '.txt'), "r") as f: + lines = f.read().splitlines() + + for line in lines: + _image = os.path.join(self._image_dir, line + ".jpg") + _categ= os.path.join(self._cat_dir, line + ".mat") + assert os.path.isfile(_image) + assert os.path.isfile(_categ) + self.im_ids.append(line) + self.images.append(_image) + self.categories.append(_categ) + + assert (len(self.images) == len(self.categories)) + + # Display stats + print('Number of images: {:d}'.format(len(self.images))) + + + def __getitem__(self, index): + _img, _target = self._make_img_gt_point_pair(index) + sample = {'image': _img, 'label': _target} + + return self.transform(sample) + + def __len__(self): + return len(self.images) + + def _make_img_gt_point_pair(self, index): + _img = Image.open(self.images[index]).convert('RGB') + _target = Image.fromarray(scipy.io.loadmat(self.categories[index])["GTcls"][0]['Segmentation'][0]) + + return _img, _target + + def transform(self, sample): + composed_transforms = transforms.Compose([ + tr.RandomHorizontalFlip(), + tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size), + tr.RandomGaussianBlur(), + tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + tr.ToTensor()]) + + return composed_transforms(sample) + + + def __str__(self): + return 'SBDSegmentation(split=' + str(self.split) + ')' + + +if __name__ == '__main__': + from dataloaders.dataloader_utils import decode_segmap + from torch.utils.data import DataLoader + import matplotlib.pyplot as plt + import argparse + + parser = argparse.ArgumentParser() + args = parser.parse_args() + args.base_size = 513 + args.crop_size = 513 + + sbd_train = SBDSegmentation(args, split='train') + dataloader = DataLoader(sbd_train, batch_size=2, shuffle=True, num_workers=2) + + for ii, sample in enumerate(dataloader): + for jj in range(sample["image"].size()[0]): + img = sample['image'].numpy() + gt = sample['label'].numpy() + tmp = np.array(gt[jj]).astype(np.uint8) + segmap = decode_segmap(tmp, dataset='pascal') + img_tmp = np.transpose(img[jj], axes=[1, 2, 
0]) + img_tmp *= (0.229, 0.224, 0.225) + img_tmp += (0.485, 0.456, 0.406) + img_tmp *= 255.0 + img_tmp = img_tmp.astype(np.uint8) + plt.figure() + plt.title('display') + plt.subplot(211) + plt.imshow(img_tmp) + plt.subplot(212) + plt.imshow(segmap) + + if ii == 1: + break + + plt.show(block=True) \ No newline at end of file diff --git a/CDARTS_segmentation/dataloaders/segdatasets/__init__.py b/CDARTS_segmentation/dataloaders/segdatasets/__init__.py new file mode 100644 index 0000000..8f70169 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/segdatasets/__init__.py @@ -0,0 +1,4 @@ +from .base_dataset import BaseDataset +from .cityscapes import Cityscapes +from .cityscapes_panoptic import CityscapesPanoptic +from .coco_panoptic import COCOPanoptic diff --git a/CDARTS_segmentation/dataloaders/segdatasets/base_dataset.py b/CDARTS_segmentation/dataloaders/segdatasets/base_dataset.py new file mode 100644 index 0000000..73c7003 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/segdatasets/base_dataset.py @@ -0,0 +1,182 @@ +# ------------------------------------------------------------------------------ +# Base class for loading a segmentation Dataset. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import os + +import numpy as np +from PIL import Image, ImageOps + +import torch +from torch.utils import data + + +class BaseDataset(data.Dataset): + """ + Base class for segmentation dataset. + Arguments: + root: Str, root directory. + split: Str, data split, e.g. train/val/test. + is_train: Bool, for training or testing. + crop_size: Tuple, crop size. + mirror: Bool, whether to apply random horizontal flip. + min_scale: Float, min scale in scale augmentation. + max_scale: Float, max scale in scale augmentation. + scale_step_size: Float, step size to select random scale. + mean: Tuple, image mean. + std: Tuple, image std. + """ + def __init__(self, + root, + split, + is_train=True, + crop_size=(513, 1025), + mirror=True, + min_scale=0.5, + max_scale=2., + scale_step_size=0.25, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225)): + self.root = root + self.split = split + self.is_train = is_train + + self.crop_h, self.crop_w = crop_size + + self.mirror = mirror + self.min_scale = min_scale + self.max_scale = max_scale + self.scale_step_size = scale_step_size + + self.mean = mean + self.std = std + + self.pad_value = tuple([int(v * 255) for v in self.mean]) + + # ======== override the following fields ======== + self.ignore_label = 255 + self.label_pad_value = (self.ignore_label, ) + self.label_dtype = 'uint8' + + # list of image filename (required) + self.img_list = [] + # list of label filename (required) + self.ann_list = [] + # list of instance dictionary (optional) + self.ins_list = [] + + self.has_instance = False + self.label_divisor = 1000 + + self.raw_label_transform = None + self.pre_augmentation_transform = None + self.transform = None + self.target_transform = None + + def __len__(self): + return len(self.img_list) + + def __getitem__(self, index): + # TODO: handle transform properly when there is no label + dataset_dict = {} + assert os.path.exists(self.img_list[index]), 'Path does not exist: {}'.format(self.img_list[index]) + image = self.read_image(self.img_list[index], 'RGB') + if not self.is_train: + # Do not save this during training. 
+ dataset_dict['raw_image'] = image.copy()
+ if self.ann_list is not None:
+ assert os.path.exists(self.ann_list[index]), 'Path does not exist: {}'.format(self.ann_list[index])
+ label = self.read_label(self.ann_list[index], self.label_dtype)
+ else:
+ label = None
+ # `label` is None for splits without annotations (e.g. test), so guard the copy.
+ raw_label = label.copy() if label is not None else None
+ if self.raw_label_transform is not None:
+ raw_label = self.raw_label_transform(raw_label, self.ins_list[index])['semantic']
+ if not self.is_train:
+ # Do not save this during training
+ dataset_dict['raw_label'] = raw_label
+ size = image.shape
+ dataset_dict['raw_size'] = np.array(size)
+ # To save predictions for official evaluation.
+ name = os.path.splitext(os.path.basename(self.ann_list[index]))[0]
+ # TODO: how to return the filename?
+ # dataset_dict['name'] = np.array(name)
+
+ # Resize and pad image to the same size before data augmentation.
+ if self.pre_augmentation_transform is not None:
+ image, label = self.pre_augmentation_transform(image, label)
+ size = image.shape
+ dataset_dict['size'] = np.array(size)
+ else:
+ dataset_dict['size'] = dataset_dict['raw_size']
+
+ # Apply data augmentation.
+ if self.transform is not None:
+ image, label = self.transform(image, label)
+
+ dataset_dict['image'] = image
+ if not self.has_instance:
+ dataset_dict['semantic'] = torch.as_tensor(label.astype('long'))
+ return dataset_dict
+
+ # Generate training target.
+ if self.target_transform is not None:
+ label_dict = self.target_transform(label, self.ins_list[index])
+ for key in label_dict.keys():
+ dataset_dict[key] = label_dict[key]
+
+ return dataset_dict
+
+ @staticmethod
+ def read_image(file_name, format=None):
+ image = Image.open(file_name)
+
+ # Capture and ignore this bug: https://github.com/python-pillow/Pillow/issues/3973
+ try:
+ image = ImageOps.exif_transpose(image)
+ except Exception:
+ pass
+
+ if format is not None:
+ # PIL only supports RGB, so convert to RGB and flip channels over below
+ conversion_format = format
+ if format == "BGR":
+ conversion_format = "RGB"
+ image = image.convert(conversion_format)
+ image = np.asarray(image)
+ if format == "BGR":
+ # flip channels if needed
+ image = image[:, :, ::-1]
+ # PIL squeezes out the channel dimension for "L", so make it HWC
+ if format == "L":
+ image = np.expand_dims(image, -1)
+ return image
+
+ @staticmethod
+ def read_label(file_name, dtype='uint8'):
+ # In some cases `uint8` is not enough to hold the label values.
+ label = Image.open(file_name)
+ return np.asarray(label, dtype=dtype)
+
+ def reverse_transform(self, image_tensor):
+ """Reverses the normalization on an image tensor.
+ Args:
+ image_tensor: torch.Tensor, the normalized image tensor.
+ Returns:
+ image: numpy.array, the original image before normalization.
+ """
+ dtype = image_tensor.dtype
+ mean = torch.as_tensor(self.mean, dtype=dtype, device=image_tensor.device)
+ std = torch.as_tensor(self.std, dtype=dtype, device=image_tensor.device)
+ image_tensor.mul_(std[:, None, None]).add_(mean[:, None, None])
+ image = image_tensor.mul(255)\
+ .clamp(0, 255)\
+ .byte()\
+ .permute(1, 2, 0)\
+ .cpu().numpy()
+ return image
+
+ @staticmethod
+ def train_id_to_eval_id():
+ return None
diff --git a/CDARTS_segmentation/dataloaders/segdatasets/cityscapes.py b/CDARTS_segmentation/dataloaders/segdatasets/cityscapes.py
new file mode 100644
index 0000000..a1ec794
--- /dev/null
+++ b/CDARTS_segmentation/dataloaders/segdatasets/cityscapes.py
@@ -0,0 +1,150 @@
+# ------------------------------------------------------------------------------
+# Loads Cityscapes semantic dataset.
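+# Example usage (an illustrative sketch, not part of the original patch;
+# the root path below is hypothetical):
+#   ds = Cityscapes(root='data/cityscapes', split='train', is_train=True)
+#   sample = ds[0]  # dict with 'image' and 'semantic' entries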
+# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import glob +import os + +import numpy as np + +from .base_dataset import BaseDataset +from .utils import DatasetDescriptor +from ..transforms import build_transforms + +_CITYSCAPES_INFORMATION = DatasetDescriptor( + splits_to_sizes={'train': 2975, + 'trainval': 3475, + 'val': 500, + 'test': 1525}, + num_classes=19, + ignore_label=255, +) + +_CITYSCAPES_TRAIN_ID_TO_EVAL_ID = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 31, 32, 33] + +# A map from data type to folder name that saves the data. +_FOLDERS_MAP = { + 'image': 'leftImg8bit', + 'label': 'gtFine', +} + +# A map from data type to filename postfix. +_POSTFIX_MAP = { + 'image': '_leftImg8bit', + 'label': '_gtFine_labelTrainIds', +} + +# A map from data type to data format. +_DATA_FORMAT_MAP = { + 'image': 'png', + 'label': 'png', +} + + +class Cityscapes(BaseDataset): + """ + Cityscapes semantic segmentation dataset. + Arguments: + root: Str, root directory. + split: Str, data split, e.g. train/val/test. + is_train: Bool, for training or testing. + crop_size: Tuple, crop size. + mirror: Bool, whether to apply random horizontal flip. + min_scale: Float, min scale in scale augmentation. + max_scale: Float, max scale in scale augmentation. + scale_step_size: Float, step size to select random scale. + mean: Tuple, image mean. + std: Tuple, image std. + """ + def __init__(self, + root, + split, + is_train=True, + crop_size=(513, 1025), + mirror=True, + min_scale=0.5, + max_scale=2., + scale_step_size=0.25, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + **kwargs): + super(Cityscapes, self).__init__(root, split, is_train, crop_size, mirror, min_scale, max_scale, + scale_step_size, mean, std) + + self.num_classes = _CITYSCAPES_INFORMATION.num_classes + self.ignore_label = _CITYSCAPES_INFORMATION.ignore_label + self.label_pad_value = (self.ignore_label, ) + + # Get image and annotation list. + self.img_list = self._get_files('image', self.split) + self.ann_list = self._get_files('label', self.split) + + assert len(self) == _CITYSCAPES_INFORMATION.splits_to_sizes[self.split] + + self.transform = build_transforms(self, is_train) + + def _get_files(self, data, dataset_split): + """Gets files for the specified data type and dataset split. + Args: + data: String, desired data ('image' or 'label'). + dataset_split: String, dataset split ('train', 'val', 'test') + Returns: + A list of sorted file names or None when getting label for test set. + """ + if data == 'label' and dataset_split == 'test': + return None + pattern = '*%s.%s' % (_POSTFIX_MAP[data], _DATA_FORMAT_MAP[data]) + search_files = os.path.join( + self.root, _FOLDERS_MAP[data], dataset_split, '*', pattern) + filenames = glob.glob(search_files) + return sorted(filenames) + + @staticmethod + def train_id_to_eval_id(): + return _CITYSCAPES_TRAIN_ID_TO_EVAL_ID + + def _convert_train_id_to_eval_id(self, prediction): + """Converts the predicted label for evaluation. + There are cases where the training labels are not equal to the evaluation + labels. This function is used to perform the conversion so that we could + evaluate the results on the evaluation server. + Args: + prediction: Semantic segmentation prediction. + Returns: + Semantic segmentation prediction whose labels have been changed. 
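+ Example (illustrative): a pixel predicted as train id 0 ('road') is
+ rewritten to eval id 7, and train id 18 ('bicycle') to eval id 33.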
+ """ + converted_prediction = prediction.copy() + for train_id, eval_id in enumerate(self.train_id_to_eval_id()): + converted_prediction[prediction == train_id] = eval_id + + return converted_prediction + + @staticmethod + def create_label_colormap(): + """Creates a label colormap used in CITYSCAPES segmentation benchmark. + Returns: + A colormap for visualizing segmentation results. + """ + colormap = np.zeros((256, 3), dtype=np.uint8) + colormap[0] = [128, 64, 128] + colormap[1] = [244, 35, 232] + colormap[2] = [70, 70, 70] + colormap[3] = [102, 102, 156] + colormap[4] = [190, 153, 153] + colormap[5] = [153, 153, 153] + colormap[6] = [250, 170, 30] + colormap[7] = [220, 220, 0] + colormap[8] = [107, 142, 35] + colormap[9] = [152, 251, 152] + colormap[10] = [70, 130, 180] + colormap[11] = [220, 20, 60] + colormap[12] = [255, 0, 0] + colormap[13] = [0, 0, 142] + colormap[14] = [0, 0, 70] + colormap[15] = [0, 60, 100] + colormap[16] = [0, 80, 100] + colormap[17] = [0, 0, 230] + colormap[18] = [119, 11, 32] + return colormap diff --git a/CDARTS_segmentation/dataloaders/segdatasets/cityscapes_panoptic.py b/CDARTS_segmentation/dataloaders/segdatasets/cityscapes_panoptic.py new file mode 100644 index 0000000..baa5174 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/segdatasets/cityscapes_panoptic.py @@ -0,0 +1,130 @@ +# ------------------------------------------------------------------------------ +# Loads Cityscapes panoptic dataset. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import json +import os + +import numpy as np + +from .cityscapes import Cityscapes +from .utils import DatasetDescriptor +from ..transforms import build_transforms, PanopticTargetGenerator, SemanticTargetGenerator + +_CITYSCAPES_INFORMATION = DatasetDescriptor( + splits_to_sizes={'train': 2975, + 'trainval': 3475, + 'val': 500, + 'test': 1525}, + num_classes=19, + ignore_label=255, +) + +# Add 1 void label. +_CITYSCAPES_PANOPTIC_TRAIN_ID_TO_EVAL_ID = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 31, 32, 33, 0] + +_CITYSCAPES_THING_LIST = [11, 12, 13, 14, 15, 16, 17, 18] + + +class CityscapesPanoptic(Cityscapes): + """ + Cityscapes panoptic segmentation dataset. + Arguments: + root: Str, root directory. + split: Str, data split, e.g. train/val/test. + is_train: Bool, for training or testing. + crop_size: Tuple, crop size. + mirror: Bool, whether to apply random horizontal flip. + min_scale: Float, min scale in scale augmentation. + max_scale: Float, max scale in scale augmentation. + scale_step_size: Float, step size to select random scale. + mean: Tuple, image mean. + std: Tuple, image std. + semantic_only: Bool, only use semantic segmentation label. + ignore_stuff_in_offset: Boolean, whether to ignore stuff region when training the offset branch. + small_instance_area: Integer, indicates largest area for small instances. + small_instance_weight: Integer, indicates semantic loss weights for small instances. 
+ """ + def __init__(self, + root, + split, + is_train=True, + crop_size=(513, 1025), + mirror=True, + min_scale=0.5, + max_scale=2., + scale_step_size=0.25, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + semantic_only=False, + ignore_stuff_in_offset=False, + small_instance_area=0, + small_instance_weight=1, + **kwargs): + super(CityscapesPanoptic, self).__init__(root, split, is_train, crop_size, mirror, min_scale, max_scale, + scale_step_size, mean, std) + + self.num_classes = _CITYSCAPES_INFORMATION.num_classes + self.ignore_label = _CITYSCAPES_INFORMATION.ignore_label + self.label_pad_value = (0, 0, 0) + + self.has_instance = True + self.label_divisor = 1000 + self.label_dtype = np.float32 + self.thing_list = _CITYSCAPES_THING_LIST + + # Get image and annotation list. + if split == 'test': + self.img_list = self._get_files('image', self.split) + self.ann_list = None + self.ins_list = None + else: + self.img_list = [] + self.ann_list = [] + self.ins_list = [] + json_filename = os.path.join(self.root, 'gtFine', 'cityscapes_panoptic_{}_trainId.json'.format(self.split)) + dataset = json.load(open(json_filename)) + for img in dataset['images']: + img_file_name = img['file_name'] + self.img_list.append(os.path.join( + self.root, 'leftImg8bit', self.split, img_file_name.split('_')[0], + img_file_name.replace('_gtFine', ''))) + for ann in dataset['annotations']: + ann_file_name = ann['file_name'] + self.ann_list.append(os.path.join( + self.root, 'gtFine', 'cityscapes_panoptic_{}_trainId'.format(self.split), ann_file_name)) + self.ins_list.append(ann['segments_info']) + + assert len(self) == _CITYSCAPES_INFORMATION.splits_to_sizes[self.split] + + self.transform = build_transforms(self, is_train) + if semantic_only: + self.target_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id) + else: + self.target_transform = PanopticTargetGenerator(self.ignore_label, self.rgb2id, _CITYSCAPES_THING_LIST, + sigma=8, ignore_stuff_in_offset=ignore_stuff_in_offset, + small_instance_area=small_instance_area, + small_instance_weight=small_instance_weight) + # Generates semantic label for evaluation. + self.raw_label_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id) + + @staticmethod + def train_id_to_eval_id(): + return _CITYSCAPES_PANOPTIC_TRAIN_ID_TO_EVAL_ID + + @staticmethod + def rgb2id(color): + """Converts the color to panoptic label. + Color is created by `color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]`. + Args: + color: Ndarray or a tuple, color encoded image. + Returns: + Panoptic label. + """ + if isinstance(color, np.ndarray) and len(color.shape) == 3: + if color.dtype == np.uint8: + color = color.astype(np.int32) + return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] + return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) diff --git a/CDARTS_segmentation/dataloaders/segdatasets/coco_panoptic.py b/CDARTS_segmentation/dataloaders/segdatasets/coco_panoptic.py new file mode 100644 index 0000000..6de52e3 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/segdatasets/coco_panoptic.py @@ -0,0 +1,299 @@ +# ------------------------------------------------------------------------------ +# Loads COCO panoptic dataset. 
+# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import json +import os + +import numpy as np + +from .base_dataset import BaseDataset +from .utils import DatasetDescriptor +from ..transforms import build_transforms, Resize, PanopticTargetGenerator, SemanticTargetGenerator + +_COCO_PANOPTIC_INFORMATION = DatasetDescriptor( + splits_to_sizes={'train2017': 118287, + 'trainval2017': 123287, + 'val2017': 5000, + 'test-dev2017': 20288, + 'test2017': 40670}, # `test` includes `test-dev` and `test-challenge` + num_classes=133, + ignore_label=255, +) + +# Add 1 void label. +_COCO_PANOPTIC_TRAIN_ID_TO_EVAL_ID = ( + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 100, 107, 109, 112, + 118, 119, 122, 125, 128, 130, 133, 138, 141, 144, 145, 147, 148, 149, 151, 154, 155, 156, 159, 161, 166, 168, 171, + 175, 176, 177, 178, 180, 181, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 0]) + +_COCO_PANOPTIC_EVAL_ID_TO_TRAIN_ID = { + v: k for k, v in enumerate(_COCO_PANOPTIC_TRAIN_ID_TO_EVAL_ID[:-1]) +} + +_COCO_PANOPTIC_THING_LIST = list(range(80)) # the first 80 classes are `thing` classes + +COCO_CATEGORIES = [ + {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, + {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, + {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, + {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, + {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, + {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, + {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, + {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, + {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, + {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, + {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, + {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, + {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, + {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, + {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, + {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, + {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, + {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, + {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, + {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, + {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, + {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, + {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, + {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, + {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, + {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, + {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, + {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, + {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, + {"color": 
[255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, + {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, + {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, + {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, + {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, + {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, + {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, + {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, + {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, + {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, + {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, + {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, + {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, + {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, + {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, + {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, + {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, + {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"}, + {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, + {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, + {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, + {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, + {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, + {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, + {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, + {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, + {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, + {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, + {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, + {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, + {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, + {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, + {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, + {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, + {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, + {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, + {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, + {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, + {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, + {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, + {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, + {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, + {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, + {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, + {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, + {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, + {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, + {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, + {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, + {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair 
drier"}, + {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, + {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"}, + {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"}, + {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, + {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, + {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, + {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, + {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, + {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"}, + {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, + {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, + {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, + {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, + {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, + {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, + {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, + {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, + {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"}, + {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"}, + {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, + {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, + {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, + {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, + {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, + {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, + {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"}, + {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, + {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, + {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, + {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, + {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, + {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, + {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, + {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, + {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, + {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, + {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, + {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, + {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, + {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, + {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, + {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, + {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, + {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, + {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, + {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"}, + {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, + {"color": [208, 229, 228], "isthing": 
0, "id": 194, "name": "dirt-merged"}, + {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, + {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"}, + {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, + {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, + {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, + {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}, +] + + +class COCOPanoptic(BaseDataset): + """ + COCO panoptic segmentation dataset. + Arguments: + root: Str, root directory. + split: Str, data split, e.g. train/val/test. + is_train: Bool, for training or testing. + crop_size: Tuple, crop size. + mirror: Bool, whether to apply random horizontal flip. + min_scale: Float, min scale in scale augmentation. + max_scale: Float, max scale in scale augmentation. + scale_step_size: Float, step size to select random scale. + mean: Tuple, image mean. + std: Tuple, image std. + semantic_only: Bool, only use semantic segmentation label. + ignore_stuff_in_offset: Boolean, whether to ignore stuff region when training the offset branch. + small_instance_area: Integer, indicates largest area for small instances. + small_instance_weight: Integer, indicates semantic loss weights for small instances. + """ + def __init__(self, + root, + split, + min_resize_value=641, + max_resize_value=641, + resize_factor=32, + is_train=True, + crop_size=(641, 641), + mirror=True, + min_scale=0.5, + max_scale=2., + scale_step_size=0.25, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + semantic_only=False, + ignore_stuff_in_offset=False, + small_instance_area=0, + small_instance_weight=1, + **kwargs): + super(COCOPanoptic, self).__init__(root, split, is_train, crop_size, mirror, min_scale, max_scale, + scale_step_size, mean, std) + + assert split in _COCO_PANOPTIC_INFORMATION.splits_to_sizes.keys() + + self.num_classes = _COCO_PANOPTIC_INFORMATION.num_classes + self.ignore_label = _COCO_PANOPTIC_INFORMATION.ignore_label + self.label_pad_value = (0, 0, 0) + + self.has_instance = True + self.label_divisor = 256 + self.label_dtype = np.float32 + self.thing_list = _COCO_PANOPTIC_THING_LIST + + # Get image and annotation list. + if 'test' in split: + self.img_list = [] + self.ann_list = None + self.ins_list = None + json_filename = os.path.join(self.root, 'annotations', 'image_info_{}.json'.format(self.split)) + dataset = json.load(open(json_filename)) + for img in dataset['images']: + img_file_name = img['file_name'] + self.img_list.append(os.path.join(self.root, 'test2017', img_file_name)) + else: + self.img_list = [] + self.ann_list = [] + self.ins_list = [] + json_filename = os.path.join(self.root, 'annotations', 'panoptic_{}_trainId.json'.format(self.split)) + dataset = json.load(open(json_filename)) + # First sort by image id. 
+ images = sorted(dataset['images'], key=lambda i: i['id']) + annotations = sorted(dataset['annotations'], key=lambda i: i['image_id']) + for img in images: + img_file_name = img['file_name'] + self.img_list.append(os.path.join(self.root, self.split, img_file_name)) + for ann in annotations: + ann_file_name = ann['file_name'] + self.ann_list.append(os.path.join( + self.root, 'annotations', 'panoptic_{}'.format(self.split), ann_file_name)) + self.ins_list.append(ann['segments_info']) + + assert len(self) == _COCO_PANOPTIC_INFORMATION.splits_to_sizes[self.split] + + self.pre_augmentation_transform = Resize(min_resize_value, max_resize_value, resize_factor) + self.transform = build_transforms(self, is_train) + if semantic_only: + self.target_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id) + else: + self.target_transform = PanopticTargetGenerator(self.ignore_label, self.rgb2id, _COCO_PANOPTIC_THING_LIST, + sigma=8, ignore_stuff_in_offset=ignore_stuff_in_offset, + small_instance_area=small_instance_area, + small_instance_weight=small_instance_weight) + # Generates semantic label for evaluation. + self.raw_label_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id) + + @staticmethod + def train_id_to_eval_id(): + return _COCO_PANOPTIC_TRAIN_ID_TO_EVAL_ID + + @staticmethod + def rgb2id(color): + """Converts the color to panoptic label. + Color is created by `color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]`. + Args: + color: Ndarray or a tuple, color encoded image. + Returns: + Panoptic label. + """ + if isinstance(color, np.ndarray) and len(color.shape) == 3: + if color.dtype == np.uint8: + color = color.astype(np.int32) + return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] + return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) + + @staticmethod + def create_label_colormap(): + """Creates a label colormap used in COCO panoptic benchmark. + Returns: + A colormap for visualizing segmentation results. + """ + colormap = np.zeros((256, 3), dtype=np.uint8) + for i, color in enumerate(COCO_CATEGORIES): + colormap[i] = color['color'] + return colormap diff --git a/CDARTS_segmentation/dataloaders/segdatasets/utils.py b/CDARTS_segmentation/dataloaders/segdatasets/utils.py new file mode 100644 index 0000000..3149ad6 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/segdatasets/utils.py @@ -0,0 +1,18 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/tensorflow/models/blob/master/research/deeplab/datasets/data_generator.py +# ------------------------------------------------------------------------------ + +import collections + +# Named tuple to describe the dataset properties. +DatasetDescriptor = collections.namedtuple( + 'DatasetDescriptor', + [ + 'splits_to_sizes', # Splits of the dataset into training, val and test. + 'num_classes', # Number of semantic classes, including the + # background class (if exists). For example, there + # are 20 foreground classes + 1 background class in + # the PASCAL VOC 2012 dataset. Thus, we set + # num_classes=21. + 'ignore_label', # Ignore label value. 
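+ # Example (illustrative):
+ # DatasetDescriptor(splits_to_sizes={'train': 2975, 'val': 500},
+ # num_classes=19, ignore_label=255)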
+ ]) diff --git a/CDARTS_segmentation/dataloaders/transforms/__init__.py b/CDARTS_segmentation/dataloaders/transforms/__init__.py new file mode 100644 index 0000000..c37854a --- /dev/null +++ b/CDARTS_segmentation/dataloaders/transforms/__init__.py @@ -0,0 +1,3 @@ +from .build import build_transforms +from .pre_augmentation_transforms import Resize +from .target_transforms import PanopticTargetGenerator, SemanticTargetGenerator diff --git a/CDARTS_segmentation/dataloaders/transforms/build.py b/CDARTS_segmentation/dataloaders/transforms/build.py new file mode 100644 index 0000000..08742bc --- /dev/null +++ b/CDARTS_segmentation/dataloaders/transforms/build.py @@ -0,0 +1,57 @@ +# ------------------------------------------------------------------------------ +# Builds transformation for both image and labels. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from . import transforms as T + + +def build_transforms(dataset, is_train=True): + if is_train: + min_scale = dataset.min_scale + max_scale = dataset.max_scale + scale_step_size = dataset.scale_step_size + crop_h = dataset.crop_h + crop_w = dataset.crop_w + pad_value = dataset.pad_value + ignore_label = dataset.label_pad_value + flip_prob = 0.5 if dataset.mirror else 0 + mean = dataset.mean + std = dataset.std + else: + # no data augmentation + min_scale = 1 + max_scale = 1 + scale_step_size = 0 + flip_prob = 0 + crop_h = dataset.crop_h + crop_w = dataset.crop_w + pad_value = dataset.pad_value + ignore_label = dataset.label_pad_value + mean = dataset.mean + std = dataset.std + + transforms = T.Compose( + [ + T.RandomScale( + min_scale, + max_scale, + scale_step_size + ), + T.RandomCrop( + crop_h, + crop_w, + pad_value, + ignore_label, + random_pad=is_train + ), + T.RandomHorizontalFlip(flip_prob), + T.ToTensor(), + T.Normalize( + mean, + std + ) + ] + ) + + return transforms diff --git a/CDARTS_segmentation/dataloaders/transforms/pre_augmentation_transforms.py b/CDARTS_segmentation/dataloaders/transforms/pre_augmentation_transforms.py new file mode 100644 index 0000000..b4d9864 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/transforms/pre_augmentation_transforms.py @@ -0,0 +1,92 @@ +# ------------------------------------------------------------------------------ +# Builds transformation before data augmentation. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import warnings + +import cv2 +import math +import numpy as np + + +class Resize(object): + """ + Applies random scale augmentation. + Reference: https://github.com/tensorflow/models/blob/master/research/deeplab/input_preprocess.py#L28 + Arguments: + min_resize_value: Desired size of the smaller image side, no resize if set to None + max_resize_value: Maximum allowed size of the larger image side, no limit if set to None + resize_factor: Resized dimensions are multiple of factor plus one. + keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input + will be resized while keeping the original aspect ratio. If False, the + input will be resized to [max_resize_value, max_resize_value] without + keeping the original aspect ratio. + align_corners: If True, exactly align all 4 corners of input and output. 
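+ Example (illustrative): with min_resize_value=641, max_resize_value=641
+ and resize_factor=32, a 1024x2048 image comes out as 321x641: the larger
+ side is capped near 641 and both sides snap to a multiple of 32 plus one.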
+ """ + def __init__(self, min_resize_value=None, max_resize_value=None, resize_factor=None, + keep_aspect_ratio=True, align_corners=False): + if min_resize_value is not None and min_resize_value < 0: + min_resize_value = None + if max_resize_value is not None and max_resize_value < 0: + max_resize_value = None + if resize_factor is not None and resize_factor < 0: + resize_factor = None + self.min_resize_value = min_resize_value + self.max_resize_value = max_resize_value + self.resize_factor = resize_factor + self.keep_aspect_ratio = keep_aspect_ratio + self.align_corners = align_corners + + if self.align_corners: + warnings.warn('`align_corners = True` is not supported by opencv.') + + if self.max_resize_value is not None: + # Modify the max_size to be a multiple of factor plus 1 and make sure the max dimension after resizing + # is no larger than max_size. + if self.resize_factor is not None: + self.max_resize_value = (self.max_resize_value - (self.max_resize_value - 1) % self.resize_factor) + + def __call__(self, image, label): + if self.min_resize_value is None: + return image, label + [orig_height, orig_width, _] = image.shape + orig_min_size = np.minimum(orig_height, orig_width) + + # Calculate the larger of the possible sizes + large_scale_factor = self.min_resize_value / orig_min_size + large_height = int(math.floor(orig_height * large_scale_factor)) + large_width = int(math.floor(orig_width * large_scale_factor)) + large_size = np.array([large_height, large_width]) + + new_size = large_size + if self.max_resize_value is not None: + # Calculate the smaller of the possible sizes, use that if the larger is too big. + orig_max_size = np.maximum(orig_height, orig_width) + small_scale_factor = self.max_resize_value / orig_max_size + small_height = int(math.floor(orig_height * small_scale_factor)) + small_width = int(math.floor(orig_width * small_scale_factor)) + small_size = np.array([small_height, small_width]) + + if np.max(large_size) > self.max_resize_value: + new_size = small_size + + # Ensure that both output sides are multiples of factor plus one. + if self.resize_factor is not None: + new_size += (self.resize_factor - (new_size - 1) % self.resize_factor) % self.resize_factor + # If new_size exceeds largest allowed size + new_size[new_size > self.max_resize_value] -= self.resize_factor + + if not self.keep_aspect_ratio: + # If not keep the aspect ratio, we resize everything to max_size, allowing + # us to do pre-processing without extra padding. + new_size = [np.max(new_size), np.max(new_size)] + + # TODO: cv2 uses align_corner=False + # TODO: use fvcore (https://github.com/facebookresearch/fvcore/blob/master/fvcore/transforms/transform.py#L377) + image_dtype = image.dtype + label_dtype = label.dtype + # cv2: (width, height) + image = cv2.resize(image.astype(np.float), (new_size[1], new_size[0]), interpolation=cv2.INTER_LINEAR) + label = cv2.resize(label.astype(np.float), (new_size[1], new_size[0]), interpolation=cv2.INTER_NEAREST) + return image.astype(image_dtype), label.astype(label_dtype) diff --git a/CDARTS_segmentation/dataloaders/transforms/target_transforms.py b/CDARTS_segmentation/dataloaders/transforms/target_transforms.py new file mode 100644 index 0000000..47a3f39 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/transforms/target_transforms.py @@ -0,0 +1,200 @@ +# ------------------------------------------------------------------------------ +# Generates targets for Panoptic-DeepLab. 
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import numpy as np
+
+import torch
+
+
+class PanopticTargetGenerator(object):
+    """
+    Generates panoptic training target for Panoptic-DeepLab.
+    Annotation is assumed to have Cityscapes format.
+    Arguments:
+        ignore_label: Integer, the ignore label for semantic segmentation.
+        rgb2id: Function, panoptic label is encoded in a colored image, this function converts a color to the
+            corresponding panoptic label.
+        thing_list: List, a list of thing classes.
+        sigma: the sigma for the Gaussian kernel.
+        ignore_stuff_in_offset: Boolean, whether to ignore stuff region when training the offset branch.
+        small_instance_area: Integer, indicates largest area for small instances.
+        small_instance_weight: Integer, indicates semantic loss weight for small instances.
+        ignore_crowd_in_semantic: Boolean, whether to ignore crowd region in the semantic segmentation branch;
+            crowd regions are ignored in the original TensorFlow implementation.
+    """
+    def __init__(self, ignore_label, rgb2id, thing_list, sigma=8, ignore_stuff_in_offset=False,
+                 small_instance_area=0, small_instance_weight=1, ignore_crowd_in_semantic=False):
+        self.ignore_label = ignore_label
+        self.rgb2id = rgb2id
+        self.thing_list = thing_list
+        self.ignore_stuff_in_offset = ignore_stuff_in_offset
+        self.small_instance_area = small_instance_area
+        self.small_instance_weight = small_instance_weight
+        self.ignore_crowd_in_semantic = ignore_crowd_in_semantic
+
+        self.sigma = sigma
+        size = 6 * sigma + 3
+        x = np.arange(0, size, 1, float)
+        y = x[:, np.newaxis]
+        x0, y0 = 3 * sigma + 1, 3 * sigma + 1
+        self.g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
+
+    def __call__(self, panoptic, segments):
+        """Generates the training target.
+        reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py
+        reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18
+        Args:
+            panoptic: numpy.array, colored image encoding panoptic label.
+            segments: List, a list of dictionaries, one per segment, with fields:
+                - id: panoptic id, after decoding `panoptic`.
+                - category_id: semantic class id.
+                - area: segment area.
+                - bbox: segment bounding box.
+                - iscrowd: crowd region.
+        Returns:
+            A dictionary with fields:
+                - semantic: Tensor, semantic label, shape=(H, W).
+                - foreground: Tensor, foreground mask label, shape=(H, W).
+                - center: Tensor, center heatmap, shape=(1, H, W).
+                - center_points: List, center coordinates, with tuple (y-coord, x-coord).
+                - offset: Tensor, offset, shape=(2, H, W), first dim is (offset_y, offset_x).
+                - semantic_weights: Tensor, loss weight for semantic prediction, shape=(H, W).
+                - center_weights: Tensor, ignore region of center prediction, shape=(H, W); used as weights for
+                    center regression, 0 = ignore, 1 = has instance. Multiply this mask to the loss.
+                - offset_weights: Tensor, ignore region of offset prediction, shape=(H, W); used as weights for
+                    offset regression, 0 = ignore, 1 = has instance. Multiply this mask to the loss.
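+        Note (editorial): with the default sigma=8 the precomputed Gaussian
+        window has size 6 * sigma + 3 = 51, so each instance center stamps a
+        51x51 patch onto the center heatmap, clipped at the image borders.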
+ """ + panoptic = self.rgb2id(panoptic) + height, width = panoptic.shape[0], panoptic.shape[1] + semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_label + foreground = np.zeros_like(panoptic, dtype=np.uint8) + center = np.zeros((1, height, width), dtype=np.float32) + center_pts = [] + offset = np.zeros((2, height, width), dtype=np.float32) + y_coord = np.ones_like(panoptic, dtype=np.float32) + x_coord = np.ones_like(panoptic, dtype=np.float32) + y_coord = np.cumsum(y_coord, axis=0) - 1 + x_coord = np.cumsum(x_coord, axis=1) - 1 + # Generate pixel-wise loss weights + semantic_weights = np.ones_like(panoptic, dtype=np.uint8) + # 0: ignore, 1: has instance + # three conditions for a region to be ignored for instance branches: + # (1) It is labeled as `ignore_label` + # (2) It is crowd region (iscrowd=1) + # (3) (Optional) It is stuff region (for offset branch) + center_weights = np.zeros_like(panoptic, dtype=np.uint8) + offset_weights = np.zeros_like(panoptic, dtype=np.uint8) + for seg in segments: + cat_id = seg["category_id"] + if self.ignore_crowd_in_semantic: + if not seg['iscrowd']: + semantic[panoptic == seg["id"]] = cat_id + else: + semantic[panoptic == seg["id"]] = cat_id + if cat_id in self.thing_list: + foreground[panoptic == seg["id"]] = 1 + if not seg['iscrowd']: + # Ignored regions are not in `segments`. + # Handle crowd region. + center_weights[panoptic == seg["id"]] = 1 + if self.ignore_stuff_in_offset: + # Handle stuff region. + if cat_id in self.thing_list: + offset_weights[panoptic == seg["id"]] = 1 + else: + offset_weights[panoptic == seg["id"]] = 1 + if cat_id in self.thing_list: + # find instance center + mask_index = np.where(panoptic == seg["id"]) + if len(mask_index[0]) == 0: + # the instance is completely cropped + continue + + # Find instance area + ins_area = len(mask_index[0]) + if ins_area < self.small_instance_area: + semantic_weights[panoptic == seg["id"]] = self.small_instance_weight + + center_y, center_x = np.mean(mask_index[0]), np.mean(mask_index[1]) + center_pts.append([center_y, center_x]) + + # generate center heatmap + y, x = int(center_y), int(center_x) + # outside image boundary + if x < 0 or y < 0 or \ + x >= width or y >= height: + continue + sigma = self.sigma + # upper left + ul = int(np.round(x - 3 * sigma - 1)), int(np.round(y - 3 * sigma - 1)) + # bottom right + br = int(np.round(x + 3 * sigma + 2)), int(np.round(y + 3 * sigma + 2)) + + c, d = max(0, -ul[0]), min(br[0], width) - ul[0] + a, b = max(0, -ul[1]), min(br[1], height) - ul[1] + + cc, dd = max(0, ul[0]), min(br[0], width) + aa, bb = max(0, ul[1]), min(br[1], height) + center[0, aa:bb, cc:dd] = np.maximum( + center[0, aa:bb, cc:dd], self.g[a:b, c:d]) + + # generate offset (2, h, w) -> (y-dir, x-dir) + offset_y_index = (np.zeros_like(mask_index[0]), mask_index[0], mask_index[1]) + offset_x_index = (np.ones_like(mask_index[0]), mask_index[0], mask_index[1]) + offset[offset_y_index] = center_y - y_coord[mask_index] + offset[offset_x_index] = center_x - x_coord[mask_index] + + return dict( + semantic=torch.as_tensor(semantic.astype('long')), + foreground=torch.as_tensor(foreground.astype('long')), + center=torch.as_tensor(center.astype(np.float32)), + center_points=center_pts, + offset=torch.as_tensor(offset.astype(np.float32)), + semantic_weights=torch.as_tensor(semantic_weights.astype(np.float32)), + center_weights=torch.as_tensor(center_weights.astype(np.float32)), + offset_weights=torch.as_tensor(offset_weights.astype(np.float32)) + ) + + +class 
SemanticTargetGenerator(object): + """ + Generates semantic training target only for Panoptic-DeepLab (no instance). + Annotation is assumed to have Cityscapes format. + Arguments: + ignore_label: Integer, the ignore label for semantic segmentation. + rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the + corresponding panoptic label. + thing_list: List, a list of thing classes + sigma: the sigma for Gaussian kernel. + """ + def __init__(self, ignore_label, rgb2id): + self.ignore_label = ignore_label + self.rgb2id = rgb2id + + def __call__(self, panoptic, segments): + """Generates the training target. + reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py + reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18 + Args: + panoptic: numpy.array, colored image encoding panoptic label. + segments: List, a list of dictionary containing information of every segment, it has fields: + - id: panoptic id, after decoding `panoptic`. + - category_id: semantic class id. + - area: segment area. + - bbox: segment bounding box. + - iscrowd: crowd region. + Returns: + A dictionary with fields: + - semantic: Tensor, semantic label, shape=(H, W). + """ + panoptic = self.rgb2id(panoptic) + semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_label + for seg in segments: + cat_id = seg["category_id"] + semantic[panoptic == seg["id"]] = cat_id + + return dict( + semantic=torch.as_tensor(semantic.astype('long')) + ) diff --git a/CDARTS_segmentation/dataloaders/transforms/transforms.py b/CDARTS_segmentation/dataloaders/transforms/transforms.py new file mode 100644 index 0000000..a6404a6 --- /dev/null +++ b/CDARTS_segmentation/dataloaders/transforms/transforms.py @@ -0,0 +1,172 @@ +# ------------------------------------------------------------------------------ +# Data augmentation following DeepLab +# (https://github.com/tensorflow/models/blob/master/research/deeplab/input_preprocess.py#L28). +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import random + +import cv2 +import numpy as np +from torchvision.transforms import functional as F + + +class Compose(object): + """ + Composes a sequence of transforms. + Arguments: + transforms: A list of transforms. + """ + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, image, label): + for t in self.transforms: + image, label = t(image, label) + return image, label + + def __repr__(self): + format_string = self.__class__.__name__ + "(" + for t in self.transforms: + format_string += "\n" + format_string += " {0}".format(t) + format_string += "\n)" + return format_string + + +class ToTensor(object): + """ + Converts image to torch Tensor. + """ + def __call__(self, image, label): + return F.to_tensor(image), label + + +class Normalize(object): + """ + Normalizes image by mean and std. + """ + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, image, label): + image = F.normalize(image, mean=self.mean, std=self.std) + return image, label + + +class RandomScale(object): + """ + Applies random scale augmentation. + Arguments: + min_scale: Minimum scale value. + max_scale: Maximum scale value. + scale_step_size: The step size from minimum to maximum value. 
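+    Worked example (editorial, per get_random_scale below): min_scale=0.5,
+    max_scale=2.0, scale_step_size=0.25 yields int((2.0 - 0.5) / 0.25 + 1) = 7
+    candidate scales {0.5, 0.75, ..., 2.0}; one is drawn at random per call.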
+ """ + def __init__(self, min_scale, max_scale, scale_step_size): + self.min_scale = min_scale + self.max_scale = max_scale + self.scale_step_size = scale_step_size + + @staticmethod + def get_random_scale(min_scale_factor, max_scale_factor, step_size): + """Gets a random scale value. + Args: + min_scale_factor: Minimum scale value. + max_scale_factor: Maximum scale value. + step_size: The step size from minimum to maximum value. + Returns: + A random scale value selected between minimum and maximum value. + Raises: + ValueError: min_scale_factor has unexpected value. + """ + if min_scale_factor < 0 or min_scale_factor > max_scale_factor: + raise ValueError('Unexpected value of min_scale_factor.') + + if min_scale_factor == max_scale_factor: + return min_scale_factor + + # When step_size = 0, we sample the value uniformly from [min, max). + if step_size == 0: + return random.uniform(min_scale_factor, max_scale_factor) + + # When step_size != 0, we randomly select one discrete value from [min, max]. + num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1) + scale_factors = np.linspace(min_scale_factor, max_scale_factor, num_steps) + np.random.shuffle(scale_factors) + return scale_factors[0] + + def __call__(self, image, label): + f_scale = self.get_random_scale(self.min_scale, self.max_scale, self.scale_step_size) + # TODO: cv2 uses align_corner=False + # TODO: use fvcore (https://github.com/facebookresearch/fvcore/blob/master/fvcore/transforms/transform.py#L377) + image_dtype = image.dtype + label_dtype = label.dtype + image = cv2.resize(image.astype(np.float), None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR) + label = cv2.resize(label.astype(np.float), None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST) + return image.astype(image_dtype), label.astype(label_dtype) + + +class RandomCrop(object): + """ + Applies random crop augmentation. + Arguments: + crop_h: Integer, crop height size. + crop_w: Integer, crop width size. + pad_value: Tuple, pad value for image, length 3. + ignore_label: Tuple, pad value for label, length could be 1 (semantic) or 3 (panoptic). + random_pad: Bool, when crop size larger than image size, whether to randomly pad four boundaries, + or put image to top-left and only pad bottom and right boundaries. 
+ """ + def __init__(self, crop_h, crop_w, pad_value, ignore_label, random_pad): + self.crop_h = crop_h + self.crop_w = crop_w + self.pad_value = pad_value + self.ignore_label = ignore_label + self.random_pad = random_pad + + def __call__(self, image, label): + img_h, img_w = image.shape[0], image.shape[1] + # save dtype + image_dtype = image.dtype + label_dtype = label.dtype + # padding + pad_h = max(self.crop_h - img_h, 0) + pad_w = max(self.crop_w - img_w, 0) + if pad_h > 0 or pad_w > 0: + if self.random_pad: + pad_top = random.randint(0, pad_h) + pad_bottom = pad_h - pad_top + pad_left = random.randint(0, pad_w) + pad_right = pad_w - pad_left + else: + pad_top, pad_bottom, pad_left, pad_right = 0, pad_h, 0, pad_w + img_pad = cv2.copyMakeBorder(image, pad_top, pad_bottom, pad_left, pad_right, cv2.BORDER_CONSTANT, + value=self.pad_value) + label_pad = cv2.copyMakeBorder(label, pad_top, pad_bottom, pad_left, pad_right, cv2.BORDER_CONSTANT, + value=self.ignore_label) + else: + img_pad, label_pad = image, label + img_h, img_w = img_pad.shape[0], img_pad.shape[1] + h_off = random.randint(0, img_h - self.crop_h) + w_off = random.randint(0, img_w - self.crop_w) + image = np.asarray(img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w], np.float32) + label = np.asarray(label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w], np.float32) + return image.astype(image_dtype), label.astype(label_dtype) + + +class RandomHorizontalFlip(object): + """ + Applies random flip augmentation. + Arguments: + prob: Probability of flip. + """ + def __init__(self, prob=0.5): + self.prob = prob + + def __call__(self, image, label): + if random.random() < self.prob: + # https://discuss.pytorch.org/t/torch-from-numpy-not-support-negative-strides/3663 + image = image[:, ::-1].copy() + label = label[:, ::-1].copy() + return image, label diff --git a/CDARTS_segmentation/install.sh b/CDARTS_segmentation/install.sh new file mode 100644 index 0000000..f56fbe5 --- /dev/null +++ b/CDARTS_segmentation/install.sh @@ -0,0 +1,33 @@ +pip install torchvision==0.4.0 +git clone https://github.com/NVIDIA/apex +cd apex +git checkout f3a960f +pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ +# pip install torch==1.5.0 +# pip install timm==0.2.1 +pip install torchvision==0.5.0 +pip install numba +pip install pycocotools +pip install yacs +pip install tensorboardX +pip install thop==0.0.31.post2001170342 +pip install pandas +# pip install --upgrade git+https://github.com/Lyken17/pytorch-OpCounter.git +pip install --upgrade git+https://github.com/sovrasov/flops-counter.pytorch.git +pip install easydict +pip install numpy==1.16.1 +pip install matplotlib==3.0.0 +pip install pyyaml +pip install tensorboard +pip install tqdm +pip install opencv-python==3.4.4.19 +pip install Pillow==8.2.0 +pip install scipy==1.1.0 +pip install protobuf==3.8.0 +pip install timm==0.3.0 +pip install git+https://github.com/mcordts/cityscapesScripts.git +pip install git+https://github.com/cocodataset/panopticapi.git +# pip install -U iopath +# pip install git+https://github.com/facebookresearch/fvcore.git +pip install einops +# python -m pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.6/index.html diff --git a/CDARTS_segmentation/segmentation/__init__.py b/CDARTS_segmentation/segmentation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/CDARTS_segmentation/segmentation/config/__init__.py b/CDARTS_segmentation/segmentation/config/__init__.py new 
file mode 100644 index 0000000..623b156 --- /dev/null +++ b/CDARTS_segmentation/segmentation/config/__init__.py @@ -0,0 +1,4 @@ +from .default import _C as config +from .default import update_config +seg_config = config +update_seg_config = update_config \ No newline at end of file diff --git a/CDARTS_segmentation/segmentation/config/default.py b/CDARTS_segmentation/segmentation/config/default.py new file mode 100644 index 0000000..1bbc14f --- /dev/null +++ b/CDARTS_segmentation/segmentation/config/default.py @@ -0,0 +1,306 @@ +import os + +from yacs.config import CfgNode as CN + +_C = CN() + +# ----------------------------------------------------------------------------- +# Misc +# ----------------------------------------------------------------------------- +_C.OUTPUT_DIR = '' +_C.GPUS = (0,) +_C.WORKERS = 4 +# Logging frequency +_C.PRINT_FREQ = 20 +# Checkpoint frequency +_C.CKPT_FREQ = 5000 + +# ----------------------------------------------------------------------------- +# CUDNN +# ----------------------------------------------------------------------------- +_C.CUDNN = CN() +_C.CUDNN.BENCHMARK = True +_C.CUDNN.DETERMINISTIC = False +_C.CUDNN.ENABLED = True + +# ----------------------------------------------------------------------------- +# Model +# ----------------------------------------------------------------------------- +_C.MODEL = CN() +_C.MODEL.META_ARCHITECTURE = 'panoptic_deeplab' +# pretrained model (including decoder, head, etc) on other dataset +# need to do a net surgery to remove classifiers etc. +_C.MODEL.WEIGHTS = '' + +_C.MODEL.BN_MOMENTUM = 0.1 + +# ---------------------------------------------------------------------------- # +# Backbone options +# ---------------------------------------------------------------------------- # +_C.MODEL.BACKBONE = CN() + +# META could be +# resnet +# mobilenet_v2 +# mnasnet +_C.MODEL.BACKBONE.META = 'resnet' + +# NAME could be +# For resnet: +# 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2' +# For mobilenet_v2: +# 'mobilenet_v2' +# For mnasnet: +# 'mnasnet0_5', 'mnasnet0_75' (no official weight), 'mnasnet1_0', 'mnasnet1_3' (no official weight) +_C.MODEL.BACKBONE.NAME = "resnet50" +# Controls output stride +_C.MODEL.BACKBONE.DILATION = (False, False, True) +# pretrained backbone provided by official PyTorch modelzoo +_C.MODEL.BACKBONE.PRETRAINED = True +_C.MODEL.BACKBONE.WEIGHTS = '' + +# Low-level feature key +# For resnet backbone: +# res2: 256 +# res3: 512 +# res4: 1024 +# res5: 2048 + +# For mobilenet_v2 backbone: +# layer_4: 24 +# layer_7: 32 +# layer_14: 96 +# layer_18: 320 + +# For mnasnet backbone: +# layer_9: 24 (0_5: 16) +# layer_10: 40 (0_5: 24) +# layer_12: 96 (0_5: 48) +# layer_14: 320 (0_5: 160) + +# ---------------------------------------------------------------------------- # +# Decoder options +# ---------------------------------------------------------------------------- # +_C.MODEL.DECODER = CN() +_C.MODEL.DECODER.IN_CHANNELS = 2048 +_C.MODEL.DECODER.FEATURE_KEY = 'res5' +_C.MODEL.DECODER.DECODER_CHANNELS = 256 +_C.MODEL.DECODER.ATROUS_RATES = (6, 12, 18) + +# TODO: pass these into the decoder. 
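+# (Editorial note, a sketch: every default below can be overridden at run time
+#  by update_config() at the bottom of this file, which merges a YAML file
+#  (args.cfg) and then a key/value list (args.opts, e.g.
+#  ['SOLVER.BASE_LR', '0.005']) before freezing the config.)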
+_C.MODEL.DECODER.CONV_TYPE = 'depthwise_separable_conv' +_C.MODEL.DECODER.CONV_KERNEL = 5 +_C.MODEL.DECODER.CONV_PADDING = 2 +_C.MODEL.DECODER.CONV_STACK = 1 + +# ---------------------------------------------------------------------------- # +# DeepLabV3+ options +# ---------------------------------------------------------------------------- # +_C.MODEL.DEEPLABV3PLUS = CN() +_C.MODEL.DEEPLABV3PLUS.LOW_LEVEL_CHANNELS = 256 +_C.MODEL.DEEPLABV3PLUS.LOW_LEVEL_KEY = 'res2' +_C.MODEL.DEEPLABV3PLUS.LOW_LEVEL_CHANNELS_PROJECT = 48 + +# ---------------------------------------------------------------------------- # +# Panoptic-DeepLab options +# ---------------------------------------------------------------------------- # +_C.MODEL.PANOPTIC_DEEPLAB = CN() +_C.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_CHANNELS = (512, 256) +_C.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_KEY = ('res3', 'res2') +_C.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_CHANNELS_PROJECT = (64, 32) +_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE = CN() +_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.ENABLE = False +_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.LOW_LEVEL_CHANNELS_PROJECT = (32, 16) +_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.DECODER_CHANNELS = 128 +_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.HEAD_CHANNELS = 128 +_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.ASPP_CHANNELS = 256 +_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.NUM_CLASSES = (1, 2) +_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.CLASS_KEY = ('center', 'offset') +_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.FOREGROUND_SEG = False +_C.MODEL.PANOPTIC_DEEPLAB.INSTANCE.FOREGROUND_ARCH = 'v1' + +# ----------------------------------------------------------------------------- +# DATASET +# ----------------------------------------------------------------------------- +_C.DATASET = CN() +_C.DATASET.ROOT = './datasets/cityscapes' +_C.DATASET.DATASET = 'cityscapes' +_C.DATASET.NUM_CLASSES = 19 +_C.DATASET.TRAIN_SPLIT = 'train' +_C.DATASET.TEST_SPLIT = 'val' +_C.DATASET.CROP_SIZE = (513, 1025) +_C.DATASET.MIRROR = True +_C.DATASET.MIN_SCALE = 0.5 +_C.DATASET.MAX_SCALE = 2.0 +_C.DATASET.SCALE_STEP_SIZE = 0.1 +_C.DATASET.MEAN = (0.485, 0.456, 0.406) +_C.DATASET.STD = (0.229, 0.224, 0.225) +_C.DATASET.SEMANTIC_ONLY = False +_C.DATASET.IGNORE_STUFF_IN_OFFSET = True +_C.DATASET.SMALL_INSTANCE_AREA = 0 +_C.DATASET.SMALL_INSTANCE_WEIGHT = 1 + +_C.DATASET.MIN_RESIZE_VALUE = -1 +_C.DATASET.MAX_RESIZE_VALUE = -1 +_C.DATASET.RESIZE_FACTOR = -1 + +# ----------------------------------------------------------------------------- +# Solver +# ----------------------------------------------------------------------------- +_C.SOLVER = CN() +_C.SOLVER.BASE_LR = 0.01 +_C.SOLVER.WEIGHT_DECAY = 0.0001 +# Weight decay of norm layers. +_C.SOLVER.WEIGHT_DECAY_NORM = 0.0 +# Bias. +_C.SOLVER.BIAS_LR_FACTOR = 2.0 +_C.SOLVER.WEIGHT_DECAY_BIAS = 0.0 +_C.SOLVER.MOMENTUM = 0.9 +_C.SOLVER.OPTIMIZER = 'sgd' +_C.SOLVER.ADAM_BETAS = (0.9, 0.999) +_C.SOLVER.ADAM_EPS = 1e-08 + +_C.SOLVER.LR_SCHEDULER_NAME = 'WarmupPolyLR' +# The iteration number to decrease learning rate by GAMMA. 
+_C.SOLVER.STEPS = (30000,) +_C.SOLVER.GAMMA = 0.1 + +_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000 +_C.SOLVER.WARMUP_ITERS = 1000 +_C.SOLVER.WARMUP_METHOD = "linear" + +_C.SOLVER.POLY_LR_POWER = 0.9 +_C.SOLVER.POLY_LR_CONSTANT_ENDING = 0 + +_C.SOLVER.CLIP_GRADIENTS = CN() +_C.SOLVER.CLIP_GRADIENTS.ENABLED = False +# Type of gradient clipping, currently 2 values are supported: +# - "value": the absolute values of elements of each gradients are clipped +# - "norm": the norm of the gradient for each parameter is clipped thus +# affecting all elements in the parameter +_C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value" +# Maximum absolute value used for clipping gradients +_C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0 +# Floating point number p for L-p norm to be used with the "norm" +# gradient clipping type; for L-inf, please specify .inf +_C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0 + +# ----------------------------------------------------------------------------- +# Loss +# ----------------------------------------------------------------------------- +_C.LOSS = CN() + +_C.LOSS.SEMANTIC = CN() +_C.LOSS.SEMANTIC.NAME = 'cross_entropy' +# TODO: make `ignore` more consistent +_C.LOSS.SEMANTIC.IGNORE = 255 +_C.LOSS.SEMANTIC.REDUCTION = 'mean' +_C.LOSS.SEMANTIC.THRESHOLD = 0.7 +_C.LOSS.SEMANTIC.MIN_KEPT = 100000 +_C.LOSS.SEMANTIC.TOP_K_PERCENT = 1.0 +_C.LOSS.SEMANTIC.WEIGHT = 1.0 + +_C.LOSS.CENTER = CN() +_C.LOSS.CENTER.NAME = 'mse' +_C.LOSS.CENTER.REDUCTION = 'none' +_C.LOSS.CENTER.WEIGHT = 200.0 + +_C.LOSS.OFFSET = CN() +_C.LOSS.OFFSET.NAME = 'l1' +_C.LOSS.OFFSET.REDUCTION = 'none' +_C.LOSS.OFFSET.WEIGHT = 0.01 + +_C.LOSS.FOREGROUND = CN() +_C.LOSS.FOREGROUND.NAME = 'cross_entropy' +_C.LOSS.FOREGROUND.IGNORE = 255 +_C.LOSS.FOREGROUND.REDUCTION = 'mean' +_C.LOSS.FOREGROUND.THRESHOLD = 0.7 +_C.LOSS.FOREGROUND.MIN_KEPT = 100000 +_C.LOSS.FOREGROUND.TOP_K_PERCENT = 1.0 +_C.LOSS.FOREGROUND.WEIGHT = 1.0 + +# ----------------------------------------------------------------------------- +# TRAIN +# ----------------------------------------------------------------------------- +_C.TRAIN = CN() + +_C.TRAIN.IMS_PER_BATCH = 32 +_C.TRAIN.MAX_ITER = 90000 +_C.TRAIN.RESUME = False + +# ----------------------------------------------------------------------------- +# DATALOADER +# ----------------------------------------------------------------------------- +_C.DATALOADER = CN() + +_C.DATALOADER.SAMPLER_TRAIN = 'TrainingSampler' +_C.DATALOADER.TRAIN_SHUFFLE = True + +_C.DATALOADER.NUM_WORKERS = 4 + +# ----------------------------------------------------------------------------- +# DEBUG +# ----------------------------------------------------------------------------- +_C.DEBUG = CN() +_C.DEBUG.DEBUG = True +_C.DEBUG.DEBUG_FREQ = 100 +_C.DEBUG.TARGET_KEYS = ('semantic', 'center', 'offset', 'semantic_weights', 'center_weights', 'offset_weights') +_C.DEBUG.OUTPUT_KEYS = ('semantic', 'center', 'offset') +_C.DEBUG.KEEP_INTERVAL = 1000 + +# ----------------------------------------------------------------------------- +# TEST +# ----------------------------------------------------------------------------- +_C.TEST = CN() + +_C.TEST.GPUS = (0, ) +_C.TEST.CROP_SIZE = (1025, 2049) + +_C.TEST.SEMANTIC_FOLDER = 'semantic' +_C.TEST.INSTANCE_FOLDER = 'instance' +_C.TEST.PANOPTIC_FOLDER = 'panoptic' +_C.TEST.FOREGROUND_FOLDER = 'foreground' + +_C.TEST.EVAL_INSTANCE = False +_C.TEST.EVAL_PANOPTIC = False +_C.TEST.EVAL_FOREGROUND = False + +_C.TEST.MODEL_FILE = '' +_C.TEST.TEST_TIME_AUGMENTATION = False +_C.TEST.FLIP_TEST = False +_C.TEST.SCALE_LIST = 
[1] + +_C.TEST.DEBUG = False + +_C.TEST.ORACLE_SEMANTIC = False +_C.TEST.ORACLE_FOREGROUND = False +_C.TEST.ORACLE_CENTER = False +_C.TEST.ORACLE_OFFSET = False + +_C.TEST.INSTANCE_SCORE_TYPE = "semantic" + +# ----------------------------------------------------------------------------- +# POST PROCESSING +# Panoptic post-processing params +# ----------------------------------------------------------------------------- +_C.POST_PROCESSING = CN() +_C.POST_PROCESSING.CENTER_THRESHOLD = 0.1 +_C.POST_PROCESSING.NMS_KERNEL = 7 +_C.POST_PROCESSING.TOP_K_INSTANCE = 200 +_C.POST_PROCESSING.STUFF_AREA = 2048 + + +def update_config(cfg, args): + cfg.defrost() + + cfg.merge_from_file(args.cfg) + cfg.merge_from_list(args.opts) + + cfg.freeze() + + +if __name__ == '__main__': + import sys + + with open(sys.argv[1], 'w') as f: + print(_C, file=f) diff --git a/CDARTS_segmentation/segmentation/config/hrnet_config.py b/CDARTS_segmentation/segmentation/config/hrnet_config.py new file mode 100644 index 0000000..24992ad --- /dev/null +++ b/CDARTS_segmentation/segmentation/config/hrnet_config.py @@ -0,0 +1,130 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft +# Licensed under the MIT License. +# Create by Bin Xiao (Bin.Xiao@microsoft.com) +# Modified by Ke Sun (sunk@mail.ustc.edu.cn), Rainbowsecret (yuyua@microsoft.com) +# ------------------------------------------------------------------------------ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from yacs.config import CfgNode as CN + + +# configs for HRNet48 +HRNET_48 = CN() +HRNET_48.FINAL_CONV_KERNEL = 1 + +HRNET_48.STAGE1 = CN() +HRNET_48.STAGE1.NUM_MODULES = 1 +HRNET_48.STAGE1.NUM_BRANCHES = 1 +HRNET_48.STAGE1.NUM_BLOCKS = [4] +HRNET_48.STAGE1.NUM_CHANNELS = [64] +HRNET_48.STAGE1.BLOCK = 'BOTTLENECK' +HRNET_48.STAGE1.FUSE_METHOD = 'SUM' + +HRNET_48.STAGE2 = CN() +HRNET_48.STAGE2.NUM_MODULES = 1 +HRNET_48.STAGE2.NUM_BRANCHES = 2 +HRNET_48.STAGE2.NUM_BLOCKS = [4, 4] +HRNET_48.STAGE2.NUM_CHANNELS = [48, 96] +HRNET_48.STAGE2.BLOCK = 'BASIC' +HRNET_48.STAGE2.FUSE_METHOD = 'SUM' + +HRNET_48.STAGE3 = CN() +HRNET_48.STAGE3.NUM_MODULES = 4 +HRNET_48.STAGE3.NUM_BRANCHES = 3 +HRNET_48.STAGE3.NUM_BLOCKS = [4, 4, 4] +HRNET_48.STAGE3.NUM_CHANNELS = [48, 96, 192] +HRNET_48.STAGE3.BLOCK = 'BASIC' +HRNET_48.STAGE3.FUSE_METHOD = 'SUM' + +HRNET_48.STAGE4 = CN() +HRNET_48.STAGE4.NUM_MODULES = 3 +HRNET_48.STAGE4.NUM_BRANCHES = 4 +HRNET_48.STAGE4.NUM_BLOCKS = [4, 4, 4, 4] +HRNET_48.STAGE4.NUM_CHANNELS = [48, 96, 192, 384] +HRNET_48.STAGE4.BLOCK = 'BASIC' +HRNET_48.STAGE4.FUSE_METHOD = 'SUM' + + +# configs for HRNet32 +HRNET_32 = CN() +HRNET_32.FINAL_CONV_KERNEL = 1 + +HRNET_32.STAGE1 = CN() +HRNET_32.STAGE1.NUM_MODULES = 1 +HRNET_32.STAGE1.NUM_BRANCHES = 1 +HRNET_32.STAGE1.NUM_BLOCKS = [4] +HRNET_32.STAGE1.NUM_CHANNELS = [64] +HRNET_32.STAGE1.BLOCK = 'BOTTLENECK' +HRNET_32.STAGE1.FUSE_METHOD = 'SUM' + +HRNET_32.STAGE2 = CN() +HRNET_32.STAGE2.NUM_MODULES = 1 +HRNET_32.STAGE2.NUM_BRANCHES = 2 +HRNET_32.STAGE2.NUM_BLOCKS = [4, 4] +HRNET_32.STAGE2.NUM_CHANNELS = [32, 64] +HRNET_32.STAGE2.BLOCK = 'BASIC' +HRNET_32.STAGE2.FUSE_METHOD = 'SUM' + +HRNET_32.STAGE3 = CN() +HRNET_32.STAGE3.NUM_MODULES = 4 +HRNET_32.STAGE3.NUM_BRANCHES = 3 +HRNET_32.STAGE3.NUM_BLOCKS = [4, 4, 4] +HRNET_32.STAGE3.NUM_CHANNELS = [32, 64, 128] +HRNET_32.STAGE3.BLOCK = 'BASIC' +HRNET_32.STAGE3.FUSE_METHOD = 'SUM' + +HRNET_32.STAGE4 = CN() +HRNET_32.STAGE4.NUM_MODULES = 3 
+HRNET_32.STAGE4.NUM_BRANCHES = 4 +HRNET_32.STAGE4.NUM_BLOCKS = [4, 4, 4, 4] +HRNET_32.STAGE4.NUM_CHANNELS = [32, 64, 128, 256] +HRNET_32.STAGE4.BLOCK = 'BASIC' +HRNET_32.STAGE4.FUSE_METHOD = 'SUM' + + +# configs for HRNet18 +HRNET_18 = CN() +HRNET_18.FINAL_CONV_KERNEL = 1 + +HRNET_18.STAGE1 = CN() +HRNET_18.STAGE1.NUM_MODULES = 1 +HRNET_18.STAGE1.NUM_BRANCHES = 1 +HRNET_18.STAGE1.NUM_BLOCKS = [4] +HRNET_18.STAGE1.NUM_CHANNELS = [64] +HRNET_18.STAGE1.BLOCK = 'BOTTLENECK' +HRNET_18.STAGE1.FUSE_METHOD = 'SUM' + +HRNET_18.STAGE2 = CN() +HRNET_18.STAGE2.NUM_MODULES = 1 +HRNET_18.STAGE2.NUM_BRANCHES = 2 +HRNET_18.STAGE2.NUM_BLOCKS = [4, 4] +HRNET_18.STAGE2.NUM_CHANNELS = [18, 36] +HRNET_18.STAGE2.BLOCK = 'BASIC' +HRNET_18.STAGE2.FUSE_METHOD = 'SUM' + +HRNET_18.STAGE3 = CN() +HRNET_18.STAGE3.NUM_MODULES = 4 +HRNET_18.STAGE3.NUM_BRANCHES = 3 +HRNET_18.STAGE3.NUM_BLOCKS = [4, 4, 4] +HRNET_18.STAGE3.NUM_CHANNELS = [18, 36, 72] +HRNET_18.STAGE3.BLOCK = 'BASIC' +HRNET_18.STAGE3.FUSE_METHOD = 'SUM' + +HRNET_18.STAGE4 = CN() +HRNET_18.STAGE4.NUM_MODULES = 3 +HRNET_18.STAGE4.NUM_BRANCHES = 4 +HRNET_18.STAGE4.NUM_BLOCKS = [4, 4, 4, 4] +HRNET_18.STAGE4.NUM_CHANNELS = [18, 36, 72, 144] +HRNET_18.STAGE4.BLOCK = 'BASIC' +HRNET_18.STAGE4.FUSE_METHOD = 'SUM' + + +MODEL_CONFIGS = { + 'hrnet18': HRNET_18, + 'hrnet32': HRNET_32, + 'hrnet48': HRNET_48, +} diff --git a/CDARTS_segmentation/segmentation/data/__init__.py b/CDARTS_segmentation/segmentation/data/__init__.py new file mode 100644 index 0000000..c121b90 --- /dev/null +++ b/CDARTS_segmentation/segmentation/data/__init__.py @@ -0,0 +1,2 @@ +from .build import ( + build_dataset_from_cfg, build_train_loader_from_cfg, build_test_loader_from_cfg) diff --git a/CDARTS_segmentation/segmentation/data/build.py b/CDARTS_segmentation/segmentation/data/build.py new file mode 100644 index 0000000..4bb6bbe --- /dev/null +++ b/CDARTS_segmentation/segmentation/data/build.py @@ -0,0 +1,159 @@ +# ------------------------------------------------------------------------------ +# Builds dataloader. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import logging + +import torch +import numpy as np + +from .datasets import Cityscapes, CityscapesPanoptic, COCOPanoptic +from . import samplers +from segmentation.utils.comm import get_world_size +from segmentation.utils.env import seed_all_rng + + +def build_dataset_from_cfg(config, is_train=True): + """Builds dataset from configuration file. + Args: + config: the configuration file. + is_train: Bool, training or testing, it automatically handles data augmentation. + + Returns: + A torch Dataset. 
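+
+    Example (editorial sketch):
+        >>> # config.DATASET.DATASET selects 'cityscapes', 'cityscapes_panoptic'
+        >>> # or 'coco_panoptic' (the keys of dataset_map below)
+        >>> train_set = build_dataset_from_cfg(config, is_train=True)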
+ """ + dataset_map = { + 'cityscapes': Cityscapes, + 'cityscapes_panoptic': CityscapesPanoptic, + 'coco_panoptic': COCOPanoptic, + } + + dataset_cfg = { + 'cityscapes': dict( + root=config.DATASET.ROOT, + split=config.DATASET.TRAIN_SPLIT if is_train else config.DATASET.TEST_SPLIT, + is_train=is_train, + crop_size=config.DATASET.CROP_SIZE if is_train else config.TEST.CROP_SIZE, + mirror=config.DATASET.MIRROR, + min_scale=config.DATASET.MIN_SCALE, + max_scale=config.DATASET.MAX_SCALE, + scale_step_size=config.DATASET.SCALE_STEP_SIZE, + mean=config.DATASET.MEAN, + std=config.DATASET.STD + ), + 'cityscapes_panoptic': dict( + root=config.DATASET.ROOT, + split=config.DATASET.TRAIN_SPLIT if is_train else config.DATASET.TEST_SPLIT, + is_train=is_train, + crop_size=config.DATASET.CROP_SIZE if is_train else config.TEST.CROP_SIZE, + mirror=config.DATASET.MIRROR, + min_scale=config.DATASET.MIN_SCALE, + max_scale=config.DATASET.MAX_SCALE, + scale_step_size=config.DATASET.SCALE_STEP_SIZE, + mean=config.DATASET.MEAN, + std=config.DATASET.STD, + semantic_only=config.DATASET.SEMANTIC_ONLY, + ignore_stuff_in_offset=config.DATASET.IGNORE_STUFF_IN_OFFSET, + small_instance_area=config.DATASET.SMALL_INSTANCE_AREA, + small_instance_weight=config.DATASET.SMALL_INSTANCE_WEIGHT + ), + 'coco_panoptic': dict( + root=config.DATASET.ROOT, + split=config.DATASET.TRAIN_SPLIT if is_train else config.DATASET.TEST_SPLIT, + min_resize_value=config.DATASET.MIN_RESIZE_VALUE, + max_resize_value=config.DATASET.MAX_RESIZE_VALUE, + resize_factor=config.DATASET.RESIZE_FACTOR, + is_train=is_train, + crop_size=config.DATASET.CROP_SIZE if is_train else config.TEST.CROP_SIZE, + mirror=config.DATASET.MIRROR, + min_scale=config.DATASET.MIN_SCALE, + max_scale=config.DATASET.MAX_SCALE, + scale_step_size=config.DATASET.SCALE_STEP_SIZE, + mean=config.DATASET.MEAN, + std=config.DATASET.STD, + semantic_only=config.DATASET.SEMANTIC_ONLY, + ignore_stuff_in_offset=config.DATASET.IGNORE_STUFF_IN_OFFSET, + small_instance_area=config.DATASET.SMALL_INSTANCE_AREA, + small_instance_weight=config.DATASET.SMALL_INSTANCE_WEIGHT + ), + } + + dataset = dataset_map[config.DATASET.DATASET]( + **dataset_cfg[config.DATASET.DATASET] + ) + return dataset + + +def build_train_loader_from_cfg(config): + """Builds dataloader from configuration file. + Args: + config: the configuration file. + + Returns: + A torch Dataloader. 
+ """ + num_workers = get_world_size() + images_per_batch = config.TRAIN.IMS_PER_BATCH + assert ( + images_per_batch % num_workers == 0 + ), "TRAIN.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format( + images_per_batch, num_workers + ) + assert ( + images_per_batch >= num_workers + ), "TRAIN.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format( + images_per_batch, num_workers + ) + images_per_worker = images_per_batch // num_workers + + dataset = build_dataset_from_cfg(config, is_train=True) + + sampler_name = config.DATALOADER.SAMPLER_TRAIN + logger = logging.getLogger(__name__) + logger.info("Using training sampler {}".format(sampler_name)) + if sampler_name == "TrainingSampler": + sampler = samplers.TrainingSampler(len(dataset), shuffle=config.DATALOADER.TRAIN_SHUFFLE) + else: + raise ValueError("Unknown training sampler: {}".format(sampler_name)) + + batch_sampler = torch.utils.data.sampler.BatchSampler( + sampler, images_per_worker, drop_last=True + ) + # drop_last so the batch always have the same size + data_loader = torch.utils.data.DataLoader( + dataset, + num_workers=config.DATALOADER.NUM_WORKERS, + batch_sampler=batch_sampler, + worker_init_fn=worker_init_reset_seed, + ) + + return data_loader + + +def build_test_loader_from_cfg(config): + """Builds dataloader from configuration file. + Args: + config: the configuration file. + + Returns: + A torch Dataloader. + """ + dataset = build_dataset_from_cfg(config, is_train=False) + + sampler = samplers.InferenceSampler(len(dataset)) + # Always use 1 image per worker during inference since this is the + # standard when reporting inference time in papers. + batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False) + + data_loader = torch.utils.data.DataLoader( + dataset, + num_workers=config.DATALOADER.NUM_WORKERS, + batch_sampler=batch_sampler, + ) + + return data_loader + + +def worker_init_reset_seed(worker_id): + seed_all_rng(np.random.randint(2 ** 31) + worker_id) diff --git a/CDARTS_segmentation/segmentation/data/datasets/__init__.py b/CDARTS_segmentation/segmentation/data/datasets/__init__.py new file mode 100644 index 0000000..8f70169 --- /dev/null +++ b/CDARTS_segmentation/segmentation/data/datasets/__init__.py @@ -0,0 +1,4 @@ +from .base_dataset import BaseDataset +from .cityscapes import Cityscapes +from .cityscapes_panoptic import CityscapesPanoptic +from .coco_panoptic import COCOPanoptic diff --git a/CDARTS_segmentation/segmentation/data/datasets/base_dataset.py b/CDARTS_segmentation/segmentation/data/datasets/base_dataset.py new file mode 100644 index 0000000..73c7003 --- /dev/null +++ b/CDARTS_segmentation/segmentation/data/datasets/base_dataset.py @@ -0,0 +1,182 @@ +# ------------------------------------------------------------------------------ +# Base class for loading a segmentation Dataset. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import os + +import numpy as np +from PIL import Image, ImageOps + +import torch +from torch.utils import data + + +class BaseDataset(data.Dataset): + """ + Base class for segmentation dataset. + Arguments: + root: Str, root directory. + split: Str, data split, e.g. train/val/test. + is_train: Bool, for training or testing. + crop_size: Tuple, crop size. + mirror: Bool, whether to apply random horizontal flip. + min_scale: Float, min scale in scale augmentation. + max_scale: Float, max scale in scale augmentation. 
+        scale_step_size: Float, step size to select random scale.
+        mean: Tuple, image mean.
+        std: Tuple, image std.
+    """
+    def __init__(self,
+                 root,
+                 split,
+                 is_train=True,
+                 crop_size=(513, 1025),
+                 mirror=True,
+                 min_scale=0.5,
+                 max_scale=2.,
+                 scale_step_size=0.25,
+                 mean=(0.485, 0.456, 0.406),
+                 std=(0.229, 0.224, 0.225)):
+        self.root = root
+        self.split = split
+        self.is_train = is_train
+
+        self.crop_h, self.crop_w = crop_size
+
+        self.mirror = mirror
+        self.min_scale = min_scale
+        self.max_scale = max_scale
+        self.scale_step_size = scale_step_size
+
+        self.mean = mean
+        self.std = std
+
+        self.pad_value = tuple([int(v * 255) for v in self.mean])
+
+        # ======== override the following fields ========
+        self.ignore_label = 255
+        self.label_pad_value = (self.ignore_label, )
+        self.label_dtype = 'uint8'
+
+        # list of image filename (required)
+        self.img_list = []
+        # list of label filename (required)
+        self.ann_list = []
+        # list of instance dictionary (optional)
+        self.ins_list = []
+
+        self.has_instance = False
+        self.label_divisor = 1000
+
+        self.raw_label_transform = None
+        self.pre_augmentation_transform = None
+        self.transform = None
+        self.target_transform = None
+
+    def __len__(self):
+        return len(self.img_list)
+
+    def __getitem__(self, index):
+        # TODO: handle transform properly when there is no label
+        dataset_dict = {}
+        assert os.path.exists(self.img_list[index]), 'Path does not exist: {}'.format(self.img_list[index])
+        image = self.read_image(self.img_list[index], 'RGB')
+        if not self.is_train:
+            # Do not save this during training.
+            dataset_dict['raw_image'] = image.copy()
+        if self.ann_list is not None:
+            assert os.path.exists(self.ann_list[index]), 'Path does not exist: {}'.format(self.ann_list[index])
+            label = self.read_label(self.ann_list[index], self.label_dtype)
+        else:
+            label = None
+        if label is not None:
+            raw_label = label.copy()
+            if self.raw_label_transform is not None:
+                raw_label = self.raw_label_transform(raw_label, self.ins_list[index])['semantic']
+        else:
+            # No annotation for this split (e.g. test); see the TODO above.
+            raw_label = None
+        if not self.is_train:
+            # Do not save this during training.
+            dataset_dict['raw_label'] = raw_label
+        size = image.shape
+        dataset_dict['raw_size'] = np.array(size)
+        # To save prediction for official evaluation.
+        if self.ann_list is not None:
+            name = os.path.splitext(os.path.basename(self.ann_list[index]))[0]
+        # TODO: how to return the filename?
+        # dataset_dict['name'] = np.array(name)
+
+        # Resize and pad image to the same size before data augmentation.
+        if self.pre_augmentation_transform is not None:
+            image, label = self.pre_augmentation_transform(image, label)
+            size = image.shape
+            dataset_dict['size'] = np.array(size)
+        else:
+            dataset_dict['size'] = dataset_dict['raw_size']
+
+        # Apply data augmentation.
+        if self.transform is not None:
+            image, label = self.transform(image, label)
+
+        dataset_dict['image'] = image
+        if not self.has_instance:
+            dataset_dict['semantic'] = torch.as_tensor(label.astype('long'))
+            return dataset_dict
+
+        # Generate training target.
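+        # (Editorial note: for panoptic training `self.target_transform` is a
+        # PanopticTargetGenerator; the dict it returns -- semantic, center,
+        # offset, and the matching loss-weight maps -- is merged into
+        # `dataset_dict` below, one entry per key.)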
+ if self.target_transform is not None: + label_dict = self.target_transform(label, self.ins_list[index]) + for key in label_dict.keys(): + dataset_dict[key] = label_dict[key] + + return dataset_dict + + @staticmethod + def read_image(file_name, format=None): + image = Image.open(file_name) + + # capture and ignore this bug: https://github.com/python-pillow/Pillow/issues/3973 + try: + image = ImageOps.exif_transpose(image) + except Exception: + pass + + if format is not None: + # PIL only supports RGB, so convert to RGB and flip channels over below + conversion_format = format + if format == "BGR": + conversion_format = "RGB" + image = image.convert(conversion_format) + image = np.asarray(image) + if format == "BGR": + # flip channels if needed + image = image[:, :, ::-1] + # PIL squeezes out the channel dimension for "L", so make it HWC + if format == "L": + image = np.expand_dims(image, -1) + return image + + @staticmethod + def read_label(file_name, dtype='uint8'): + # In some cases, `uint8` is not enough for label + label = Image.open(file_name) + return np.asarray(label, dtype=dtype) + + def reverse_transform(self, image_tensor): + """Reverse the normalization on image. + Args: + image_tensor: torch.Tensor, the normalized image tensor. + Returns: + image: numpy.array, the original image before normalization. + """ + dtype = image_tensor.dtype + mean = torch.as_tensor(self.mean, dtype=dtype, device=image_tensor.device) + std = torch.as_tensor(self.std, dtype=dtype, device=image_tensor.device) + image_tensor.mul_(std[:, None, None]).add_(mean[:, None, None]) + image = image_tensor.mul(255)\ + .clamp(0, 255)\ + .byte()\ + .permute(1, 2, 0)\ + .cpu().numpy() + return image + + @staticmethod + def train_id_to_eval_id(): + return None diff --git a/CDARTS_segmentation/segmentation/data/datasets/cityscapes.py b/CDARTS_segmentation/segmentation/data/datasets/cityscapes.py new file mode 100644 index 0000000..a1ec794 --- /dev/null +++ b/CDARTS_segmentation/segmentation/data/datasets/cityscapes.py @@ -0,0 +1,150 @@ +# ------------------------------------------------------------------------------ +# Loads Cityscapes semantic dataset. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import glob +import os + +import numpy as np + +from .base_dataset import BaseDataset +from .utils import DatasetDescriptor +from ..transforms import build_transforms + +_CITYSCAPES_INFORMATION = DatasetDescriptor( + splits_to_sizes={'train': 2975, + 'trainval': 3475, + 'val': 500, + 'test': 1525}, + num_classes=19, + ignore_label=255, +) + +_CITYSCAPES_TRAIN_ID_TO_EVAL_ID = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 31, 32, 33] + +# A map from data type to folder name that saves the data. +_FOLDERS_MAP = { + 'image': 'leftImg8bit', + 'label': 'gtFine', +} + +# A map from data type to filename postfix. +_POSTFIX_MAP = { + 'image': '_leftImg8bit', + 'label': '_gtFine_labelTrainIds', +} + +# A map from data type to data format. +_DATA_FORMAT_MAP = { + 'image': 'png', + 'label': 'png', +} + + +class Cityscapes(BaseDataset): + """ + Cityscapes semantic segmentation dataset. + Arguments: + root: Str, root directory. + split: Str, data split, e.g. train/val/test. + is_train: Bool, for training or testing. + crop_size: Tuple, crop size. + mirror: Bool, whether to apply random horizontal flip. + min_scale: Float, min scale in scale augmentation. + max_scale: Float, max scale in scale augmentation. 
+ scale_step_size: Float, step size to select random scale. + mean: Tuple, image mean. + std: Tuple, image std. + """ + def __init__(self, + root, + split, + is_train=True, + crop_size=(513, 1025), + mirror=True, + min_scale=0.5, + max_scale=2., + scale_step_size=0.25, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + **kwargs): + super(Cityscapes, self).__init__(root, split, is_train, crop_size, mirror, min_scale, max_scale, + scale_step_size, mean, std) + + self.num_classes = _CITYSCAPES_INFORMATION.num_classes + self.ignore_label = _CITYSCAPES_INFORMATION.ignore_label + self.label_pad_value = (self.ignore_label, ) + + # Get image and annotation list. + self.img_list = self._get_files('image', self.split) + self.ann_list = self._get_files('label', self.split) + + assert len(self) == _CITYSCAPES_INFORMATION.splits_to_sizes[self.split] + + self.transform = build_transforms(self, is_train) + + def _get_files(self, data, dataset_split): + """Gets files for the specified data type and dataset split. + Args: + data: String, desired data ('image' or 'label'). + dataset_split: String, dataset split ('train', 'val', 'test') + Returns: + A list of sorted file names or None when getting label for test set. + """ + if data == 'label' and dataset_split == 'test': + return None + pattern = '*%s.%s' % (_POSTFIX_MAP[data], _DATA_FORMAT_MAP[data]) + search_files = os.path.join( + self.root, _FOLDERS_MAP[data], dataset_split, '*', pattern) + filenames = glob.glob(search_files) + return sorted(filenames) + + @staticmethod + def train_id_to_eval_id(): + return _CITYSCAPES_TRAIN_ID_TO_EVAL_ID + + def _convert_train_id_to_eval_id(self, prediction): + """Converts the predicted label for evaluation. + There are cases where the training labels are not equal to the evaluation + labels. This function is used to perform the conversion so that we could + evaluate the results on the evaluation server. + Args: + prediction: Semantic segmentation prediction. + Returns: + Semantic segmentation prediction whose labels have been changed. + """ + converted_prediction = prediction.copy() + for train_id, eval_id in enumerate(self.train_id_to_eval_id()): + converted_prediction[prediction == train_id] = eval_id + + return converted_prediction + + @staticmethod + def create_label_colormap(): + """Creates a label colormap used in CITYSCAPES segmentation benchmark. + Returns: + A colormap for visualizing segmentation results. + """ + colormap = np.zeros((256, 3), dtype=np.uint8) + colormap[0] = [128, 64, 128] + colormap[1] = [244, 35, 232] + colormap[2] = [70, 70, 70] + colormap[3] = [102, 102, 156] + colormap[4] = [190, 153, 153] + colormap[5] = [153, 153, 153] + colormap[6] = [250, 170, 30] + colormap[7] = [220, 220, 0] + colormap[8] = [107, 142, 35] + colormap[9] = [152, 251, 152] + colormap[10] = [70, 130, 180] + colormap[11] = [220, 20, 60] + colormap[12] = [255, 0, 0] + colormap[13] = [0, 0, 142] + colormap[14] = [0, 0, 70] + colormap[15] = [0, 60, 100] + colormap[16] = [0, 80, 100] + colormap[17] = [0, 0, 230] + colormap[18] = [119, 11, 32] + return colormap diff --git a/CDARTS_segmentation/segmentation/data/datasets/cityscapes_panoptic.py b/CDARTS_segmentation/segmentation/data/datasets/cityscapes_panoptic.py new file mode 100644 index 0000000..baa5174 --- /dev/null +++ b/CDARTS_segmentation/segmentation/data/datasets/cityscapes_panoptic.py @@ -0,0 +1,130 @@ +# ------------------------------------------------------------------------------ +# Loads Cityscapes panoptic dataset. 
+# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import json +import os + +import numpy as np + +from .cityscapes import Cityscapes +from .utils import DatasetDescriptor +from ..transforms import build_transforms, PanopticTargetGenerator, SemanticTargetGenerator + +_CITYSCAPES_INFORMATION = DatasetDescriptor( + splits_to_sizes={'train': 2975, + 'trainval': 3475, + 'val': 500, + 'test': 1525}, + num_classes=19, + ignore_label=255, +) + +# Add 1 void label. +_CITYSCAPES_PANOPTIC_TRAIN_ID_TO_EVAL_ID = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 31, 32, 33, 0] + +_CITYSCAPES_THING_LIST = [11, 12, 13, 14, 15, 16, 17, 18] + + +class CityscapesPanoptic(Cityscapes): + """ + Cityscapes panoptic segmentation dataset. + Arguments: + root: Str, root directory. + split: Str, data split, e.g. train/val/test. + is_train: Bool, for training or testing. + crop_size: Tuple, crop size. + mirror: Bool, whether to apply random horizontal flip. + min_scale: Float, min scale in scale augmentation. + max_scale: Float, max scale in scale augmentation. + scale_step_size: Float, step size to select random scale. + mean: Tuple, image mean. + std: Tuple, image std. + semantic_only: Bool, only use semantic segmentation label. + ignore_stuff_in_offset: Boolean, whether to ignore stuff region when training the offset branch. + small_instance_area: Integer, indicates largest area for small instances. + small_instance_weight: Integer, indicates semantic loss weights for small instances. + """ + def __init__(self, + root, + split, + is_train=True, + crop_size=(513, 1025), + mirror=True, + min_scale=0.5, + max_scale=2., + scale_step_size=0.25, + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + semantic_only=False, + ignore_stuff_in_offset=False, + small_instance_area=0, + small_instance_weight=1, + **kwargs): + super(CityscapesPanoptic, self).__init__(root, split, is_train, crop_size, mirror, min_scale, max_scale, + scale_step_size, mean, std) + + self.num_classes = _CITYSCAPES_INFORMATION.num_classes + self.ignore_label = _CITYSCAPES_INFORMATION.ignore_label + self.label_pad_value = (0, 0, 0) + + self.has_instance = True + self.label_divisor = 1000 + self.label_dtype = np.float32 + self.thing_list = _CITYSCAPES_THING_LIST + + # Get image and annotation list. 
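+        # (Editorial note: for non-test splits, the lists below are filled from
+        # cityscapes_panoptic_{split}_trainId.json, the file produced by
+        # cityscapesScripts' createPanopticImgs.py; its 'images' and
+        # 'annotations' entries are consumed in file order.)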
+        if split == 'test':
+            self.img_list = self._get_files('image', self.split)
+            self.ann_list = None
+            self.ins_list = None
+        else:
+            self.img_list = []
+            self.ann_list = []
+            self.ins_list = []
+            json_filename = os.path.join(self.root, 'gtFine', 'cityscapes_panoptic_{}_trainId.json'.format(self.split))
+            with open(json_filename) as f:
+                dataset = json.load(f)
+            for img in dataset['images']:
+                img_file_name = img['file_name']
+                self.img_list.append(os.path.join(
+                    self.root, 'leftImg8bit', self.split, img_file_name.split('_')[0],
+                    img_file_name.replace('_gtFine', '')))
+            for ann in dataset['annotations']:
+                ann_file_name = ann['file_name']
+                self.ann_list.append(os.path.join(
+                    self.root, 'gtFine', 'cityscapes_panoptic_{}_trainId'.format(self.split), ann_file_name))
+                self.ins_list.append(ann['segments_info'])
+
+        assert len(self) == _CITYSCAPES_INFORMATION.splits_to_sizes[self.split]
+
+        self.transform = build_transforms(self, is_train)
+        if semantic_only:
+            self.target_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id)
+        else:
+            self.target_transform = PanopticTargetGenerator(self.ignore_label, self.rgb2id, _CITYSCAPES_THING_LIST,
+                                                            sigma=8, ignore_stuff_in_offset=ignore_stuff_in_offset,
+                                                            small_instance_area=small_instance_area,
+                                                            small_instance_weight=small_instance_weight)
+        # Generates semantic label for evaluation.
+        self.raw_label_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id)
+
+    @staticmethod
+    def train_id_to_eval_id():
+        return _CITYSCAPES_PANOPTIC_TRAIN_ID_TO_EVAL_ID
+
+    @staticmethod
+    def rgb2id(color):
+        """Converts the color to panoptic label.
+        Color is created by `color = [segmentId % 256, segmentId // 256 % 256, segmentId // 256 // 256 % 256]`.
+        Args:
+            color: Ndarray or a tuple, color encoded image.
+        Returns:
+            Panoptic label.
+        """
+        if isinstance(color, np.ndarray) and len(color.shape) == 3:
+            if color.dtype == np.uint8:
+                color = color.astype(np.int32)
+            return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
+        return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
diff --git a/CDARTS_segmentation/segmentation/data/datasets/coco_panoptic.py b/CDARTS_segmentation/segmentation/data/datasets/coco_panoptic.py
new file mode 100644
index 0000000..6de52e3
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/data/datasets/coco_panoptic.py
@@ -0,0 +1,299 @@
+# ------------------------------------------------------------------------------
+# Loads COCO panoptic dataset.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import json
+import os
+
+import numpy as np
+
+from .base_dataset import BaseDataset
+from .utils import DatasetDescriptor
+from ..transforms import build_transforms, Resize, PanopticTargetGenerator, SemanticTargetGenerator
+
+_COCO_PANOPTIC_INFORMATION = DatasetDescriptor(
+    splits_to_sizes={'train2017': 118287,
+                     'trainval2017': 123287,
+                     'val2017': 5000,
+                     'test-dev2017': 20288,
+                     'test2017': 40670},  # `test` includes `test-dev` and `test-challenge`
+    num_classes=133,
+    ignore_label=255,
+)
+
+# Add 1 void label.
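+# (Editorial worked example for rgb2id above: an encoded pixel
+#  (R, G, B) = (10, 2, 0) decodes to panoptic id 10 + 256 * 2 + 65536 * 0 = 522.)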
+_COCO_PANOPTIC_TRAIN_ID_TO_EVAL_ID = ( + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 100, 107, 109, 112, + 118, 119, 122, 125, 128, 130, 133, 138, 141, 144, 145, 147, 148, 149, 151, 154, 155, 156, 159, 161, 166, 168, 171, + 175, 176, 177, 178, 180, 181, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 0]) + +_COCO_PANOPTIC_EVAL_ID_TO_TRAIN_ID = { + v: k for k, v in enumerate(_COCO_PANOPTIC_TRAIN_ID_TO_EVAL_ID[:-1]) +} + +_COCO_PANOPTIC_THING_LIST = list(range(80)) # the first 80 classes are `thing` classes + +COCO_CATEGORIES = [ + {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, + {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, + {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, + {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, + {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, + {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, + {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, + {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, + {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, + {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, + {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, + {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, + {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, + {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, + {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, + {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, + {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, + {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, + {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, + {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, + {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, + {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, + {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, + {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, + {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, + {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, + {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, + {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, + {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, + {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, + {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, + {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, + {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, + {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, + {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, + {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, + {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, + {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, + 
{"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, + {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, + {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, + {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, + {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, + {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, + {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, + {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, + {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"}, + {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, + {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, + {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, + {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, + {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, + {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, + {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, + {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, + {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, + {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, + {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, + {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, + {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, + {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, + {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, + {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, + {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, + {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, + {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, + {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, + {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, + {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, + {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, + {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, + {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, + {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, + {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, + {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, + {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, + {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, + {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, + {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, + {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, + {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"}, + {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"}, + {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, + {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, + {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, + {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, + {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, + {"color": [218, 88, 184], "isthing": 0, "id": 118, 
"name": "floor-wood"}, + {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, + {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, + {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, + {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, + {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, + {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, + {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, + {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, + {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"}, + {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"}, + {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, + {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, + {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, + {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, + {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, + {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, + {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"}, + {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, + {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, + {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, + {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, + {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, + {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, + {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, + {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, + {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, + {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, + {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, + {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, + {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, + {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, + {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, + {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, + {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, + {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, + {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, + {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"}, + {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, + {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"}, + {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, + {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"}, + {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, + {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, + {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, + {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}, +] + + +class COCOPanoptic(BaseDataset): + """ + COCO panoptic segmentation dataset. + Arguments: + root: Str, root directory. 
+        split: Str, data split, e.g. train/val/test.
+        is_train: Bool, for training or testing.
+        crop_size: Tuple, crop size.
+        mirror: Bool, whether to apply random horizontal flip.
+        min_scale: Float, min scale in scale augmentation.
+        max_scale: Float, max scale in scale augmentation.
+        scale_step_size: Float, step size to select random scale.
+        mean: Tuple, image mean.
+        std: Tuple, image std.
+        semantic_only: Bool, whether to use only the semantic segmentation label.
+        ignore_stuff_in_offset: Bool, whether to ignore stuff regions when training the offset branch.
+        small_instance_area: Integer, the largest area (in pixels) for an instance to count as small.
+        small_instance_weight: Integer, the semantic loss weight applied to small instances.
+    """
+    def __init__(self,
+                 root,
+                 split,
+                 min_resize_value=641,
+                 max_resize_value=641,
+                 resize_factor=32,
+                 is_train=True,
+                 crop_size=(641, 641),
+                 mirror=True,
+                 min_scale=0.5,
+                 max_scale=2.,
+                 scale_step_size=0.25,
+                 mean=(0.485, 0.456, 0.406),
+                 std=(0.229, 0.224, 0.225),
+                 semantic_only=False,
+                 ignore_stuff_in_offset=False,
+                 small_instance_area=0,
+                 small_instance_weight=1,
+                 **kwargs):
+        super(COCOPanoptic, self).__init__(root, split, is_train, crop_size, mirror, min_scale, max_scale,
+                                           scale_step_size, mean, std)
+
+        assert split in _COCO_PANOPTIC_INFORMATION.splits_to_sizes.keys()
+
+        self.num_classes = _COCO_PANOPTIC_INFORMATION.num_classes
+        self.ignore_label = _COCO_PANOPTIC_INFORMATION.ignore_label
+        self.label_pad_value = (0, 0, 0)
+
+        self.has_instance = True
+        self.label_divisor = 256
+        self.label_dtype = np.float32
+        self.thing_list = _COCO_PANOPTIC_THING_LIST
+
+        # Get image and annotation list.
+        if 'test' in split:
+            self.img_list = []
+            self.ann_list = None
+            self.ins_list = None
+            json_filename = os.path.join(self.root, 'annotations', 'image_info_{}.json'.format(self.split))
+            with open(json_filename) as f:
+                dataset = json.load(f)
+            for img in dataset['images']:
+                img_file_name = img['file_name']
+                self.img_list.append(os.path.join(self.root, 'test2017', img_file_name))
+        else:
+            self.img_list = []
+            self.ann_list = []
+            self.ins_list = []
+            json_filename = os.path.join(self.root, 'annotations', 'panoptic_{}_trainId.json'.format(self.split))
+            with open(json_filename) as f:
+                dataset = json.load(f)
+            # First sort by image id, so img_list[i] and ann_list[i] refer to the same image.
+            images = sorted(dataset['images'], key=lambda i: i['id'])
+            annotations = sorted(dataset['annotations'], key=lambda i: i['image_id'])
+            for img in images:
+                img_file_name = img['file_name']
+                self.img_list.append(os.path.join(self.root, self.split, img_file_name))
+            for ann in annotations:
+                ann_file_name = ann['file_name']
+                self.ann_list.append(os.path.join(
+                    self.root, 'annotations', 'panoptic_{}'.format(self.split), ann_file_name))
+                self.ins_list.append(ann['segments_info'])
+
+        assert len(self) == _COCO_PANOPTIC_INFORMATION.splits_to_sizes[self.split]
+
+        self.pre_augmentation_transform = Resize(min_resize_value, max_resize_value, resize_factor)
+        self.transform = build_transforms(self, is_train)
+        if semantic_only:
+            self.target_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id)
+        else:
+            self.target_transform = PanopticTargetGenerator(self.ignore_label, self.rgb2id, _COCO_PANOPTIC_THING_LIST,
+                                                            sigma=8, ignore_stuff_in_offset=ignore_stuff_in_offset,
+                                                            small_instance_area=small_instance_area,
+                                                            small_instance_weight=small_instance_weight)
+        # Generates semantic label for evaluation.
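+        # Note: unlike `target_transform`, this is assumed to be applied to the
+        # un-augmented label when evaluators need raw semantic ground truth.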
+        self.raw_label_transform = SemanticTargetGenerator(self.ignore_label, self.rgb2id)
+
+    @staticmethod
+    def train_id_to_eval_id():
+        return _COCO_PANOPTIC_TRAIN_ID_TO_EVAL_ID
+
+    @staticmethod
+    def rgb2id(color):
+        """Converts the color to panoptic label.
+        Color is created by `color = [segmentId % 256, segmentId // 256 % 256, segmentId // 256 // 256]`.
+        Args:
+            color: Ndarray or a tuple, color encoded image.
+        Returns:
+            Panoptic label.
+        """
+        if isinstance(color, np.ndarray) and len(color.shape) == 3:
+            if color.dtype == np.uint8:
+                color = color.astype(np.int32)
+            return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
+        return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
+
+    @staticmethod
+    def create_label_colormap():
+        """Creates a label colormap used in COCO panoptic benchmark.
+        Returns:
+            A colormap for visualizing segmentation results.
+        """
+        colormap = np.zeros((256, 3), dtype=np.uint8)
+        for i, color in enumerate(COCO_CATEGORIES):
+            colormap[i] = color['color']
+        return colormap
diff --git a/CDARTS_segmentation/segmentation/data/datasets/utils.py b/CDARTS_segmentation/segmentation/data/datasets/utils.py
new file mode 100644
index 0000000..3149ad6
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/data/datasets/utils.py
@@ -0,0 +1,18 @@
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/tensorflow/models/blob/master/research/deeplab/datasets/data_generator.py
+# ------------------------------------------------------------------------------
+
+import collections
+
+# Named tuple to describe the dataset properties.
+DatasetDescriptor = collections.namedtuple(
+    'DatasetDescriptor',
+    [
+        'splits_to_sizes',  # Splits of the dataset into training, val and test.
+        'num_classes',  # Number of semantic classes, including the
+                        # background class (if exists). For example, there
+                        # are 20 foreground classes + 1 background class in
+                        # the PASCAL VOC 2012 dataset. Thus, we set
+                        # num_classes=21.
+        'ignore_label',  # Ignore label value.
+    ])
diff --git a/CDARTS_segmentation/segmentation/data/samplers/__init__.py b/CDARTS_segmentation/segmentation/data/samplers/__init__.py
new file mode 100644
index 0000000..bd4fd75
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/data/samplers/__init__.py
@@ -0,0 +1 @@
+from .distributed_sampler import TrainingSampler, InferenceSampler
diff --git a/CDARTS_segmentation/segmentation/data/samplers/distributed_sampler.py b/CDARTS_segmentation/segmentation/data/samplers/distributed_sampler.py
new file mode 100644
index 0000000..865bf0b
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/data/samplers/distributed_sampler.py
@@ -0,0 +1,90 @@
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/samplers/distributed_sampler.py
+# Modified by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import itertools
+import math
+from collections import defaultdict
+from typing import Optional
+import torch
+from torch.utils.data.sampler import Sampler
+
+from segmentation.utils import comm
+
+
+class TrainingSampler(Sampler):
+    """
+    In training, we only care about the "infinite stream" of training data.
+    So this sampler produces an infinite stream of indices and
+    all workers cooperate to correctly shuffle the indices and sample different indices.
+    The sampler in each worker effectively produces `indices[worker_id::num_workers]`
+    where `indices` is an infinite stream of indices consisting of
+    `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
+    or `range(size) + range(size) + ...` (if shuffle is False)
+    """
+
+    def __init__(self, size, shuffle=True, seed=None):
+        """
+        Args:
+            size (int): the total number of data of the underlying dataset to sample from
+            shuffle (bool): whether to shuffle the indices or not
+            seed (int): the initial seed of the shuffle. Must be the same
+                across all workers. If None, will use a random seed shared
+                among workers (requires synchronization among all workers).
+        """
+        self._size = size
+        assert size > 0
+        self._shuffle = shuffle
+        if seed is None:
+            seed = comm.shared_random_seed()
+        self._seed = int(seed)
+
+        self._rank = comm.get_rank()
+        self._world_size = comm.get_world_size()
+
+    def __iter__(self):
+        start = self._rank
+        yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
+
+    def __len__(self):
+        return self._size
+
+    def _infinite_indices(self):
+        g = torch.Generator()
+        g.manual_seed(self._seed)
+        while True:
+            if self._shuffle:
+                yield from torch.randperm(self._size, generator=g)
+            else:
+                yield from torch.arange(self._size)
+
+
+class InferenceSampler(Sampler):
+    """
+    Produce indices for inference.
+    Inference needs to run on the __exact__ set of samples,
+    therefore when the total number of samples is not divisible by the number of workers,
+    this sampler produces a different number of samples on different workers.
+    """
+
+    def __init__(self, size):
+        """
+        Args:
+            size (int): the total number of data of the underlying dataset to sample from
+        """
+        self._size = size
+        assert size > 0
+        self._rank = comm.get_rank()
+        self._world_size = comm.get_world_size()
+
+        shard_size = (self._size - 1) // self._world_size + 1
+        begin = shard_size * self._rank
+        end = min(shard_size * (self._rank + 1), self._size)
+        self._local_indices = range(begin, end)
+
+    def __iter__(self):
+        yield from self._local_indices
+
+    def __len__(self):
+        return len(self._local_indices)
diff --git a/CDARTS_segmentation/segmentation/data/transforms/__init__.py b/CDARTS_segmentation/segmentation/data/transforms/__init__.py
new file mode 100644
index 0000000..c37854a
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/data/transforms/__init__.py
@@ -0,0 +1,3 @@
+from .build import build_transforms
+from .pre_augmentation_transforms import Resize
+from .target_transforms import PanopticTargetGenerator, SemanticTargetGenerator
diff --git a/CDARTS_segmentation/segmentation/data/transforms/build.py b/CDARTS_segmentation/segmentation/data/transforms/build.py
new file mode 100644
index 0000000..08742bc
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/data/transforms/build.py
@@ -0,0 +1,57 @@
+# ------------------------------------------------------------------------------
+# Builds transformation for both image and labels.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+from . import transforms as T
+
+
+def build_transforms(dataset, is_train=True):
+    if is_train:
+        min_scale = dataset.min_scale
+        max_scale = dataset.max_scale
+        scale_step_size = dataset.scale_step_size
+        crop_h = dataset.crop_h
+        crop_w = dataset.crop_w
+        pad_value = dataset.pad_value
+        ignore_label = dataset.label_pad_value
+        flip_prob = 0.5 if dataset.mirror else 0
+        mean = dataset.mean
+        std = dataset.std
+    else:
+        # no data augmentation
+        min_scale = 1
+        max_scale = 1
+        scale_step_size = 0
+        flip_prob = 0
+        crop_h = dataset.crop_h
+        crop_w = dataset.crop_w
+        pad_value = dataset.pad_value
+        ignore_label = dataset.label_pad_value
+        mean = dataset.mean
+        std = dataset.std
+
+    transforms = T.Compose(
+        [
+            T.RandomScale(
+                min_scale,
+                max_scale,
+                scale_step_size
+            ),
+            T.RandomCrop(
+                crop_h,
+                crop_w,
+                pad_value,
+                ignore_label,
+                random_pad=is_train
+            ),
+            T.RandomHorizontalFlip(flip_prob),
+            T.ToTensor(),
+            T.Normalize(
+                mean,
+                std
+            )
+        ]
+    )
+
+    return transforms
diff --git a/CDARTS_segmentation/segmentation/data/transforms/pre_augmentation_transforms.py b/CDARTS_segmentation/segmentation/data/transforms/pre_augmentation_transforms.py
new file mode 100644
index 0000000..b4d9864
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/data/transforms/pre_augmentation_transforms.py
@@ -0,0 +1,92 @@
+# ------------------------------------------------------------------------------
+# Builds transformation before data augmentation.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import warnings
+
+import cv2
+import math
+import numpy as np
+
+
+class Resize(object):
+    """
+    Resizes the image and label to satisfy min/max size constraints before data augmentation.
+    Reference: https://github.com/tensorflow/models/blob/master/research/deeplab/input_preprocess.py#L28
+    Arguments:
+        min_resize_value: Desired size of the smaller image side; no resize if set to None.
+        max_resize_value: Maximum allowed size of the larger image side; no limit if set to None.
+        resize_factor: Resized dimensions are a multiple of factor plus one.
+        keep_aspect_ratio: Boolean, keep aspect ratio or not. If True, the input
+            will be resized while keeping the original aspect ratio. If False, the
+            input will be resized to [max_resize_value, max_resize_value] without
+            keeping the original aspect ratio.
+        align_corners: If True, exactly align all 4 corners of input and output.
+    """
+    def __init__(self, min_resize_value=None, max_resize_value=None, resize_factor=None,
+                 keep_aspect_ratio=True, align_corners=False):
+        if min_resize_value is not None and min_resize_value < 0:
+            min_resize_value = None
+        if max_resize_value is not None and max_resize_value < 0:
+            max_resize_value = None
+        if resize_factor is not None and resize_factor < 0:
+            resize_factor = None
+        self.min_resize_value = min_resize_value
+        self.max_resize_value = max_resize_value
+        self.resize_factor = resize_factor
+        self.keep_aspect_ratio = keep_aspect_ratio
+        self.align_corners = align_corners
+
+        if self.align_corners:
+            warnings.warn('`align_corners = True` is not supported by opencv.')
+
+        if self.max_resize_value is not None:
+            # Modify the max_size to be a multiple of factor plus 1 and make sure the max dimension after resizing
+            # is no larger than max_size.
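+            # Worked example (using this repo's COCO defaults): with
+            # max_resize_value=641 and resize_factor=32, (641 - 1) % 32 == 0,
+            # so 641 is kept; a hypothetical max_resize_value=650 would be
+            # lowered to 650 - (649 % 32) = 641, i.e. 20 * 32 + 1.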
+            if self.resize_factor is not None:
+                self.max_resize_value = (self.max_resize_value - (self.max_resize_value - 1) % self.resize_factor)
+
+    def __call__(self, image, label):
+        if self.min_resize_value is None:
+            return image, label
+        [orig_height, orig_width, _] = image.shape
+        orig_min_size = np.minimum(orig_height, orig_width)
+
+        # Calculate the larger of the possible sizes
+        large_scale_factor = self.min_resize_value / orig_min_size
+        large_height = int(math.floor(orig_height * large_scale_factor))
+        large_width = int(math.floor(orig_width * large_scale_factor))
+        large_size = np.array([large_height, large_width])
+
+        new_size = large_size
+        if self.max_resize_value is not None:
+            # Calculate the smaller of the possible sizes, use that if the larger is too big.
+            orig_max_size = np.maximum(orig_height, orig_width)
+            small_scale_factor = self.max_resize_value / orig_max_size
+            small_height = int(math.floor(orig_height * small_scale_factor))
+            small_width = int(math.floor(orig_width * small_scale_factor))
+            small_size = np.array([small_height, small_width])
+
+            if np.max(large_size) > self.max_resize_value:
+                new_size = small_size
+
+        # Ensure that both output sides are multiples of factor plus one.
+        if self.resize_factor is not None:
+            new_size += (self.resize_factor - (new_size - 1) % self.resize_factor) % self.resize_factor
+            # If new_size exceeds the largest allowed size, step back by one factor.
+            new_size[new_size > self.max_resize_value] -= self.resize_factor
+
+        if not self.keep_aspect_ratio:
+            # If not keeping the aspect ratio, we resize everything to max_size, allowing
+            # us to do pre-processing without extra padding.
+            new_size = [np.max(new_size), np.max(new_size)]
+
+        # TODO: cv2 uses align_corner=False
+        # TODO: use fvcore (https://github.com/facebookresearch/fvcore/blob/master/fvcore/transforms/transform.py#L377)
+        image_dtype = image.dtype
+        label_dtype = label.dtype
+        # cv2 expects (width, height); `np.float` is deprecated, so cast via the builtin float.
+        image = cv2.resize(image.astype(float), (new_size[1], new_size[0]), interpolation=cv2.INTER_LINEAR)
+        label = cv2.resize(label.astype(float), (new_size[1], new_size[0]), interpolation=cv2.INTER_NEAREST)
+        return image.astype(image_dtype), label.astype(label_dtype)
diff --git a/CDARTS_segmentation/segmentation/data/transforms/target_transforms.py b/CDARTS_segmentation/segmentation/data/transforms/target_transforms.py
new file mode 100644
index 0000000..47a3f39
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/data/transforms/target_transforms.py
@@ -0,0 +1,200 @@
+# ------------------------------------------------------------------------------
+# Generates targets for Panoptic-DeepLab.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import numpy as np
+
+import torch
+
+
+class PanopticTargetGenerator(object):
+    """
+    Generates panoptic training target for Panoptic-DeepLab.
+    Annotation is assumed to have Cityscapes format.
+    Arguments:
+        ignore_label: Integer, the ignore label for semantic segmentation.
+        rgb2id: Function, panoptic label is encoded in a colored image; this function converts a color to the
+            corresponding panoptic label.
+        thing_list: List, a list of thing classes.
+        sigma: Integer, the sigma for the Gaussian kernel.
+        ignore_stuff_in_offset: Bool, whether to ignore stuff regions when training the offset branch.
+        small_instance_area: Integer, the largest area (in pixels) for an instance to count as small.
+        small_instance_weight: Integer, the semantic loss weight applied to small instances.
+        ignore_crowd_in_semantic: Bool, whether to ignore crowd regions in the semantic segmentation branch;
+            crowd regions are ignored in the original TensorFlow implementation.
+    """
+    def __init__(self, ignore_label, rgb2id, thing_list, sigma=8, ignore_stuff_in_offset=False,
+                 small_instance_area=0, small_instance_weight=1, ignore_crowd_in_semantic=False):
+        self.ignore_label = ignore_label
+        self.rgb2id = rgb2id
+        self.thing_list = thing_list
+        self.ignore_stuff_in_offset = ignore_stuff_in_offset
+        self.small_instance_area = small_instance_area
+        self.small_instance_weight = small_instance_weight
+        self.ignore_crowd_in_semantic = ignore_crowd_in_semantic
+
+        self.sigma = sigma
+        # Pre-compute a (6 * sigma + 3) x (6 * sigma + 3) Gaussian kernel, pasted around each instance center.
+        size = 6 * sigma + 3
+        x = np.arange(0, size, 1, float)
+        y = x[:, np.newaxis]
+        x0, y0 = 3 * sigma + 1, 3 * sigma + 1
+        self.g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
+
+    def __call__(self, panoptic, segments):
+        """Generates the training target.
+        reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py
+        reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18
+        Args:
+            panoptic: numpy.array, colored image encoding panoptic label.
+            segments: List, a list of dictionaries, one per segment, with fields:
+                - id: panoptic id, after decoding `panoptic`.
+                - category_id: semantic class id.
+                - area: segment area.
+                - bbox: segment bounding box.
+                - iscrowd: crowd region.
+        Returns:
+            A dictionary with fields:
+                - semantic: Tensor, semantic label, shape=(H, W).
+                - foreground: Tensor, foreground mask label, shape=(H, W).
+                - center: Tensor, center heatmap, shape=(1, H, W).
+                - center_points: List, center coordinates, with tuple (y-coord, x-coord).
+                - offset: Tensor, offset, shape=(2, H, W), first dim is (offset_y, offset_x).
+                - semantic_weights: Tensor, loss weight for semantic prediction, shape=(H, W).
+                - center_weights: Tensor, ignore region of center prediction, shape=(H, W), used as weights for
+                  center regression; 0 means ignore, 1 means has instance. Multiply this mask into the loss.
+                - offset_weights: Tensor, ignore region of offset prediction, shape=(H, W), used as weights for
+                  offset regression; 0 means ignore, 1 means has instance. Multiply this mask into the loss.
+ """ + panoptic = self.rgb2id(panoptic) + height, width = panoptic.shape[0], panoptic.shape[1] + semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_label + foreground = np.zeros_like(panoptic, dtype=np.uint8) + center = np.zeros((1, height, width), dtype=np.float32) + center_pts = [] + offset = np.zeros((2, height, width), dtype=np.float32) + y_coord = np.ones_like(panoptic, dtype=np.float32) + x_coord = np.ones_like(panoptic, dtype=np.float32) + y_coord = np.cumsum(y_coord, axis=0) - 1 + x_coord = np.cumsum(x_coord, axis=1) - 1 + # Generate pixel-wise loss weights + semantic_weights = np.ones_like(panoptic, dtype=np.uint8) + # 0: ignore, 1: has instance + # three conditions for a region to be ignored for instance branches: + # (1) It is labeled as `ignore_label` + # (2) It is crowd region (iscrowd=1) + # (3) (Optional) It is stuff region (for offset branch) + center_weights = np.zeros_like(panoptic, dtype=np.uint8) + offset_weights = np.zeros_like(panoptic, dtype=np.uint8) + for seg in segments: + cat_id = seg["category_id"] + if self.ignore_crowd_in_semantic: + if not seg['iscrowd']: + semantic[panoptic == seg["id"]] = cat_id + else: + semantic[panoptic == seg["id"]] = cat_id + if cat_id in self.thing_list: + foreground[panoptic == seg["id"]] = 1 + if not seg['iscrowd']: + # Ignored regions are not in `segments`. + # Handle crowd region. + center_weights[panoptic == seg["id"]] = 1 + if self.ignore_stuff_in_offset: + # Handle stuff region. + if cat_id in self.thing_list: + offset_weights[panoptic == seg["id"]] = 1 + else: + offset_weights[panoptic == seg["id"]] = 1 + if cat_id in self.thing_list: + # find instance center + mask_index = np.where(panoptic == seg["id"]) + if len(mask_index[0]) == 0: + # the instance is completely cropped + continue + + # Find instance area + ins_area = len(mask_index[0]) + if ins_area < self.small_instance_area: + semantic_weights[panoptic == seg["id"]] = self.small_instance_weight + + center_y, center_x = np.mean(mask_index[0]), np.mean(mask_index[1]) + center_pts.append([center_y, center_x]) + + # generate center heatmap + y, x = int(center_y), int(center_x) + # outside image boundary + if x < 0 or y < 0 or \ + x >= width or y >= height: + continue + sigma = self.sigma + # upper left + ul = int(np.round(x - 3 * sigma - 1)), int(np.round(y - 3 * sigma - 1)) + # bottom right + br = int(np.round(x + 3 * sigma + 2)), int(np.round(y + 3 * sigma + 2)) + + c, d = max(0, -ul[0]), min(br[0], width) - ul[0] + a, b = max(0, -ul[1]), min(br[1], height) - ul[1] + + cc, dd = max(0, ul[0]), min(br[0], width) + aa, bb = max(0, ul[1]), min(br[1], height) + center[0, aa:bb, cc:dd] = np.maximum( + center[0, aa:bb, cc:dd], self.g[a:b, c:d]) + + # generate offset (2, h, w) -> (y-dir, x-dir) + offset_y_index = (np.zeros_like(mask_index[0]), mask_index[0], mask_index[1]) + offset_x_index = (np.ones_like(mask_index[0]), mask_index[0], mask_index[1]) + offset[offset_y_index] = center_y - y_coord[mask_index] + offset[offset_x_index] = center_x - x_coord[mask_index] + + return dict( + semantic=torch.as_tensor(semantic.astype('long')), + foreground=torch.as_tensor(foreground.astype('long')), + center=torch.as_tensor(center.astype(np.float32)), + center_points=center_pts, + offset=torch.as_tensor(offset.astype(np.float32)), + semantic_weights=torch.as_tensor(semantic_weights.astype(np.float32)), + center_weights=torch.as_tensor(center_weights.astype(np.float32)), + offset_weights=torch.as_tensor(offset_weights.astype(np.float32)) + ) + + +class 
SemanticTargetGenerator(object):
+    """
+    Generates semantic training target only for Panoptic-DeepLab (no instance).
+    Annotation is assumed to have Cityscapes format.
+    Arguments:
+        ignore_label: Integer, the ignore label for semantic segmentation.
+        rgb2id: Function, panoptic label is encoded in a colored image; this function converts a color to the
+            corresponding panoptic label.
+    """
+    def __init__(self, ignore_label, rgb2id):
+        self.ignore_label = ignore_label
+        self.rgb2id = rgb2id
+
+    def __call__(self, panoptic, segments):
+        """Generates the training target.
+        reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py
+        reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18
+        Args:
+            panoptic: numpy.array, colored image encoding panoptic label.
+            segments: List, a list of dictionaries, one per segment, with fields:
+                - id: panoptic id, after decoding `panoptic`.
+                - category_id: semantic class id.
+                - area: segment area.
+                - bbox: segment bounding box.
+                - iscrowd: crowd region.
+        Returns:
+            A dictionary with fields:
+                - semantic: Tensor, semantic label, shape=(H, W).
+        """
+        panoptic = self.rgb2id(panoptic)
+        semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_label
+        for seg in segments:
+            cat_id = seg["category_id"]
+            semantic[panoptic == seg["id"]] = cat_id
+
+        return dict(
+            semantic=torch.as_tensor(semantic.astype('long'))
+        )
diff --git a/CDARTS_segmentation/segmentation/data/transforms/transforms.py b/CDARTS_segmentation/segmentation/data/transforms/transforms.py
new file mode 100644
index 0000000..a6404a6
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/data/transforms/transforms.py
@@ -0,0 +1,172 @@
+# ------------------------------------------------------------------------------
+# Data augmentation following DeepLab
+# (https://github.com/tensorflow/models/blob/master/research/deeplab/input_preprocess.py#L28).
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import random
+
+import cv2
+import numpy as np
+from torchvision.transforms import functional as F
+
+
+class Compose(object):
+    """
+    Composes a sequence of transforms.
+    Arguments:
+        transforms: A list of transforms.
+    """
+    def __init__(self, transforms):
+        self.transforms = transforms
+
+    def __call__(self, image, label):
+        for t in self.transforms:
+            image, label = t(image, label)
+        return image, label
+
+    def __repr__(self):
+        format_string = self.__class__.__name__ + "("
+        for t in self.transforms:
+            format_string += "\n"
+            format_string += "    {0}".format(t)
+        format_string += "\n)"
+        return format_string
+
+
+class ToTensor(object):
+    """
+    Converts image to torch Tensor.
+    """
+    def __call__(self, image, label):
+        return F.to_tensor(image), label
+
+
+class Normalize(object):
+    """
+    Normalizes image by mean and std.
+    """
+    def __init__(self, mean, std):
+        self.mean = mean
+        self.std = std
+
+    def __call__(self, image, label):
+        image = F.normalize(image, mean=self.mean, std=self.std)
+        return image, label
+
+
+class RandomScale(object):
+    """
+    Applies random scale augmentation.
+    Arguments:
+        min_scale: Minimum scale value.
+        max_scale: Maximum scale value.
+        scale_step_size: The step size from minimum to maximum value.
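+    Example:
+        With the dataset defaults min_scale=0.5, max_scale=2.0 and
+        scale_step_size=0.25, a factor is drawn from {0.5, 0.75, ..., 2.0}.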
+ """ + def __init__(self, min_scale, max_scale, scale_step_size): + self.min_scale = min_scale + self.max_scale = max_scale + self.scale_step_size = scale_step_size + + @staticmethod + def get_random_scale(min_scale_factor, max_scale_factor, step_size): + """Gets a random scale value. + Args: + min_scale_factor: Minimum scale value. + max_scale_factor: Maximum scale value. + step_size: The step size from minimum to maximum value. + Returns: + A random scale value selected between minimum and maximum value. + Raises: + ValueError: min_scale_factor has unexpected value. + """ + if min_scale_factor < 0 or min_scale_factor > max_scale_factor: + raise ValueError('Unexpected value of min_scale_factor.') + + if min_scale_factor == max_scale_factor: + return min_scale_factor + + # When step_size = 0, we sample the value uniformly from [min, max). + if step_size == 0: + return random.uniform(min_scale_factor, max_scale_factor) + + # When step_size != 0, we randomly select one discrete value from [min, max]. + num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1) + scale_factors = np.linspace(min_scale_factor, max_scale_factor, num_steps) + np.random.shuffle(scale_factors) + return scale_factors[0] + + def __call__(self, image, label): + f_scale = self.get_random_scale(self.min_scale, self.max_scale, self.scale_step_size) + # TODO: cv2 uses align_corner=False + # TODO: use fvcore (https://github.com/facebookresearch/fvcore/blob/master/fvcore/transforms/transform.py#L377) + image_dtype = image.dtype + label_dtype = label.dtype + image = cv2.resize(image.astype(np.float), None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR) + label = cv2.resize(label.astype(np.float), None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST) + return image.astype(image_dtype), label.astype(label_dtype) + + +class RandomCrop(object): + """ + Applies random crop augmentation. + Arguments: + crop_h: Integer, crop height size. + crop_w: Integer, crop width size. + pad_value: Tuple, pad value for image, length 3. + ignore_label: Tuple, pad value for label, length could be 1 (semantic) or 3 (panoptic). + random_pad: Bool, when crop size larger than image size, whether to randomly pad four boundaries, + or put image to top-left and only pad bottom and right boundaries. 
+ """ + def __init__(self, crop_h, crop_w, pad_value, ignore_label, random_pad): + self.crop_h = crop_h + self.crop_w = crop_w + self.pad_value = pad_value + self.ignore_label = ignore_label + self.random_pad = random_pad + + def __call__(self, image, label): + img_h, img_w = image.shape[0], image.shape[1] + # save dtype + image_dtype = image.dtype + label_dtype = label.dtype + # padding + pad_h = max(self.crop_h - img_h, 0) + pad_w = max(self.crop_w - img_w, 0) + if pad_h > 0 or pad_w > 0: + if self.random_pad: + pad_top = random.randint(0, pad_h) + pad_bottom = pad_h - pad_top + pad_left = random.randint(0, pad_w) + pad_right = pad_w - pad_left + else: + pad_top, pad_bottom, pad_left, pad_right = 0, pad_h, 0, pad_w + img_pad = cv2.copyMakeBorder(image, pad_top, pad_bottom, pad_left, pad_right, cv2.BORDER_CONSTANT, + value=self.pad_value) + label_pad = cv2.copyMakeBorder(label, pad_top, pad_bottom, pad_left, pad_right, cv2.BORDER_CONSTANT, + value=self.ignore_label) + else: + img_pad, label_pad = image, label + img_h, img_w = img_pad.shape[0], img_pad.shape[1] + h_off = random.randint(0, img_h - self.crop_h) + w_off = random.randint(0, img_w - self.crop_w) + image = np.asarray(img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w], np.float32) + label = np.asarray(label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w], np.float32) + return image.astype(image_dtype), label.astype(label_dtype) + + +class RandomHorizontalFlip(object): + """ + Applies random flip augmentation. + Arguments: + prob: Probability of flip. + """ + def __init__(self, prob=0.5): + self.prob = prob + + def __call__(self, image, label): + if random.random() < self.prob: + # https://discuss.pytorch.org/t/torch-from-numpy-not-support-negative-strides/3663 + image = image[:, ::-1].copy() + label = label[:, ::-1].copy() + return image, label diff --git a/CDARTS_segmentation/segmentation/evaluation/__init__.py b/CDARTS_segmentation/segmentation/evaluation/__init__.py new file mode 100644 index 0000000..a7d97b4 --- /dev/null +++ b/CDARTS_segmentation/segmentation/evaluation/__init__.py @@ -0,0 +1,5 @@ +from .semantic import SemanticEvaluator +from .instance import CityscapesInstanceEvaluator +from .panoptic import CityscapesPanopticEvaluator +from .coco_instance import COCOInstanceEvaluator +from .coco_panoptic import COCOPanopticEvaluator diff --git a/CDARTS_segmentation/segmentation/evaluation/coco_instance.py b/CDARTS_segmentation/segmentation/evaluation/coco_instance.py new file mode 100644 index 0000000..23d2944 --- /dev/null +++ b/CDARTS_segmentation/segmentation/evaluation/coco_instance.py @@ -0,0 +1,107 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/coco_evaluation.py +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import logging +from collections import OrderedDict +import os +import glob +import copy +import json + +import numpy as np + +from fvcore.common.file_io import PathManager +import pycocotools.mask as mask_util + + +class COCOInstanceEvaluator: + """ + Evaluate COCO instance segmentation + """ + def __init__(self, output_dir=None, train_id_to_eval_id=None, + gt_dir='./datasets/coco/annotations/instances_val2017.json'): + """ + Args: + output_dir (str): an output directory to dump results. + train_id_to_eval_id (list): maps training id to evaluation id. 
+            gt_dir (str): path to ground truth annotations.
+        """
+        if output_dir is None:
+            raise ValueError('Must provide an output directory.')
+        self._output_dir = output_dir
+        if self._output_dir:
+            PathManager.mkdirs(self._output_dir)
+        self._train_id_to_eval_id = train_id_to_eval_id
+
+        self._predictions = []
+        self._predictions_json = os.path.join(output_dir, 'predictions.json')
+
+        self._logger = logging.getLogger(__name__)
+
+        self._gt_dir = gt_dir
+
+    def update(self, instances, image_filename=None):
+        if image_filename is None:
+            raise ValueError('Need to provide image_filename.')
+        num_instances = len(instances)
+
+        for i in range(num_instances):
+            pred_class = instances[i]['pred_class']
+            if self._train_id_to_eval_id is not None:
+                pred_class = self._train_id_to_eval_id[pred_class]
+            image_id = int(os.path.basename(image_filename).split('.')[0])
+            score = instances[i]['score']
+            mask = instances[i]['pred_mask'].astype("uint8")
+            # Use RLE to encode the masks, because they are too large and take up memory
+            # since this evaluator stores outputs of the entire dataset.
+            mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
+            # "counts" is an array encoded by mask_util as a byte-stream. Python3's
+            # json writer, which always produces strings, cannot serialize a bytestream
+            # unless you decode it. Thankfully, utf-8 works out (which is also what
+            # the pycocotools/_mask.pyx does).
+            mask_rle["counts"] = mask_rle["counts"].decode("utf-8")
+
+            self._predictions.append(
+                {
+                    'image_id': image_id,
+                    'category_id': pred_class,
+                    'segmentation': mask_rle,
+                    'score': float(score)
+                }
+            )
+
+    def evaluate(self):
+        """
+        Returns:
+            The COCO-style evaluation statistics computed by COCOeval (AP, AP50, ...).
+        """
+        from pycocotools.coco import COCO
+        from pycocotools.cocoeval import COCOeval
+
+        if self._gt_dir is None:
+            raise ValueError('Must provide coco gt path for evaluation.')
+
+        self._logger.info("Evaluating results under {} ...".format(self._output_dir))
+
+        coco_gt = COCO(self._gt_dir)
+
+        coco_results = copy.deepcopy(self._predictions)
+        # When evaluating mask AP, if the results contain bbox, cocoapi will
+        # use the box area as the area of the instance, instead of the mask area.
+        # This leads to a different definition of small/medium/large.
+        # We remove the bbox field to let mask AP use mask area.
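+        # An entry at this point looks like (values illustrative only):
+        #   {'image_id': 397133, 'category_id': 1,
+        #    'segmentation': {'size': [427, 640], 'counts': '<utf-8 RLE>'},
+        #    'score': 0.98}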
+        for c in coco_results:
+            c.pop("bbox", None)
+
+        with PathManager.open(self._predictions_json, "w") as f:
+            f.write(json.dumps(coco_results))
+
+        coco_dt = coco_gt.loadRes(coco_results)
+        coco_eval = COCOeval(coco_gt, coco_dt, 'segm')
+
+        coco_eval.evaluate()
+        coco_eval.accumulate()
+        coco_eval.summarize()
+        return coco_eval.stats
diff --git a/CDARTS_segmentation/segmentation/evaluation/coco_panoptic.py b/CDARTS_segmentation/segmentation/evaluation/coco_panoptic.py
new file mode 100644
index 0000000..30c77f6
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/evaluation/coco_panoptic.py
@@ -0,0 +1,137 @@
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/panoptic_evaluation.py
+# Modified by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import logging
+from collections import OrderedDict
+import os
+import json
+
+import numpy as np
+from tabulate import tabulate
+
+from fvcore.common.file_io import PathManager
+from segmentation.utils import save_annotation
+
+logger = logging.getLogger(__name__)
+
+
+class COCOPanopticEvaluator:
+    """
+    Evaluate COCO panoptic segmentation
+    """
+    def __init__(self, output_dir=None, train_id_to_eval_id=None, label_divisor=256, void_label=65280,
+                 gt_dir='./datasets/coco', split='val2017', num_classes=133):
+        """
+        Args:
+            output_dir (str): an output directory to dump results.
+            train_id_to_eval_id (list): maps training id to evaluation id.
+            label_divisor (int): divisor that separates the class id from the instance id in a panoptic label.
+            void_label (int): panoptic label assigned to void regions.
+            gt_dir (str): path to ground truth annotations.
+            split (str): evaluation split.
+            num_classes (int): number of classes.
+        """
+        if output_dir is None:
+            raise ValueError('Must provide an output directory.')
+        self._output_dir = output_dir
+        if self._output_dir:
+            PathManager.mkdirs(self._output_dir)
+        self._panoptic_dir = os.path.join(self._output_dir, 'predictions')
+        if self._panoptic_dir:
+            PathManager.mkdirs(self._panoptic_dir)
+
+        self._predictions = []
+        self._predictions_json = os.path.join(output_dir, 'predictions.json')
+
+        self._train_id_to_eval_id = train_id_to_eval_id
+        self._label_divisor = label_divisor
+        self._void_label = void_label
+        self._num_classes = num_classes
+
+        self._logger = logging.getLogger(__name__)
+
+        self._gt_json_file = os.path.join(gt_dir, 'annotations', 'panoptic_{}.json'.format(split))
+        self._gt_folder = os.path.join(gt_dir, 'annotations', 'panoptic_{}'.format(split))
+        self._pred_json_file = os.path.join(output_dir, 'predictions.json')
+        self._pred_folder = self._panoptic_dir
+
+    def update(self, panoptic, image_filename=None, image_id=None):
+        from panopticapi.utils import id2rgb
+
+        if image_filename is None:
+            raise ValueError('Need to provide image_filename.')
+        if image_id is None:
+            raise ValueError('Need to provide image_id.')
+
+        # Change void region.
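+        # Panoptic ids encode class and instance as
+        #   pan_lab = train_id * label_divisor + instance_id,
+        # e.g. with label_divisor=256 a hypothetical instance 3 of train id 17
+        # is stored as 17 * 256 + 3 = 4355 and decodes to class 4355 // 256 = 17.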
+ panoptic[panoptic == self._void_label] = 0 + + segments_info = [] + for pan_lab in np.unique(panoptic): + pred_class = pan_lab // self._label_divisor + if self._train_id_to_eval_id is not None: + pred_class = self._train_id_to_eval_id[pred_class] + + segments_info.append( + { + 'id': int(pan_lab), + 'category_id': int(pred_class), + } + ) + + save_annotation(id2rgb(panoptic), self._panoptic_dir, image_filename, add_colormap=False) + self._predictions.append( + { + 'image_id': int(image_id), + 'file_name': image_filename + '.png', + 'segments_info': segments_info, + } + ) + + def evaluate(self): + from panopticapi.evaluation import pq_compute + + gt_json_file = self._gt_json_file + gt_folder = self._gt_folder + pred_json_file = self._pred_json_file + pred_folder = self._pred_folder + + with open(gt_json_file, "r") as f: + json_data = json.load(f) + json_data["annotations"] = self._predictions + with PathManager.open(self._predictions_json, "w") as f: + f.write(json.dumps(json_data)) + + pq_res = pq_compute(gt_json_file, pred_json_file, gt_folder, pred_folder) + + res = {} + res["PQ"] = 100 * pq_res["All"]["pq"] + res["SQ"] = 100 * pq_res["All"]["sq"] + res["RQ"] = 100 * pq_res["All"]["rq"] + res["PQ_th"] = 100 * pq_res["Things"]["pq"] + res["SQ_th"] = 100 * pq_res["Things"]["sq"] + res["RQ_th"] = 100 * pq_res["Things"]["rq"] + res["PQ_st"] = 100 * pq_res["Stuff"]["pq"] + res["SQ_st"] = 100 * pq_res["Stuff"]["sq"] + res["RQ_st"] = 100 * pq_res["Stuff"]["rq"] + + results = OrderedDict({"panoptic_seg": res}) + self._logger.info(results) + _print_panoptic_results(pq_res) + + return results + + +def _print_panoptic_results(pq_res): + headers = ["", "PQ", "SQ", "RQ", "#categories"] + data = [] + for name in ["All", "Things", "Stuff"]: + row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]] + data.append(row) + table = tabulate( + data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center" + ) + logger.info("Panoptic Evaluation Results:\n" + table) diff --git a/CDARTS_segmentation/segmentation/evaluation/instance.py b/CDARTS_segmentation/segmentation/evaluation/instance.py new file mode 100644 index 0000000..50a9320 --- /dev/null +++ b/CDARTS_segmentation/segmentation/evaluation/instance.py @@ -0,0 +1,97 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/cityscapes_evaluation.py +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import logging +from collections import OrderedDict +import os +import glob + +from fvcore.common.file_io import PathManager +from segmentation.utils import save_annotation + + +class CityscapesInstanceEvaluator: + """ + Evaluate Cityscapes instance segmentation + """ + def __init__(self, output_dir=None, train_id_to_eval_id=None, gt_dir='./datasets/cityscapes/gtFine/val'): + """ + Args: + output_dir (str): an output directory to dump results. + train_id_to_eval_id (list): maps training id to evaluation id. + gt_dir (str): path to ground truth annotations (gtFine). 
+ """ + if output_dir is None: + raise ValueError('Must provide a output directory.') + self._output_dir = output_dir + if self._output_dir: + PathManager.mkdirs(self._output_dir) + self._mask_dir = os.path.join(self._output_dir, 'mask') + if self._mask_dir: + PathManager.mkdirs(self._mask_dir) + self._train_id_to_eval_id = train_id_to_eval_id + + self._logger = logging.getLogger(__name__) + + self._gt_dir = gt_dir + + def update(self, instances, image_filename=None): + pred_txt = os.path.join(self._output_dir, image_filename + "_pred.txt") + num_instances = len(instances) + + with open(pred_txt, "w") as fout: + for i in range(num_instances): + pred_class = instances[i]['pred_class'] + if self._train_id_to_eval_id is not None: + pred_class = self._train_id_to_eval_id[pred_class] + + score = instances[i]['score'] + mask = instances[i]['pred_mask'].astype("uint8") + png_filename = os.path.join( + self._mask_dir, image_filename + "_{}_{}.png".format(i, pred_class) + ) + + save_annotation(mask, self._mask_dir, image_filename + "_{}_{}".format(i, pred_class), + add_colormap=False, scale_values=True) + fout.write("{} {} {}\n".format(os.path.join('mask', os.path.basename(png_filename)), pred_class, score)) + + def evaluate(self): + """ + Returns: + dict: has a key "segm", whose value is a dict of "AP" and "AP50". + """ + import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval + + if self._gt_dir is None: + raise ValueError('Must provide cityscapes path for evaluation.') + + self._logger.info("Evaluating results under {} ...".format(self._output_dir)) + + # set some global states in cityscapes evaluation API, before evaluating + cityscapes_eval.args.predictionPath = os.path.abspath(self._output_dir) + cityscapes_eval.args.predictionWalk = None + cityscapes_eval.args.JSONOutput = False + cityscapes_eval.args.colorized = False + cityscapes_eval.args.gtInstancesFile = os.path.join(self._output_dir, "gtInstances.json") + + # These lines are adopted from + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa + gt_dir = PathManager.get_local_path(self._gt_dir) + groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png")) + assert len( + groundTruthImgList + ), "Cannot find any ground truth images to use for evaluation. 
Searched for: {}".format( + cityscapes_eval.args.groundTruthSearch + ) + predictionImgList = [] + for gt in groundTruthImgList: + predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args)) + results = cityscapes_eval.evaluateImgLists( + predictionImgList, groundTruthImgList, cityscapes_eval.args + )["averages"] + + ret = OrderedDict() + ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100} + return ret diff --git a/CDARTS_segmentation/segmentation/evaluation/panoptic.py b/CDARTS_segmentation/segmentation/evaluation/panoptic.py new file mode 100644 index 0000000..5c4702c --- /dev/null +++ b/CDARTS_segmentation/segmentation/evaluation/panoptic.py @@ -0,0 +1,127 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/panoptic_evaluation.py +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import contextlib +import io +import logging +from collections import OrderedDict +import os +import json + +import numpy as np + +from fvcore.common.file_io import PathManager +from segmentation.utils import save_annotation + + +class CityscapesPanopticEvaluator: + """ + Evaluate panoptic segmentation + """ + def __init__(self, output_dir=None, train_id_to_eval_id=None, label_divisor=1000, void_label=255000, + gt_dir='./datasets/cityscapes', split='val', num_classes=19): + """ + Args: + corresponding pixels should be ignored. + output_dir (str): an output directory to dump results. + train_id_to_eval_id (list): maps training id to evaluation id. + label_divisor (int): + void_label (int): + gt_dir (str): path to ground truth annotations. + split (str): evaluation split. + num_classes (int): number of classes. + """ + if output_dir is None: + raise ValueError('Must provide a output directory.') + self._output_dir = output_dir + if self._output_dir: + PathManager.mkdirs(self._output_dir) + self._panoptic_dir = os.path.join(self._output_dir, 'predictions') + if self._panoptic_dir: + PathManager.mkdirs(self._panoptic_dir) + + self._predictions = [] + self._predictions_json = os.path.join(output_dir, 'predictions.json') + + self._train_id_to_eval_id = train_id_to_eval_id + self._label_divisor = label_divisor + self._void_label = void_label + self._num_classes = num_classes + + self._logger = logging.getLogger(__name__) + + self._gt_json_file = os.path.join(gt_dir, 'gtFine', 'cityscapes_panoptic_{}.json'.format(split)) + self._gt_folder = os.path.join(gt_dir, 'gtFine', 'cityscapes_panoptic_{}'.format(split)) + self._pred_json_file = os.path.join(output_dir, 'predictions.json') + self._pred_folder = self._panoptic_dir + self._resultsFile = os.path.join(output_dir, 'resultPanopticSemanticLabeling.json') + + @staticmethod + def id2rgb(id_map): + if isinstance(id_map, np.ndarray): + id_map_copy = id_map.copy() + rgb_shape = tuple(list(id_map.shape) + [3]) + rgb_map = np.zeros(rgb_shape, dtype=np.uint8) + for i in range(3): + rgb_map[..., i] = id_map_copy % 256 + id_map_copy //= 256 + return rgb_map + color = [] + for _ in range(3): + color.append(id_map % 256) + id_map //= 256 + return color + + def update(self, panoptic, image_filename=None, image_id=None): + if image_filename is None: + raise ValueError('Need to provide image_filename.') + if image_id is None: + raise ValueError('Need to provide image_id.') + + # Change void region. 
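+        # Same encoding as the COCO evaluator but with label_divisor=1000:
+        # e.g. a hypothetical instance 5 of train id 13 ('car') is stored as
+        # 13 * 1000 + 5 = 13005 and decodes to class 13005 // 1000 = 13.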
+
+    def evaluate(self):
+        import cityscapesscripts.evaluation.evalPanopticSemanticLabeling as cityscapes_eval
+
+        gt_json_file = self._gt_json_file
+        gt_folder = self._gt_folder
+        pred_json_file = self._pred_json_file
+        pred_folder = self._pred_folder
+        resultsFile = self._resultsFile
+
+        with open(gt_json_file, "r") as f:
+            json_data = json.load(f)
+        json_data["annotations"] = self._predictions
+        with PathManager.open(self._predictions_json, "w") as f:
+            f.write(json.dumps(json_data))
+
+        with contextlib.redirect_stdout(io.StringIO()):
+            results = cityscapes_eval.evaluatePanoptic(gt_json_file, gt_folder, pred_json_file, pred_folder, resultsFile)
+
+        self._logger.info(results)
+        return results
diff --git a/CDARTS_segmentation/segmentation/evaluation/semantic.py b/CDARTS_segmentation/segmentation/evaluation/semantic.py
new file mode 100644
index 0000000..f4ee468
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/evaluation/semantic.py
@@ -0,0 +1,106 @@
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/sem_seg_evaluation.py
+# Modified by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import logging
+from collections import OrderedDict
+
+import numpy as np
+
+from fvcore.common.file_io import PathManager
+from segmentation.utils import save_annotation
+
+
+class SemanticEvaluator:
+    """
+    Evaluate semantic segmentation
+    """
+    def __init__(self, num_classes, ignore_label=255, output_dir=None, train_id_to_eval_id=None):
+        """
+        Args:
+            num_classes (int): number of classes
+            ignore_label (int): value in semantic segmentation ground truth. Predictions for the
+                corresponding pixels should be ignored.
+            output_dir (str): an output directory to dump results.
+            train_id_to_eval_id (list): maps training id to evaluation id.
+        """
+        self._output_dir = output_dir
+        if self._output_dir:
+            PathManager.mkdirs(self._output_dir)
+        self._num_classes = num_classes
+        self._ignore_label = ignore_label
+        self._N = num_classes + 1  # store ignore label in the last class
+        self._train_id_to_eval_id = train_id_to_eval_id
+
+        self._conf_matrix = np.zeros((self._N, self._N), dtype=np.int64)
+        self._logger = logging.getLogger(__name__)
+
+    @staticmethod
+    def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id):
+        """Converts the predicted label for evaluation.
+        There are cases where the training labels are not equal to the evaluation
+        labels. This function is used to perform the conversion so that we could
+        evaluate the results on the evaluation server.
+        Args:
+            prediction: Semantic segmentation prediction.
+            train_id_to_eval_id (list): maps training id to evaluation id.
+        Returns:
+            Semantic segmentation prediction whose labels have been changed.
+        """
+        converted_prediction = prediction.copy()
+        for train_id, eval_id in enumerate(train_id_to_eval_id):
+            converted_prediction[prediction == train_id] = eval_id
+
+        return converted_prediction
+
+    def update(self, pred, gt, image_filename=None):
+        pred = pred.astype(int)
+        gt = gt.astype(int)
+        gt[gt == self._ignore_label] = self._num_classes
+
+        self._conf_matrix += np.bincount(
+            self._N * pred.reshape(-1) + gt.reshape(-1), minlength=self._N ** 2
+        ).reshape(self._N, self._N)
+
+        if self._output_dir:
+            if self._train_id_to_eval_id is not None:
+                pred = self._convert_train_id_to_eval_id(pred, self._train_id_to_eval_id)
+            if image_filename is None:
+                raise ValueError('Need to provide filename to save.')
+            save_annotation(
+                pred, self._output_dir, image_filename, add_colormap=False)
+
+    def evaluate(self):
+        """
+        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
+        * Mean intersection-over-union averaged across classes (mIoU)
+        * Frequency Weighted IoU (fwIoU)
+        * Mean pixel accuracy averaged across classes (mACC)
+        * Pixel Accuracy (pACC)
+        """
+        acc = np.zeros(self._num_classes, dtype=np.float64)
+        iou = np.zeros(self._num_classes, dtype=np.float64)
+        tp = self._conf_matrix.diagonal()[:-1].astype(np.float64)
+        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float64)
+        class_weights = pos_gt / np.sum(pos_gt)
+        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float64)
+        acc_valid = pos_gt > 0
+        acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
+        iou_valid = (pos_gt + pos_pred) > 0
+        union = pos_gt + pos_pred - tp
+        iou[iou_valid] = tp[iou_valid] / union[iou_valid]
+        macc = np.sum(acc) / np.sum(acc_valid)
+        miou = np.sum(iou) / np.sum(iou_valid)
+        fiou = np.sum(iou * class_weights)
+        pacc = np.sum(tp) / np.sum(pos_gt)
+
+        res = {}
+        res["mIoU"] = 100 * miou
+        res["fwIoU"] = 100 * fiou
+        res["mACC"] = 100 * macc
+        res["pACC"] = 100 * pacc
+
+        results = OrderedDict({"sem_seg": res})
+        self._logger.info(results)
+        return results
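`SemanticEvaluator` accumulates a single `(N+1) x (N+1)` confusion matrix with one `bincount` over `N * pred + gt`; the extra row and column absorb ignore-labelled pixels, and all four metrics fall out of that matrix. A worked sketch of the same bookkeeping on hypothetical two-class data:

```python
import numpy as np

# Hypothetical two-class data mirroring SemanticEvaluator.update/evaluate.
num_classes = 2
N = num_classes + 1                        # extra bin for the ignore label

pred = np.array([0, 0, 1, 1, 1, 0])
gt = np.array([0, 1, 1, 1, 0, 255])
gt[gt == 255] = num_classes                # fold ignored pixels into the last bin

conf = np.bincount(N * pred + gt, minlength=N ** 2).reshape(N, N)
# conf[p, g] counts pixels predicted as class p whose ground truth is g.

tp = conf.diagonal()[:-1].astype(np.float64)
pos_gt = conf[:-1, :-1].sum(axis=0).astype(np.float64)
pos_pred = conf[:-1, :-1].sum(axis=1).astype(np.float64)
iou = tp / (pos_gt + pos_pred - tp)        # here both classes are non-empty
print(iou, iou.mean())                     # [0.333.. 0.5], mIoU = 0.4166..
```

Slicing off the last row and column before computing `tp`, `pos_gt`, and `pos_pred` is what keeps the ignored pixels out of every metric.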
+ """ + converted_prediction = prediction.copy() + for train_id, eval_id in enumerate(train_id_to_eval_id): + converted_prediction[prediction == train_id] = eval_id + + return converted_prediction + + def update(self, pred, gt, image_filename=None): + pred = pred.astype(np.int) + gt = gt.astype(np.int) + gt[gt == self._ignore_label] = self._num_classes + + self._conf_matrix += np.bincount( + self._N * pred.reshape(-1) + gt.reshape(-1), minlength=self._N ** 2 + ).reshape(self._N, self._N) + + if self._output_dir: + if self._train_id_to_eval_id is not None: + pred = self._convert_train_id_to_eval_id(pred, self._train_id_to_eval_id) + if image_filename is None: + raise ValueError('Need to provide filename to save.') + save_annotation( + pred, self._output_dir, image_filename, add_colormap=False) + + def evaluate(self): + """ + Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval): + * Mean intersection-over-union averaged across classes (mIoU) + * Frequency Weighted IoU (fwIoU) + * Mean pixel accuracy averaged across classes (mACC) + * Pixel Accuracy (pACC) + """ + acc = np.zeros(self._num_classes, dtype=np.float) + iou = np.zeros(self._num_classes, dtype=np.float) + tp = self._conf_matrix.diagonal()[:-1].astype(np.float) + pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float) + class_weights = pos_gt / np.sum(pos_gt) + pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float) + acc_valid = pos_gt > 0 + acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid] + iou_valid = (pos_gt + pos_pred) > 0 + union = pos_gt + pos_pred - tp + iou[acc_valid] = tp[acc_valid] / union[acc_valid] + macc = np.sum(acc) / np.sum(acc_valid) + miou = np.sum(iou) / np.sum(iou_valid) + fiou = np.sum(iou * class_weights) + pacc = np.sum(tp) / np.sum(pos_gt) + + res = {} + res["mIoU"] = 100 * miou + res["fwIoU"] = 100 * fiou + res["mACC"] = 100 * macc + res["pACC"] = 100 * pacc + + results = OrderedDict({"sem_seg": res}) + self._logger.info(results) + return results diff --git a/CDARTS_segmentation/segmentation/model/__init__.py b/CDARTS_segmentation/segmentation/model/__init__.py new file mode 100644 index 0000000..163bf5e --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/__init__.py @@ -0,0 +1 @@ +from .build import build_segmentation_model_from_cfg diff --git a/CDARTS_segmentation/segmentation/model/backbone/__init__.py b/CDARTS_segmentation/segmentation/model/backbone/__init__.py new file mode 100644 index 0000000..b834e03 --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/backbone/__init__.py @@ -0,0 +1,5 @@ +from .resnet import * +from .mobilenet import * +from .mnasnet import * +from .hrnet import * +from .xception import * diff --git a/CDARTS_segmentation/segmentation/model/backbone/hrnet.py b/CDARTS_segmentation/segmentation/model/backbone/hrnet.py new file mode 100644 index 0000000..7211037 --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/backbone/hrnet.py @@ -0,0 +1,526 @@ +""" +MIT License + +Copyright (c) 2019 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be 
included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +import os +import logging +import torch.nn as nn +import torch.nn.functional as F +from torchvision.models.utils import load_state_dict_from_url + +logger = logging.getLogger('hrnet_backbone') + +__all__ = ['hrnet18', 'hrnet32', 'hrnet48'] + + +model_urls = { + # all the checkpoints come from https://github.com/HRNet/HRNet-Image-Classification + 'hrnet18': 'https://opr0mq.dm.files.1drv.com/y4mIoWpP2n-LUohHHANpC0jrOixm1FZgO2OsUtP2DwIozH5RsoYVyv_De5wDgR6XuQmirMV3C0AljLeB-zQXevfLlnQpcNeJlT9Q8LwNYDwh3TsECkMTWXCUn3vDGJWpCxQcQWKONr5VQWO1hLEKPeJbbSZ6tgbWwJHgHF7592HY7ilmGe39o5BhHz7P9QqMYLBts6V7QGoaKrr0PL3wvvR4w', + 'hrnet32': 'https://opr74a.dm.files.1drv.com/y4mKOuRSNGQQlp6wm_a9bF-UEQwp6a10xFCLhm4bqjDu6aSNW9yhDRM7qyx0vK0WTh42gEaniUVm3h7pg0H-W0yJff5qQtoAX7Zze4vOsqjoIthp-FW3nlfMD0-gcJi8IiVrMWqVOw2N3MbCud6uQQrTaEAvAdNjtjMpym1JghN-F060rSQKmgtq5R-wJe185IyW4-_c5_ItbhYpCyLxdqdEQ', + 'hrnet48': 'https://optgaw.dm.files.1drv.com/y4mWNpya38VArcDInoPaL7GfPMgcop92G6YRkabO1QTSWkCbo7djk8BFZ6LK_KHHIYE8wqeSAChU58NVFOZEvqFaoz392OgcyBrq_f8XGkusQep_oQsuQ7DPQCUrdLwyze_NlsyDGWot0L9agkQ-M_SfNr10ETlCF5R7BdKDZdupmcMXZc-IE3Ysw1bVHdOH4l-XEbEKFAi6ivPUbeqlYkRMQ' +} + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers 
downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class HighResolutionModule(nn.Module): + def __init__(self, num_branches, blocks, num_blocks, num_inchannels, + num_channels, fuse_method, multi_scale_output=True, norm_layer=None): + super(HighResolutionModule, self).__init__() + self._check_branches( + num_branches, blocks, num_blocks, num_inchannels, num_channels) + + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self.norm_layer = norm_layer + + self.num_inchannels = num_inchannels + self.fuse_method = fuse_method + self.num_branches = num_branches + + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches( + num_branches, blocks, num_blocks, num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=True) + + def _check_branches(self, num_branches, blocks, num_blocks, + num_inchannels, num_channels): + if num_branches != len(num_blocks): + error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format( + num_branches, len(num_blocks)) + logger.error(error_msg) + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format( + num_branches, len(num_channels)) + logger.error(error_msg) + raise ValueError(error_msg) + + if num_branches != len(num_inchannels): + error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format( + num_branches, len(num_inchannels)) + logger.error(error_msg) + raise ValueError(error_msg) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, + stride=1): + downsample = None + if stride != 1 or \ + self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.num_inchannels[branch_index], + num_channels[branch_index] * block.expansion, + kernel_size=1, stride=stride, bias=False), + self.norm_layer(num_channels[branch_index] * block.expansion), + ) + + layers = [] + layers.append(block(self.num_inchannels[branch_index], + num_channels[branch_index], stride, downsample, norm_layer=self.norm_layer)) + self.num_inchannels[branch_index] = \ + num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], + num_channels[branch_index], norm_layer=self.norm_layer)) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + 
fuse_layer = [] + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], + num_inchannels[i], + 1, + 1, + 0, + bias=False), + self.norm_layer(num_inchannels[i]))) + elif j == i: + fuse_layer.append(None) + else: + conv3x3s = [] + for k in range(i-j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], + num_outchannels_conv3x3, + 3, 2, 1, bias=False), + self.norm_layer(num_outchannels_conv3x3))) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], + num_outchannels_conv3x3, + 3, 2, 1, bias=False), + self.norm_layer(num_outchannels_conv3x3), + nn.ReLU(inplace=True))) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + return self.num_inchannels + + def forward(self, x): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + elif j > i: + width_output = x[i].shape[-1] + height_output = x[i].shape[-2] + y = y + F.interpolate( + self.fuse_layers[i][j](x[j]), + size=[height_output, width_output], + mode='bilinear', + align_corners=True + ) + else: + y = y + self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + + return x_fuse + + +blocks_dict = { + 'BASIC': BasicBlock, + 'BOTTLENECK': Bottleneck +} + + +class HighResolutionNet(nn.Module): + + def __init__(self, + cfg, + norm_layer=None): + super(HighResolutionNet, self).__init__() + + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self.norm_layer = norm_layer + # stem network + # stem net + self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, + bias=False) + self.bn1 = self.norm_layer(64) + self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, + bias=False) + self.bn2 = self.norm_layer(64) + self.relu = nn.ReLU(inplace=True) + + # stage 1 + self.stage1_cfg = cfg['STAGE1'] + num_channels = self.stage1_cfg['NUM_CHANNELS'][0] + block = blocks_dict[self.stage1_cfg['BLOCK']] + num_blocks = self.stage1_cfg['NUM_BLOCKS'][0] + self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) + stage1_out_channel = block.expansion*num_channels + + # stage 2 + self.stage2_cfg = cfg['STAGE2'] + num_channels = self.stage2_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage2_cfg['BLOCK']] + num_channels = [ + num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition1 = self._make_transition_layer( + [stage1_out_channel], num_channels) + self.stage2, pre_stage_channels = self._make_stage( + self.stage2_cfg, num_channels) + + # stage 3 + self.stage3_cfg = cfg['STAGE3'] + num_channels = self.stage3_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage3_cfg['BLOCK']] + num_channels = [ + num_channels[i] * block.expansion for i in range(len(num_channels))] + self.transition2 = self._make_transition_layer( + pre_stage_channels, num_channels) + self.stage3, pre_stage_channels = self._make_stage( + self.stage3_cfg, num_channels) + + # stage 4 + self.stage4_cfg = cfg['STAGE4'] + num_channels = self.stage4_cfg['NUM_CHANNELS'] + block = blocks_dict[self.stage4_cfg['BLOCK']] + num_channels = [ + num_channels[i] * block.expansion for i 
in range(len(num_channels))] + self.transition3 = self._make_transition_layer( + pre_stage_channels, num_channels) + self.stage4, pre_stage_channels = self._make_stage( + self.stage4_cfg, num_channels, multi_scale_output=True) + + + def _make_transition_layer( + self, num_channels_pre_layer, num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append(nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], + num_channels_cur_layer[i], + 3, + 1, + 1, + bias=False), + self.norm_layer(num_channels_cur_layer[i]), + nn.ReLU(inplace=True))) + else: + transition_layers.append(None) + else: + conv3x3s = [] + for j in range(i+1-num_branches_pre): + inchannels = num_channels_pre_layer[-1] + outchannels = num_channels_cur_layer[i] \ + if j == i-num_branches_pre else inchannels + conv3x3s.append(nn.Sequential( + nn.Conv2d( + inchannels, outchannels, 3, 2, 1, bias=False), + self.norm_layer(outchannels), + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + self.norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(inplanes, planes, stride, downsample, norm_layer=self.norm_layer)) + inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(inplanes, planes, norm_layer=self.norm_layer)) + + return nn.Sequential(*layers) + + def _make_stage(self, layer_config, num_inchannels, + multi_scale_output=True): + num_modules = layer_config['NUM_MODULES'] + num_branches = layer_config['NUM_BRANCHES'] + num_blocks = layer_config['NUM_BLOCKS'] + num_channels = layer_config['NUM_CHANNELS'] + block = blocks_dict[layer_config['BLOCK']] + fuse_method = layer_config['FUSE_METHOD'] + + modules = [] + for i in range(num_modules): + # multi_scale_output is only used last module + if not multi_scale_output and i == num_modules - 1: + reset_multi_scale_output = False + else: + reset_multi_scale_output = True + + modules.append( + HighResolutionModule(num_branches, + block, + num_blocks, + num_inchannels, + num_channels, + fuse_method, + reset_multi_scale_output, + norm_layer=self.norm_layer) + ) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels + + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [] + for i in range(self.stage2_cfg['NUM_BRANCHES']): + if self.transition1[i] is not None: + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.stage3_cfg['NUM_BRANCHES']): + if self.transition2[i] is not None: + if i < self.stage2_cfg['NUM_BRANCHES']: + x_list.append(self.transition2[i](y_list[i])) + else: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.stage4_cfg['NUM_BRANCHES']): + if self.transition3[i] is not None: + if i < 
self.stage3_cfg['NUM_BRANCHES']: + x_list.append(self.transition3[i](y_list[i])) + else: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + x = self.stage4(x_list) + + outputs = {} + # See note [TorchScript super()] + outputs['res2'] = x[0] # 1/4 + outputs['res3'] = x[1] # 1/8 + outputs['res4'] = x[2] # 1/16 + outputs['res5'] = x[3] # 1/32 + + return outputs + + +def _hrnet(arch, pretrained, progress, **kwargs): + try: + from ...config.hrnet_config import MODEL_CONFIGS + except ImportError: + from segmentation.config.hrnet_config import MODEL_CONFIGS + model = HighResolutionNet(MODEL_CONFIGS[arch], **kwargs) + if pretrained: + if int(os.environ.get("mapillary_pretrain", 0)): + logger.info("load the mapillary pretrained hrnet-w48 weights.") + model_url = model_urls['hrnet48_mapillary_pretrain'] + else: + model_url = model_urls[arch] + + state_dict = load_state_dict_from_url(model_url, + progress=progress) + model.load_state_dict(state_dict, strict=False) + return model + + +def hrnet18(pretrained=False, progress=True, **kwargs): + r"""HRNet-18 model + """ + return _hrnet('hrnet18', pretrained, progress, + **kwargs) + + +def hrnet32(pretrained=False, progress=True, **kwargs): + r"""HRNet-32 model + """ + return _hrnet('hrnet32', pretrained, progress, + **kwargs) + + +def hrnet48(pretrained=False, progress=True, **kwargs): + r"""HRNet-48 model + """ + return _hrnet('hrnet48', pretrained, progress, + **kwargs) diff --git a/CDARTS_segmentation/segmentation/model/backbone/mnasnet.py b/CDARTS_segmentation/segmentation/model/backbone/mnasnet.py new file mode 100644 index 0000000..db5461b --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/backbone/mnasnet.py @@ -0,0 +1,280 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/pytorch/vision/blob/master/torchvision/models/mnasnet.py +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import math +import warnings + +import torch +import torch.nn as nn +from torchvision.models.utils import load_state_dict_from_url + +__all__ = ['MNASNet', 'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0', 'mnasnet1_3'] + +_MODEL_URLS = { + "mnasnet0_5": + "https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth", + "mnasnet0_75": None, + "mnasnet1_0": + "https://download.pytorch.org/models/mnasnet1.0_top1_73.512-f206786ef8.pth", + "mnasnet1_3": None +} + +# Paper suggests 0.9997 momentum, for TensorFlow. Equivalent PyTorch momentum is +# 1.0 - tensorflow. +_BN_MOMENTUM = 1 - 0.9997 + + +class _InvertedResidual(nn.Module): + + def __init__(self, in_ch, out_ch, kernel_size, stride, expansion_factor, + bn_momentum=0.1): + super(_InvertedResidual, self).__init__() + assert stride in [1, 2] + assert kernel_size in [3, 5] + mid_ch = in_ch * expansion_factor + self.apply_residual = (in_ch == out_ch and stride == 1) + self.layers = nn.Sequential( + # Pointwise + nn.Conv2d(in_ch, mid_ch, 1, bias=False), + nn.BatchNorm2d(mid_ch, momentum=bn_momentum), + nn.ReLU(inplace=True), + # Depthwise + nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2, + stride=stride, groups=mid_ch, bias=False), + nn.BatchNorm2d(mid_ch, momentum=bn_momentum), + nn.ReLU(inplace=True), + # Linear pointwise. Note that there's no activation. 
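The HRNet forward pass above returns the four resolution branches as a `res2`-`res5` feature dict, mirroring the ResNet naming so downstream decoders can stay backbone-agnostic. A smoke-test sketch, assuming this repo's package layout and the standard HRNet-W18 widths; weights are random since `pretrained=False` needs no download:

```python
import torch
from segmentation.model.backbone.hrnet import hrnet18

# Random-weight smoke test: the dict maps 'res2'..'res5' to the four HRNet
# branches at strides 4, 8, 16 and 32 relative to the input.
model = hrnet18(pretrained=False)
feats = model(torch.zeros(2, 3, 224, 224))
for name, f in feats.items():
    print(name, tuple(f.shape))
# e.g. res2 (2, 18, 56, 56), res3 (2, 36, 28, 28),
#      res4 (2, 72, 14, 14), res5 (2, 144, 7, 7) for the W18 config
```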
+ nn.Conv2d(mid_ch, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch, momentum=bn_momentum)) + + def forward(self, input): + if self.apply_residual: + return self.layers(input) + input + else: + return self.layers(input) + + +def _stack(in_ch, out_ch, kernel_size, stride, exp_factor, repeats, + bn_momentum): + """ Creates a stack of inverted residuals. """ + assert repeats >= 1 + # First one has no skip, because feature map size changes. + first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, + bn_momentum=bn_momentum) + remaining = [] + for _ in range(1, repeats): + remaining.append( + _InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, + bn_momentum=bn_momentum)) + return nn.Sequential(first, *remaining) + + +def _round_to_multiple_of(val, divisor, round_up_bias=0.9): + """ Asymmetric rounding to make `val` divisible by `divisor`. With default + bias, will round up, unless the number is no more than 10% greater than the + smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88. """ + assert 0.0 < round_up_bias < 1.0 + new_val = max(divisor, int(val + divisor / 2) // divisor * divisor) + return new_val if new_val >= round_up_bias * val else new_val + divisor + + +def _get_depths(alpha): + """ Scales tensor depths as in reference MobileNet code, prefers rouding up + rather than down. """ + depths = [32, 16, 24, 40, 80, 96, 192, 320] + return [_round_to_multiple_of(depth * alpha, 8) for depth in depths] + + +class MNASNet(torch.nn.Module): + """ MNASNet, as described in https://arxiv.org/pdf/1807.11626.pdf. This + implements the B1 variant of the model. + >>> model = MNASNet(1000, 1.0) + >>> x = torch.rand(1, 3, 224, 224) + >>> y = model(x) + >>> y.dim() + 1 + >>> y.nelement() + 1000 + """ + # Version 2 adds depth scaling in the initial stages of the network. + _version = 2 + + def __init__(self, alpha, num_classes=1000, dropout=0.2): + super(MNASNet, self).__init__() + assert alpha > 0.0 + self.alpha = alpha + self.num_classes = num_classes + depths = _get_depths(alpha) + layers = [ + # First layer: regular conv. + nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False), + nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + # Depthwise separable, no skip. + nn.Conv2d(depths[0], depths[0], 3, padding=1, stride=1, + groups=depths[0], bias=False), + nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + nn.Conv2d(depths[0], depths[1], 1, padding=0, stride=1, bias=False), + nn.BatchNorm2d(depths[1], momentum=_BN_MOMENTUM), + # MNASNet blocks: stacks of inverted residuals. + _stack(depths[1], depths[2], 3, 2, 3, 3, _BN_MOMENTUM), + _stack(depths[2], depths[3], 5, 2, 3, 3, _BN_MOMENTUM), + _stack(depths[3], depths[4], 5, 2, 6, 3, _BN_MOMENTUM), + _stack(depths[4], depths[5], 3, 1, 6, 2, _BN_MOMENTUM), + _stack(depths[5], depths[6], 5, 2, 6, 4, _BN_MOMENTUM), + _stack(depths[6], depths[7], 3, 1, 6, 1, _BN_MOMENTUM), + # Final mapping to classifier input. + # nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False), + # nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM), + # nn.ReLU(inplace=True), + ] + self.layers = nn.Sequential(*layers) + # self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True), + # nn.Linear(1280, num_classes)) + self._initialize_weights() + + def forward(self, x): + outputs = {} + for i, module in enumerate(self.layers): + x = module(x) + outputs['layer_%d' % (i + 1)] = x + return outputs + # x = self.layers(x) + # # Equivalent to global avgpool and removing H and W dimensions. 
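`_round_to_multiple_of` above biases channel rounding upward: it snaps to the nearest multiple of `divisor`, then rounds up whenever snapping down would lose more than 10% (the `round_up_bias=0.9`). A few worked values, assuming this repo's package layout:

```python
from segmentation.model.backbone.mnasnet import _get_depths, _round_to_multiple_of

# The docstring's own examples, plus the depth table this produces at alpha=0.5.
print(_round_to_multiple_of(83, 8))   # 80: snapping down loses < 10%, so keep 80
print(_round_to_multiple_of(84, 8))   # 88: 84 rounds half-up to 88
print(_get_depths(0.5))               # [16, 8, 16, 24, 40, 48, 96, 160]
```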
+ # x = x.mean([2, 3]) + # return self.classifier(x) + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", + nonlinearity="relu") + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.kaiming_uniform_(m.weight, mode="fan_out", + nonlinearity="sigmoid") + nn.init.zeros_(m.bias) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + version = local_metadata.get("version", None) + assert version in [1, 2] + + if version == 1 and not self.alpha == 1.0: + # In the initial version of the model (v1), stem was fixed-size. + # All other layer configurations were the same. This will patch + # the model so that it's identical to v1. Model with alpha 1.0 is + # unaffected. + depths = _get_depths(self.alpha) + v1_stem = [ + nn.Conv2d(3, 32, 3, padding=1, stride=2, bias=False), + nn.BatchNorm2d(32, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + nn.Conv2d(32, 32, 3, padding=1, stride=1, groups=32, + bias=False), + nn.BatchNorm2d(32, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + nn.Conv2d(32, 16, 1, padding=0, stride=1, bias=False), + nn.BatchNorm2d(16, momentum=_BN_MOMENTUM), + _stack(16, depths[2], 3, 2, 3, 3, _BN_MOMENTUM), + ] + for idx, layer in enumerate(v1_stem): + self.layers[idx] = layer + + # The model is now identical to v1, and must be saved as such. + self._version = 1 + warnings.warn( + "A new version of MNASNet model has been implemented. " + "Your checkpoint was saved using the previous version. " + "This checkpoint will load and work as before, but " + "you may want to upgrade by training a newer model or " + "transfer learning from an updated ImageNet checkpoint.", + UserWarning) + + super(MNASNet, self)._load_from_state_dict( + state_dict, prefix, local_metadata, strict, missing_keys, + unexpected_keys, error_msgs) + + +def _load_pretrained(model_name, model, progress): + if model_name not in _MODEL_URLS or _MODEL_URLS[model_name] is None: + raise ValueError( + "No checkpoint is available for model type {}".format(model_name)) + checkpoint_url = _MODEL_URLS[model_name] + model.load_state_dict( + load_state_dict_from_url(checkpoint_url, progress=progress), strict=False) + + +def mnasnet0_5(pretrained=False, progress=True, **kwargs): + """MNASNet with depth multiplier of 0.5 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + `_. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(0.5, **kwargs) + if pretrained: + _load_pretrained("mnasnet0_5", model, progress) + return model + + +def mnasnet0_75(pretrained=False, progress=True, **kwargs): + """MNASNet with depth multiplier of 0.75 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + `_. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(0.75, **kwargs) + if pretrained: + _load_pretrained("mnasnet0_75", model, progress) + return model + + +def mnasnet1_0(pretrained=False, progress=True, **kwargs): + """MNASNet with depth multiplier of 1.0 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + `_. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(1.0, **kwargs) + if pretrained: + _load_pretrained("mnasnet1_0", model, progress) + return model + + +def mnasnet1_3(pretrained=False, progress=True, **kwargs): + """MNASNet with depth multiplier of 1.3 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + `_. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(1.3, **kwargs) + if pretrained: + _load_pretrained("mnasnet1_3", model, progress) + return model + + +if __name__ == '__main__': + import torch + + model = mnasnet0_5(pretrained=False) + print(model) + data = torch.zeros(1, 3, 224, 224) + results = model.forward(data) + + for key in results.keys(): + print(key, results[key].size()) diff --git a/CDARTS_segmentation/segmentation/model/backbone/mobilenet.py b/CDARTS_segmentation/segmentation/model/backbone/mobilenet.py new file mode 100644 index 0000000..5ae4a4d --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/backbone/mobilenet.py @@ -0,0 +1,214 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/pytorch/vision/blob/master/torchvision/models/mobilenet.py +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from torch import nn +from torchvision.models.utils import load_state_dict_from_url + + +__all__ = ['MobileNetV2', 'mobilenet_v2'] + + +model_urls = { + 'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth', +} + + +def _make_divisible(v, divisor, min_value=None): + """ + This function is taken from the original tf repo. + It ensures that all layers have a channel number that is divisible by 8 + It can be seen here: + https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + :param v: + :param divisor: + :param min_value: + :return: + """ + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
+ if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class ConvBNReLU(nn.Sequential): + def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1): + padding = (kernel_size - 1) // 2 + super(ConvBNReLU, self).__init__( + nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False), + nn.BatchNorm2d(out_planes), + nn.ReLU6(inplace=True) + ) + + +class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, expand_ratio): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2] + + hidden_dim = int(round(inp * expand_ratio)) + self.use_res_connect = self.stride == 1 and inp == oup + + layers = [] + if expand_ratio != 1: + # pw + layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1)) + layers.extend([ + # dw + ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +class MobileNetV2(nn.Module): + def __init__(self, + width_mult=1.0, + inverted_residual_setting=None, + round_nearest=8, + block=None): + """ + MobileNet V2 main class + Args: + width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount + inverted_residual_setting: Network structure + round_nearest (int): Round the number of channels in each layer to be a multiple of this number + Set to 1 to turn off rounding + block: Module specifying inverted residual building block for mobilenet + """ + super(MobileNetV2, self).__init__() + + if block is None: + block = InvertedResidual + input_channel = 32 + last_channel = 1280 + + if inverted_residual_setting is None: + inverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], + [6, 32, 3, 2], + [6, 64, 4, 2], + [6, 96, 3, 1], + [6, 160, 3, 2], + [6, 320, 1, 1], + ] + + # op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]), layer_1 + # op(ops.expanded_conv, expansion_size=expand_input(1, divisible_by=1), num_outputs=16), layer_2 + # op(ops.expanded_conv, stride=2, num_outputs=24), layer_3 + # op(ops.expanded_conv, stride=1, num_outputs=24), layer_4 + # op(ops.expanded_conv, stride=2, num_outputs=32), layer_5 + # op(ops.expanded_conv, stride=1, num_outputs=32), layer_6 + # op(ops.expanded_conv, stride=1, num_outputs=32), layer_7 + # op(ops.expanded_conv, stride=2, num_outputs=64), layer_8 + # op(ops.expanded_conv, stride=1, num_outputs=64), layer_9 + # op(ops.expanded_conv, stride=1, num_outputs=64), layer_10 + # op(ops.expanded_conv, stride=1, num_outputs=64), layer_11 + # op(ops.expanded_conv, stride=1, num_outputs=96), layer_12 + # op(ops.expanded_conv, stride=1, num_outputs=96), layer_13 + # op(ops.expanded_conv, stride=1, num_outputs=96), layer_14 + # op(ops.expanded_conv, stride=2, num_outputs=160), layer_15 + # op(ops.expanded_conv, stride=1, num_outputs=160), layer_16 + # op(ops.expanded_conv, stride=1, num_outputs=160), layer_17 + # op(ops.expanded_conv, stride=1, num_outputs=320), layer_18 ==> use this + # op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280) layer_19 + + # only check the first element, assuming user knows t,c,n,s are required + if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: + raise ValueError("inverted_residual_setting should be non-empty " + "or a 4-element list, got 
{}".format(inverted_residual_setting)) + + # building first layer + input_channel = _make_divisible(input_channel * width_mult, round_nearest) + self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) + features = [ConvBNReLU(3, input_channel, stride=2)] + # building inverted residual blocks + for t, c, n, s in inverted_residual_setting: + output_channel = _make_divisible(c * width_mult, round_nearest) + for i in range(n): + stride = s if i == 0 else 1 + features.append(block(input_channel, output_channel, stride, expand_ratio=t)) + input_channel = output_channel + # building last several layers + # features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1)) + # make it nn.Sequential + self.features = nn.Sequential(*features) + + # building classifier + # self.classifier = nn.Sequential( + # nn.Dropout(0.2), + # nn.Linear(self.last_channel, num_classes), + # ) + + # weight initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.zeros_(m.bias) + + def _forward_impl(self, x): + outputs = {} + # This exists since TorchScript doesn't support inheritance, so the superclass method + # (this one) needs to have a name other than `forward` that can be accessed in a subclass + # x = self.features(x) + for i, module in enumerate(self.features): + x = module(x) + outputs['layer_%d' % (i + 1)] = x + # Cannot use "squeeze" as batch-size can be 1 => must use reshape with x.shape[0] + # x = nn.functional.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1) + # x = self.classifier(x) + # return x + return outputs + + def forward(self, x): + return self._forward_impl(x) + + +def mobilenet_v2(pretrained=False, progress=True, **kwargs): + """ + Constructs a MobileNetV2 architecture from + `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" `_. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MobileNetV2(**kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'], + progress=progress) + model.load_state_dict(state_dict, strict=False) + return model + + +if __name__ == '__main__': + import torch + + model = mobilenet_v2(pretrained=False) + print(model) + data = torch.zeros(1, 3, 224, 224) + results = model.forward(data) + + for key in results.keys(): + print(key, results[key].size()) diff --git a/CDARTS_segmentation/segmentation/model/backbone/resnet.py b/CDARTS_segmentation/segmentation/model/backbone/resnet.py new file mode 100644 index 0000000..e936eba --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/backbone/resnet.py @@ -0,0 +1,351 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import torch.nn as nn +from torchvision.models.utils import load_state_dict_from_url + + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', + 'wide_resnet50_2', 'wide_resnet101_2'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', + 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', + 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', + 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = 
self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) + # while original implementation places the stride at the first 1x1 convolution(self.conv1) + # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. + # This variant is also known as ResNet V1.5 and improves accuracy according to + # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. + + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None): + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + # self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + # self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that 
the residual branch starts with zeros, and each residual block behaves like an identity. + # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x): + outputs = {} + # See note [TorchScript super()] + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + outputs['stem'] = x + + x = self.layer1(x) # 1/4 + outputs['res2'] = x + + x = self.layer2(x) # 1/8 + outputs['res3'] = x + + x = self.layer3(x) # 1/16 + outputs['res4'] = x + + x = self.layer4(x) # 1/32 + outputs['res5'] = x + + return outputs + + def forward(self, x): + return self._forward_impl(x) + + +def _resnet(arch, block, layers, pretrained, progress, **kwargs): + model = ResNet(block, layers, **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls[arch], + progress=progress) + model.load_state_dict(state_dict, strict=False) + return model + + +def resnet18(pretrained=False, progress=True, **kwargs): + r"""ResNet-18 model from + `"Deep Residual Learning for Image Recognition" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, + **kwargs) + + +def resnet34(pretrained=False, progress=True, **kwargs): + r"""ResNet-34 model from + `"Deep Residual Learning for Image Recognition" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet50(pretrained=False, progress=True, **kwargs): + r"""ResNet-50 model from + `"Deep Residual Learning for Image Recognition" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet101(pretrained=False, progress=True, **kwargs): + r"""ResNet-101 model from + `"Deep Residual Learning for Image Recognition" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, + **kwargs) + + +def resnet152(pretrained=False, progress=True, **kwargs): + 
r"""ResNet-152 model from + `"Deep Residual Learning for Image Recognition" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, + **kwargs) + + +def resnext50_32x4d(pretrained=False, progress=True, **kwargs): + r"""ResNeXt-50 32x4d model from + `"Aggregated Residual Transformation for Deep Neural Networks" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 4 + return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], + pretrained, progress, **kwargs) + + +def resnext101_32x8d(pretrained=False, progress=True, **kwargs): + r"""ResNeXt-101 32x8d model from + `"Aggregated Residual Transformation for Deep Neural Networks" `_ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 8 + return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], + pretrained, progress, **kwargs) + + +def wide_resnet50_2(pretrained=False, progress=True, **kwargs): + r"""Wide ResNet-50-2 model from + `"Wide Residual Networks" `_ + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['width_per_group'] = 64 * 2 + return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], + pretrained, progress, **kwargs) + + +def wide_resnet101_2(pretrained=False, progress=True, **kwargs): + r"""Wide ResNet-101-2 model from + `"Wide Residual Networks" `_ + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['width_per_group'] = 64 * 2 + return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], + pretrained, progress, **kwargs) diff --git a/CDARTS_segmentation/segmentation/model/backbone/xception.py b/CDARTS_segmentation/segmentation/model/backbone/xception.py new file mode 100644 index 0000000..1fa7e39 --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/backbone/xception.py @@ -0,0 +1,237 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/LikeLy-Journey/SegmenTron/blob/master/segmentron/models/backbones/xception.py +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from collections import OrderedDict + +import torch.nn as nn +from torchvision.models.utils import load_state_dict_from_url + +__all__ = ['Xception65', 'xception65'] + + +model_urls = { + 'xception65': 'https://github.com/LikeLy-Journey/SegmenTron/releases/download/v0.1.0/tf-xception65-270e81cf.pth', +} + + +class SeparableConv2d(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, relu_first=True, + bias=False, norm_layer=nn.BatchNorm2d): + super(SeparableConv2d, self).__init__() + depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, + stride=stride, padding=dilation, + dilation=dilation, groups=inplanes, bias=bias) + bn_depth = norm_layer(inplanes) + pointwise = nn.Conv2d(inplanes, planes, 1, bias=bias) + bn_point = norm_layer(planes) + + if relu_first: + self.block = nn.Sequential(OrderedDict([('relu', nn.ReLU()), + ('depthwise', depthwise), + ('bn_depth', bn_depth), + ('pointwise', pointwise), + ('bn_point', bn_point) + ])) + else: + self.block = nn.Sequential(OrderedDict([('depthwise', depthwise), + ('bn_depth', bn_depth), + ('relu1', nn.ReLU(inplace=True)), + ('pointwise', pointwise), + ('bn_point', bn_point), + ('relu2', nn.ReLU(inplace=True)) + ])) + + def forward(self, x): + return self.block(x) + + +class XceptionBlock(nn.Module): + def __init__(self, channel_list, stride=1, dilation=1, skip_connection_type='conv', relu_first=True, + low_feat=False, norm_layer=nn.BatchNorm2d): + super(XceptionBlock, self).__init__() + + assert len(channel_list) == 4 + self.skip_connection_type = skip_connection_type + self.relu_first = relu_first + self.low_feat = low_feat + + if self.skip_connection_type == 'conv': + self.conv = nn.Conv2d(channel_list[0], channel_list[-1], 1, stride=stride, bias=False) + self.bn = norm_layer(channel_list[-1]) + + self.sep_conv1 = SeparableConv2d(channel_list[0], channel_list[1], dilation=dilation, + relu_first=relu_first, norm_layer=norm_layer) + self.sep_conv2 = SeparableConv2d(channel_list[1], channel_list[2], dilation=dilation, + relu_first=relu_first, norm_layer=norm_layer) + self.sep_conv3 = SeparableConv2d(channel_list[2], channel_list[3], dilation=dilation, + relu_first=relu_first, stride=stride, norm_layer=norm_layer) + self.last_inp_channels = channel_list[3] + + def forward(self, inputs): + sc1 = self.sep_conv1(inputs) + sc2 = self.sep_conv2(sc1) + residual = self.sep_conv3(sc2) + + if self.skip_connection_type == 'conv': + shortcut = self.conv(inputs) + shortcut = self.bn(shortcut) + outputs = residual + shortcut + elif self.skip_connection_type == 'sum': + outputs = residual + inputs + elif self.skip_connection_type == 'none': + 
outputs = residual + else: + raise ValueError('Unsupported skip connection type.') + + if self.low_feat: + return outputs, sc2 + else: + return outputs + + +class Xception65(nn.Module): + def __init__(self, replace_stride_with_dilation=None, + norm_layer=None): + super(Xception65, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + if replace_stride_with_dilation[1]: + assert replace_stride_with_dilation[2] + output_stride = 8 + elif replace_stride_with_dilation[2]: + output_stride = 16 + else: + output_stride = 32 + + if output_stride == 32: + entry_block3_stride = 2 + middle_block_dilation = 1 + exit_block_dilations = (1, 1) + exit_block_stride = 2 + elif output_stride == 16: + entry_block3_stride = 2 + middle_block_dilation = 1 + exit_block_dilations = (1, 2) + exit_block_stride = 1 + elif output_stride == 8: + entry_block3_stride = 1 + middle_block_dilation = 2 + exit_block_dilations = (2, 4) + exit_block_stride = 1 + else: + raise NotImplementedError + + # Entry flow + self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False) + self.bn1 = norm_layer(32) + self.relu = nn.ReLU() + + self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False) + self.bn2 = norm_layer(64) + + self.block1 = XceptionBlock([64, 128, 128, 128], stride=2, norm_layer=norm_layer) + self.block2 = XceptionBlock([128, 256, 256, 256], stride=2, low_feat=True, norm_layer=norm_layer) + self.block3 = XceptionBlock([256, 728, 728, 728], stride=entry_block3_stride, low_feat=True, + norm_layer=norm_layer) + + # Middle flow (16 units) + self.block4 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block5 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block6 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block7 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block8 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block9 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block10 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block11 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block12 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block13 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block14 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block15 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, 
+ skip_connection_type='sum', norm_layer=norm_layer) + self.block16 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block17 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block18 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + self.block19 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation, + skip_connection_type='sum', norm_layer=norm_layer) + + # Exit flow + self.block20 = XceptionBlock([728, 728, 1024, 1024], stride=exit_block_stride, + dilation=exit_block_dilations[0], norm_layer=norm_layer) + self.block21 = XceptionBlock([1024, 1536, 1536, 2048], dilation=exit_block_dilations[1], + skip_connection_type='none', relu_first=False, norm_layer=norm_layer) + + def forward(self, x): + outputs = {} + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + outputs['stem'] = x + + x = self.block1(x) + x, c1 = self.block2(x) # b, h//4, w//4, 256 + outputs['res2'] = c1 + x, c2 = self.block3(x) # b, h//8, w//8, 728 + outputs['res3'] = c2 + + # Middle flow + x = self.block4(x) + x = self.block5(x) + x = self.block6(x) + x = self.block7(x) + x = self.block8(x) + x = self.block9(x) + x = self.block10(x) + x = self.block11(x) + x = self.block12(x) + x = self.block13(x) + x = self.block14(x) + x = self.block15(x) + x = self.block16(x) + x = self.block17(x) + x = self.block18(x) + c3 = self.block19(x) + outputs['res4'] = c3 + + # Exit flow + x = self.block20(c3) + c4 = self.block21(x) + outputs['res5'] = c4 + + return outputs + + +def xception65(pretrained=False, progress=True, **kwargs): + model = Xception65(**kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['xception65'], + progress=progress) + model.load_state_dict(state_dict, strict=False) + return model diff --git a/CDARTS_segmentation/segmentation/model/build.py b/CDARTS_segmentation/segmentation/model/build.py new file mode 100644 index 0000000..e0b0f83 --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/build.py @@ -0,0 +1,133 @@ +# ------------------------------------------------------------------------------ +# Builds model. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import torch + +from .backbone import resnet, mobilenet, mnasnet, hrnet, xception +from .meta_arch import DeepLabV3, DeepLabV3Plus, PanopticDeepLab +from .loss import RegularCE, OhemCE, DeepLabCE, L1Loss, MSELoss, CrossEntropyLoss + + +def build_segmentation_model_from_cfg(config): + """Builds segmentation model with specific configuration. + Args: + config: the configuration. + + Returns: + A nn.Module segmentation model. 
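+
+    Example (a minimal sketch, not from this patch; `get_default_config` and the YAML
+    path are assumed names -- any yacs-style config exposing the fields above works):
+        >>> cfg = get_default_config()
+        >>> cfg.merge_from_file('configs/panoptic_deeplab.yaml')
+        >>> model = build_segmentation_model_from_cfg(cfg)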
+ """ + model_map = { + 'deeplabv3': DeepLabV3, + 'deeplabv3plus': DeepLabV3Plus, + 'panoptic_deeplab': PanopticDeepLab, + } + + model_cfg = { + 'deeplabv3': dict( + replace_stride_with_dilation=config.MODEL.BACKBONE.DILATION, + in_channels=config.MODEL.DECODER.IN_CHANNELS, + feature_key=config.MODEL.DECODER.FEATURE_KEY, + decoder_channels=config.MODEL.DECODER.DECODER_CHANNELS, + atrous_rates=config.MODEL.DECODER.ATROUS_RATES, + num_classes=config.DATASET.NUM_CLASSES, + semantic_loss=build_loss_from_cfg(config.LOSS.SEMANTIC), + semantic_loss_weight=config.LOSS.SEMANTIC.WEIGHT, + ), + 'deeplabv3plus': dict( + replace_stride_with_dilation=config.MODEL.BACKBONE.DILATION, + in_channels=config.MODEL.DECODER.IN_CHANNELS, + feature_key=config.MODEL.DECODER.FEATURE_KEY, + low_level_channels=config.MODEL.DEEPLABV3PLUS.LOW_LEVEL_CHANNELS, + low_level_key=config.MODEL.DEEPLABV3PLUS.LOW_LEVEL_KEY, + low_level_channels_project=config.MODEL.DEEPLABV3PLUS.LOW_LEVEL_CHANNELS_PROJECT, + decoder_channels=config.MODEL.DECODER.DECODER_CHANNELS, + atrous_rates=config.MODEL.DECODER.ATROUS_RATES, + num_classes=config.DATASET.NUM_CLASSES, + semantic_loss=build_loss_from_cfg(config.LOSS.SEMANTIC), + semantic_loss_weight=config.LOSS.SEMANTIC.WEIGHT, + ), + 'panoptic_deeplab': dict( + replace_stride_with_dilation=config.MODEL.BACKBONE.DILATION, + in_channels=config.MODEL.DECODER.IN_CHANNELS, + feature_key=config.MODEL.DECODER.FEATURE_KEY, + low_level_channels=config.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_CHANNELS, + low_level_key=config.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_KEY, + low_level_channels_project=config.MODEL.PANOPTIC_DEEPLAB.LOW_LEVEL_CHANNELS_PROJECT, + decoder_channels=config.MODEL.DECODER.DECODER_CHANNELS, + atrous_rates=config.MODEL.DECODER.ATROUS_RATES, + num_classes=config.DATASET.NUM_CLASSES, + has_instance=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.ENABLE, + instance_low_level_channels_project=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.LOW_LEVEL_CHANNELS_PROJECT, + instance_decoder_channels=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.DECODER_CHANNELS, + instance_head_channels=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.HEAD_CHANNELS, + instance_aspp_channels=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.ASPP_CHANNELS, + instance_num_classes=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.NUM_CLASSES, + instance_class_key=config.MODEL.PANOPTIC_DEEPLAB.INSTANCE.CLASS_KEY, + semantic_loss=build_loss_from_cfg(config.LOSS.SEMANTIC), + semantic_loss_weight=config.LOSS.SEMANTIC.WEIGHT, + center_loss=build_loss_from_cfg(config.LOSS.CENTER), + center_loss_weight=config.LOSS.CENTER.WEIGHT, + offset_loss=build_loss_from_cfg(config.LOSS.OFFSET), + offset_loss_weight=config.LOSS.OFFSET.WEIGHT, + ), + } + + if config.MODEL.BACKBONE.META == 'resnet': + backbone = resnet.__dict__[config.MODEL.BACKBONE.NAME]( + pretrained=config.MODEL.BACKBONE.PRETRAINED, + replace_stride_with_dilation=model_cfg[config.MODEL.META_ARCHITECTURE]['replace_stride_with_dilation'] + ) + elif config.MODEL.BACKBONE.META == 'mobilenet_v2': + backbone = mobilenet.__dict__[config.MODEL.BACKBONE.NAME]( + pretrained=config.MODEL.BACKBONE.PRETRAINED, + ) + elif config.MODEL.BACKBONE.META == 'mnasnet': + backbone = mnasnet.__dict__[config.MODEL.BACKBONE.NAME]( + pretrained=config.MODEL.BACKBONE.PRETRAINED, + ) + elif config.MODEL.BACKBONE.META == 'hrnet': + backbone = hrnet.__dict__[config.MODEL.BACKBONE.NAME]( + pretrained=config.MODEL.BACKBONE.PRETRAINED, + ) + elif config.MODEL.BACKBONE.META == 'xception': + backbone = xception.__dict__[config.MODEL.BACKBONE.NAME]( + 
pretrained=config.MODEL.BACKBONE.PRETRAINED, + replace_stride_with_dilation=model_cfg[config.MODEL.META_ARCHITECTURE]['replace_stride_with_dilation'] + ) + else: + raise ValueError('Unknown meta backbone {}, please first implement it.'.format(config.MODEL.BACKBONE.META)) + + model = model_map[config.MODEL.META_ARCHITECTURE]( + backbone, + **model_cfg[config.MODEL.META_ARCHITECTURE] + ) + # set batchnorm momentum + for module in model.modules(): + if isinstance(module, torch.nn.BatchNorm2d): + module.momentum = config.MODEL.BN_MOMENTUM + return model + + +def build_loss_from_cfg(config): + """Builds loss function with specific configuration. + Args: + config: the configuration. + + Returns: + A nn.Module loss. + """ + if config.NAME == 'cross_entropy': + # return CrossEntropyLoss(ignore_index=config.IGNORE, reduction='mean') + return RegularCE(ignore_label=config.IGNORE) + elif config.NAME == 'ohem': + return OhemCE(ignore_label=config.IGNORE, threshold=config.THRESHOLD, min_kept=config.MIN_KEPT) + elif config.NAME == 'hard_pixel_mining': + return DeepLabCE(ignore_label=config.IGNORE, top_k_percent_pixels=config.TOP_K_PERCENT) + elif config.NAME == 'mse': + return MSELoss(reduction=config.REDUCTION) + elif config.NAME == 'l1': + return L1Loss(reduction=config.REDUCTION) + else: + raise ValueError('Unknown loss type: {}'.format(config.NAME)) diff --git a/CDARTS_segmentation/segmentation/model/decoder/__init__.py b/CDARTS_segmentation/segmentation/model/decoder/__init__.py new file mode 100644 index 0000000..825fad9 --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/decoder/__init__.py @@ -0,0 +1,4 @@ +from .aspp import ASPP +from .deeplabv3 import DeepLabV3Decoder +from .deeplabv3plus import DeepLabV3PlusDecoder +from .panoptic_deeplab import PanopticDeepLabDecoder diff --git a/CDARTS_segmentation/segmentation/model/decoder/aspp.py b/CDARTS_segmentation/segmentation/model/decoder/aspp.py new file mode 100644 index 0000000..7d98676 --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/decoder/aspp.py @@ -0,0 +1,76 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/deeplabv3.py +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import torch +from torch import nn +from torch.nn import functional as F + +__all__ = ["ASPP"] + + +class ASPPConv(nn.Sequential): + def __init__(self, in_channels, out_channels, dilation): + modules = [ + nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU() + ] + super(ASPPConv, self).__init__(*modules) + + +class ASPPPooling(nn.Module): + def __init__(self, in_channels, out_channels): + super(ASPPPooling, self).__init__() + self.aspp_pooling = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(in_channels, out_channels, 1, bias=False), + nn.ReLU() + ) + + def set_image_pooling(self, pool_size=None): + if pool_size is None: + self.aspp_pooling[0] = nn.AdaptiveAvgPool2d(1) + else: + self.aspp_pooling[0] = nn.AvgPool2d(kernel_size=pool_size, stride=1) + + def forward(self, x): + size = x.shape[-2:] + x = self.aspp_pooling(x) + return F.interpolate(x, size=size, mode='bilinear', align_corners=True) + + +class ASPP(nn.Module): + def __init__(self, in_channels, out_channels, atrous_rates): + super(ASPP, self).__init__() + # out_channels = 256 + modules = [] + 
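+        # Five parallel branches are assembled below: a 1x1 projection, three 3x3
+        # atrous convolutions with the given rates, and global image pooling;
+        # forward() concatenates all five outputs and projects them back down.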
+        modules.append(nn.Sequential(
+            nn.Conv2d(in_channels, out_channels, 1, bias=False),
+            nn.BatchNorm2d(out_channels),
+            nn.ReLU()))
+
+        rate1, rate2, rate3 = tuple(atrous_rates)
+        modules.append(ASPPConv(in_channels, out_channels, rate1))
+        modules.append(ASPPConv(in_channels, out_channels, rate2))
+        modules.append(ASPPConv(in_channels, out_channels, rate3))
+        modules.append(ASPPPooling(in_channels, out_channels))
+
+        self.convs = nn.ModuleList(modules)
+
+        self.project = nn.Sequential(
+            nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
+            nn.BatchNorm2d(out_channels),
+            nn.ReLU(),
+            nn.Dropout(0.5))
+
+    def set_image_pooling(self, pool_size):
+        self.convs[-1].set_image_pooling(pool_size)
+
+    def forward(self, x):
+        res = []
+        for conv in self.convs:
+            res.append(conv(x))
+        res = torch.cat(res, dim=1)
+        return self.project(res)
diff --git a/CDARTS_segmentation/segmentation/model/decoder/conv_module.py b/CDARTS_segmentation/segmentation/model/decoder/conv_module.py
new file mode 100644
index 0000000..a2c9c46
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/model/decoder/conv_module.py
@@ -0,0 +1,73 @@
+# ------------------------------------------------------------------------------
+# Common modules.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+from functools import partial
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+
+def basic_conv(in_planes, out_planes, kernel_size, stride=1, padding=1, groups=1,
+               with_bn=True, with_relu=True):
+    """convolution with bn and relu"""
+    module = []
+    has_bias = not with_bn
+    module.append(
+        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups,
+                  bias=has_bias)
+    )
+    if with_bn:
+        module.append(nn.BatchNorm2d(out_planes))
+    if with_relu:
+        module.append(nn.ReLU())
+    return nn.Sequential(*module)
+
+
+def depthwise_separable_conv(in_planes, out_planes, kernel_size, stride=1, padding=1, groups=1,
+                             with_bn=True, with_relu=True):
+    """depthwise separable convolution with bn and relu"""
+    del groups
+
+    module = []
+    module.extend([
+        basic_conv(in_planes, in_planes, kernel_size, stride, padding, groups=in_planes,
+                   with_bn=True, with_relu=True),
+        nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
+    ])
+    if with_bn:
+        module.append(nn.BatchNorm2d(out_planes))
+    if with_relu:
+        module.append(nn.ReLU())
+    return nn.Sequential(*module)
+
+
+def stacked_conv(in_planes, out_planes, kernel_size, num_stack, stride=1, padding=1, groups=1,
+                 with_bn=True, with_relu=True, conv_type='basic_conv'):
+    """stacked convolution with bn and relu"""
+    if num_stack < 1:
+        raise ValueError('`num_stack` has to be a positive integer.')
+    if conv_type == 'basic_conv':
+        conv = partial(basic_conv, out_planes=out_planes, kernel_size=kernel_size, stride=stride,
+                       padding=padding, groups=groups, with_bn=with_bn, with_relu=with_relu)
+    elif conv_type == 'depthwise_separable_conv':
+        conv = partial(depthwise_separable_conv, out_planes=out_planes, kernel_size=kernel_size, stride=stride,
+                       padding=padding, groups=1, with_bn=with_bn, with_relu=with_relu)
+    else:
+        raise ValueError('Unknown conv_type: {}'.format(conv_type))
+    module = []
+    module.append(conv(in_planes=in_planes))
+    for _ in range(1, num_stack):
+        module.append(conv(in_planes=out_planes))
+    return nn.Sequential(*module)
+
+
+if __name__ == '__main__':
+    import torch
+
+    model = stacked_conv(4, 2, 3, 3)
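+    # Expect torch.Size([1, 2, 5, 5]): three stacked 3x3 convs (stride 1, padding 1)
+    # map 4 -> 2 channels and preserve the 5x5 spatial size.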
+    print(model)
+    data = torch.zeros(1, 4, 5, 5)
+    print(model(data).shape)
diff --git a/CDARTS_segmentation/segmentation/model/decoder/deeplabv3.py b/CDARTS_segmentation/segmentation/model/decoder/deeplabv3.py
new file mode 100644
index 0000000..98f5ee5
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/model/decoder/deeplabv3.py
@@ -0,0 +1,37 @@
+# ------------------------------------------------------------------------------
+# DeepLabV3 decoder.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+from collections import OrderedDict
+
+from torch import nn
+
+from .aspp import ASPP
+
+
+__all__ = ["DeepLabV3Decoder"]
+
+
+class DeepLabV3Decoder(nn.Module):
+    def __init__(self, in_channels, feature_key, decoder_channels, atrous_rates, num_classes):
+        super(DeepLabV3Decoder, self).__init__()
+        self.aspp = ASPP(in_channels, out_channels=decoder_channels, atrous_rates=atrous_rates)
+        self.feature_key = feature_key
+        self.classifier = nn.Sequential(
+            nn.Conv2d(decoder_channels, decoder_channels, 3, padding=1, bias=False),
+            nn.BatchNorm2d(decoder_channels),
+            nn.ReLU(),
+            nn.Conv2d(decoder_channels, num_classes, 1)
+        )
+
+    def set_image_pooling(self, pool_size):
+        self.aspp.set_image_pooling(pool_size)
+
+    def forward(self, features):
+        pred = OrderedDict()
+        res5 = features[self.feature_key]
+        x = self.aspp(res5)
+        x = self.classifier(x)
+        pred['semantic'] = x
+        return pred
diff --git a/CDARTS_segmentation/segmentation/model/decoder/deeplabv3plus.py b/CDARTS_segmentation/segmentation/model/decoder/deeplabv3plus.py
new file mode 100644
index 0000000..f178a0d
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/model/decoder/deeplabv3plus.py
@@ -0,0 +1,59 @@
+# ------------------------------------------------------------------------------
+# DeepLabV3+ decoder.
+# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from collections import OrderedDict + +import torch +from torch import nn +from torch.nn import functional as F + +from .aspp import ASPP +from .conv_module import stacked_conv + + +__all__ = ["DeepLabV3PlusDecoder"] + + +class DeepLabV3PlusDecoder(nn.Module): + def __init__(self, in_channels, feature_key, low_level_channels, low_level_key, low_level_channels_project, + decoder_channels, atrous_rates, num_classes): + super(DeepLabV3PlusDecoder, self).__init__() + self.aspp = ASPP(in_channels, out_channels=decoder_channels, atrous_rates=atrous_rates) + self.feature_key = feature_key + self.low_level_key = low_level_key + # Transform low-level feature + # low_level_channels_project = 48 + self.project = nn.Sequential( + nn.Conv2d(low_level_channels, low_level_channels_project, 1, bias=False), + nn.BatchNorm2d(low_level_channels_project), + nn.ReLU() + ) + # Fuse + self.fuse = stacked_conv( + decoder_channels + low_level_channels_project, + decoder_channels, + kernel_size=3, + padding=1, + num_stack=2, + conv_type='depthwise_separable_conv' + ) + self.classifier = nn.Conv2d(decoder_channels, num_classes, 1) + + def set_image_pooling(self, pool_size): + self.aspp.set_image_pooling(pool_size) + + def forward(self, features): + pred = OrderedDict() + l = features[self.low_level_key] + x = features[self.feature_key] + x = self.aspp(x) + # low-level feature + l = self.project(l) + x = F.interpolate(x, size=l.size()[2:], mode='bilinear', align_corners=True) + x = torch.cat((x, l), dim=1) + x = self.fuse(x) + x = self.classifier(x) + pred['semantic'] = x + return pred diff --git a/CDARTS_segmentation/segmentation/model/decoder/panoptic_deeplab.py b/CDARTS_segmentation/segmentation/model/decoder/panoptic_deeplab.py new file mode 100644 index 0000000..8d387e7 --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/decoder/panoptic_deeplab.py @@ -0,0 +1,162 @@ +# ------------------------------------------------------------------------------ +# Panoptic-DeepLab decoder. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from collections import OrderedDict +from functools import partial + +import torch +from torch import nn +from torch.nn import functional as F + +from .aspp import ASPP +from .conv_module import stacked_conv + + +__all__ = ["PanopticDeepLabDecoder"] + + +class SinglePanopticDeepLabDecoder(nn.Module): + def __init__(self, in_channels, feature_key, low_level_channels, low_level_key, low_level_channels_project, + decoder_channels, atrous_rates, aspp_channels=None): + super(SinglePanopticDeepLabDecoder, self).__init__() + if aspp_channels is None: + aspp_channels = decoder_channels + self.aspp = ASPP(in_channels, out_channels=aspp_channels, atrous_rates=atrous_rates) + self.feature_key = feature_key + self.decoder_stage = len(low_level_channels) + assert self.decoder_stage == len(low_level_key) + assert self.decoder_stage == len(low_level_channels_project) + self.low_level_key = low_level_key + fuse_conv = partial(stacked_conv, kernel_size=5, num_stack=1, padding=2, + conv_type='depthwise_separable_conv') + + # Transform low-level feature + project = [] + # Fuse + fuse = [] + # Top-down direction, i.e. 
starting from largest stride + for i in range(self.decoder_stage): + project.append( + nn.Sequential( + nn.Conv2d(low_level_channels[i], low_level_channels_project[i], 1, bias=False), + nn.BatchNorm2d(low_level_channels_project[i]), + nn.ReLU() + ) + ) + if i == 0: + fuse_in_channels = aspp_channels + low_level_channels_project[i] + else: + fuse_in_channels = decoder_channels + low_level_channels_project[i] + fuse.append( + fuse_conv( + fuse_in_channels, + decoder_channels, + ) + ) + self.project = nn.ModuleList(project) + self.fuse = nn.ModuleList(fuse) + + def set_image_pooling(self, pool_size): + self.aspp.set_image_pooling(pool_size) + + def forward(self, features): + x = features[self.feature_key] + x = self.aspp(x) + + # build decoder + for i in range(self.decoder_stage): + l = features[self.low_level_key[i]] + l = self.project[i](l) + x = F.interpolate(x, size=l.size()[2:], mode='bilinear', align_corners=True) + x = torch.cat((x, l), dim=1) + x = self.fuse[i](x) + + return x + + +class SinglePanopticDeepLabHead(nn.Module): + def __init__(self, decoder_channels, head_channels, num_classes, class_key): + super(SinglePanopticDeepLabHead, self).__init__() + fuse_conv = partial(stacked_conv, kernel_size=5, num_stack=1, padding=2, + conv_type='depthwise_separable_conv') + + self.num_head = len(num_classes) + assert self.num_head == len(class_key) + + classifier = {} + for i in range(self.num_head): + classifier[class_key[i]] = nn.Sequential( + fuse_conv( + decoder_channels, + head_channels, + ), + nn.Conv2d(head_channels, num_classes[i], 1) + ) + self.classifier = nn.ModuleDict(classifier) + self.class_key = class_key + + def forward(self, x): + pred = OrderedDict() + # build classifier + for key in self.class_key: + pred[key] = self.classifier[key](x) + + return pred + + +class PanopticDeepLabDecoder(nn.Module): + def __init__(self, in_channels, feature_key, low_level_channels, low_level_key, low_level_channels_project, + decoder_channels, atrous_rates, num_classes, **kwargs): + super(PanopticDeepLabDecoder, self).__init__() + # Build semantic decoder + self.semantic_decoder = SinglePanopticDeepLabDecoder(in_channels, feature_key, low_level_channels, + low_level_key, low_level_channels_project, + decoder_channels, atrous_rates) + self.semantic_head = SinglePanopticDeepLabHead(decoder_channels, decoder_channels, [num_classes], ['semantic']) + # Build instance decoder + self.instance_decoder = None + self.instance_head = None + if kwargs.get('has_instance', False): + instance_decoder_kwargs = dict( + in_channels=in_channels, + feature_key=feature_key, + low_level_channels=low_level_channels, + low_level_key=low_level_key, + low_level_channels_project=kwargs['instance_low_level_channels_project'], + decoder_channels=kwargs['instance_decoder_channels'], + atrous_rates=atrous_rates, + aspp_channels=kwargs['instance_aspp_channels'] + ) + self.instance_decoder = SinglePanopticDeepLabDecoder(**instance_decoder_kwargs) + instance_head_kwargs = dict( + decoder_channels=kwargs['instance_decoder_channels'], + head_channels=kwargs['instance_head_channels'], + num_classes=kwargs['instance_num_classes'], + class_key=kwargs['instance_class_key'] + ) + self.instance_head = SinglePanopticDeepLabHead(**instance_head_kwargs) + + def set_image_pooling(self, pool_size): + self.semantic_decoder.set_image_pooling(pool_size) + if self.instance_decoder is not None: + self.instance_decoder.set_image_pooling(pool_size) + + def forward(self, features): + pred = OrderedDict() + + # Semantic branch + semantic = 
self.semantic_decoder(features)
+        semantic = self.semantic_head(semantic)
+        for key in semantic.keys():
+            pred[key] = semantic[key]
+
+        # Instance branch
+        if self.instance_decoder is not None:
+            instance = self.instance_decoder(features)
+            instance = self.instance_head(instance)
+            for key in instance.keys():
+                pred[key] = instance[key]
+
+        return pred
diff --git a/CDARTS_segmentation/segmentation/model/loss/__init__.py b/CDARTS_segmentation/segmentation/model/loss/__init__.py
new file mode 100644
index 0000000..f9cccc9
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/model/loss/__init__.py
@@ -0,0 +1,7 @@
+from torch import nn
+
+from .criterion import RegularCE, OhemCE, DeepLabCE
+
+L1Loss = nn.L1Loss
+MSELoss = nn.MSELoss
+CrossEntropyLoss = nn.CrossEntropyLoss
diff --git a/CDARTS_segmentation/segmentation/model/loss/criterion.py b/CDARTS_segmentation/segmentation/model/loss/criterion.py
new file mode 100644
index 0000000..e988015
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/model/loss/criterion.py
@@ -0,0 +1,112 @@
+# ------------------------------------------------------------------------------
+# Loss functions.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
+
+class RegularCE(nn.Module):
+    """
+    Regular cross entropy loss for semantic segmentation, supports pixel-wise loss weights.
+    Arguments:
+        ignore_label: Integer, label to ignore.
+        weight: Tensor, a manual rescaling weight given to each class.
+    """
+    def __init__(self, ignore_label=-1, weight=None):
+        super(RegularCE, self).__init__()
+        self.ignore_label = ignore_label
+        self.criterion = nn.CrossEntropyLoss(weight=weight,
+                                             ignore_index=ignore_label,
+                                             reduction='none')
+
+    def forward(self, logits, labels, **kwargs):
+        if 'semantic_weights' in kwargs:
+            pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights']
+            pixel_losses = pixel_losses.contiguous().view(-1)
+        else:
+            pixel_losses = self.criterion(logits, labels).contiguous().view(-1)
+        mask = labels.contiguous().view(-1) != self.ignore_label
+
+        pixel_losses = pixel_losses[mask]
+        return pixel_losses.mean()
+
+
+class OhemCE(nn.Module):
+    """
+    Online hard example mining with cross entropy loss, for semantic segmentation.
+    This is widely used in PyTorch semantic segmentation frameworks.
+    Reference: https://github.com/HRNet/HRNet-Semantic-Segmentation/blob/1b3ae72f6025bde4ea404305d502abea3c2f5266/lib/core/criterion.py#L29
+    Arguments:
+        ignore_label: Integer, label to ignore.
+        threshold: Float, threshold for the softmax score of the ground-truth class; only pixels whose
+            ground-truth score falls below this threshold are kept as hard examples.
+        min_kept: Integer, minimum number of pixels to be kept; used to adjust the threshold
+            so that the number of kept examples does not become too small.
+        weight: Tensor, a manual rescaling weight given to each class.
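+    Example (an illustrative sketch; the shapes and class count are assumptions):
+        >>> criterion = OhemCE(ignore_label=255, threshold=0.7, min_kept=10000)
+        >>> logits = torch.randn(2, 19, 64, 64)          # [N, C, H, W] class scores
+        >>> labels = torch.randint(0, 19, (2, 64, 64))   # [N, H, W] ground-truth ids
+        >>> loss = criterion(logits, labels)             # mean over the kept hard pixels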
+ """ + def __init__(self, ignore_label=-1, threshold=0.7, + min_kept=100000, weight=None): + super(OhemCE, self).__init__() + self.threshold = threshold + self.min_kept = max(1, min_kept) + self.ignore_label = ignore_label + self.criterion = nn.CrossEntropyLoss(weight=weight, + ignore_index=ignore_label, + reduction='none') + + def forward(self, logits, labels, **kwargs): + predictions = F.softmax(logits, dim=1) + if 'semantic_weights' in kwargs: + pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights'] + pixel_losses = pixel_losses.contiguous().view(-1) + else: + pixel_losses = self.criterion(logits, labels).contiguous().view(-1) + mask = labels.contiguous().view(-1) != self.ignore_label + + tmp_labels = labels.clone() + tmp_labels[tmp_labels == self.ignore_label] = 0 + # Get the score for gt class at each pixel location. + predictions = predictions.gather(1, tmp_labels.unsqueeze(1)) + predictions, indices = predictions.contiguous().view(-1, )[mask].contiguous().sort() + min_value = predictions[min(self.min_kept, predictions.numel() - 1)] + threshold = max(min_value, self.threshold) + + pixel_losses = pixel_losses[mask][indices] + pixel_losses = pixel_losses[predictions < threshold] + return pixel_losses.mean() + + +class DeepLabCE(nn.Module): + """ + Hard pixel mining mining with cross entropy loss, for semantic segmentation. + This is used in TensorFlow DeepLab frameworks. + Reference: https://github.com/tensorflow/models/blob/bd488858d610e44df69da6f89277e9de8a03722c/research/deeplab/utils/train_utils.py#L33 + Arguments: + ignore_label: Integer, label to ignore. + top_k_percent_pixels: Float, the value lies in [0.0, 1.0]. When its value < 1.0, only compute the loss for + the top k percent pixels (e.g., the top 20% pixels). This is useful for hard pixel mining. + weight: Tensor, a manual rescaling weight given to each class. + """ + def __init__(self, ignore_label=-1, top_k_percent_pixels=1.0, weight=None): + super(DeepLabCE, self).__init__() + self.top_k_percent_pixels = top_k_percent_pixels + self.ignore_label = ignore_label + self.criterion = nn.CrossEntropyLoss(weight=weight, + ignore_index=ignore_label, + reduction='none') + + def forward(self, logits, labels, **kwargs): + if 'semantic_weights' in kwargs: + pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights'] + pixel_losses = pixel_losses.contiguous().view(-1) + else: + pixel_losses = self.criterion(logits, labels).contiguous().view(-1) + if self.top_k_percent_pixels == 1.0: + return pixel_losses.mean() + + top_k_pixels = int(self.top_k_percent_pixels * pixel_losses.numel()) + pixel_losses, _ = torch.topk(pixel_losses, top_k_pixels) + return pixel_losses.mean() diff --git a/CDARTS_segmentation/segmentation/model/meta_arch/__init__.py b/CDARTS_segmentation/segmentation/model/meta_arch/__init__.py new file mode 100644 index 0000000..da75f94 --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/meta_arch/__init__.py @@ -0,0 +1,3 @@ +from .deeplabv3 import DeepLabV3 +from .deeplabv3plus import DeepLabV3Plus +from .panoptic_deeplab import PanopticDeepLab diff --git a/CDARTS_segmentation/segmentation/model/meta_arch/base.py b/CDARTS_segmentation/segmentation/model/meta_arch/base.py new file mode 100644 index 0000000..eb3e2b4 --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/meta_arch/base.py @@ -0,0 +1,64 @@ +# ------------------------------------------------------------------------------ +# Base model for segmentation. 
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+from collections import OrderedDict
+
+from torch import nn
+from torch.nn import functional as F
+
+
+class BaseSegmentationModel(nn.Module):
+    """
+    Base class for segmentation models.
+    Arguments:
+        backbone: A nn.Module of backbone model.
+        decoder: A nn.Module of decoder.
+    """
+    def __init__(self, backbone, decoder):
+        super(BaseSegmentationModel, self).__init__()
+        self.backbone = backbone
+        self.decoder = decoder
+
+    def _init_params(self):
+        # Backbone is already initialized (either from pre-trained checkpoint or random init).
+        for m in self.decoder.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.normal_(m.weight, std=0.001)
+            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+
+    def set_image_pooling(self, pool_size):
+        self.decoder.set_image_pooling(pool_size)
+
+    def _upsample_predictions(self, pred, input_shape):
+        """Upsamples final prediction.
+        Args:
+            pred (dict): stores all output of the segmentation model.
+            input_shape (tuple): spatial resolution of the desired shape.
+        Returns:
+            result (OrderedDict): upsampled dictionary.
+        """
+        result = OrderedDict()
+        for key in pred.keys():
+            out = F.interpolate(pred[key], size=input_shape, mode='bilinear', align_corners=True)
+            result[key] = out
+        return result
+
+    def forward(self, x, targets=None):
+        input_shape = x.shape[-2:]
+
+        # contract: features is a dict of tensors
+        features = self.backbone(x)
+        pred = self.decoder(features)
+        results = self._upsample_predictions(pred, input_shape)
+
+        if targets is None:
+            return results
+        else:
+            return self.loss(results, targets)
+
+    def loss(self, results, targets=None):
+        raise NotImplementedError
diff --git a/CDARTS_segmentation/segmentation/model/meta_arch/deeplabv3.py b/CDARTS_segmentation/segmentation/model/meta_arch/deeplabv3.py
new file mode 100644
index 0000000..ee6517d
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/model/meta_arch/deeplabv3.py
@@ -0,0 +1,58 @@
+# ------------------------------------------------------------------------------
+# DeepLabV3 meta architecture.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+from collections import OrderedDict
+
+import torch
+from torch import nn
+
+from .base import BaseSegmentationModel
+from segmentation.model.decoder import DeepLabV3Decoder
+from segmentation.utils import AverageMeter
+
+
+__all__ = ["DeepLabV3"]
+
+
+class DeepLabV3(BaseSegmentationModel):
+    """
+    Implements DeepLabV3 model from
+    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
+    <https://arxiv.org/abs/1706.05587>`_.
+    Arguments:
+        backbone (nn.Module): the network used to compute the features for the model.
+            The backbone should return an OrderedDict[Tensor], with the key being
+            "out" for the last feature map used, and "aux" if an auxiliary classifier
+            is used.
+        in_channels (int): number of input channels from the backbone
+        feature_key (str): name of input feature from backbone
+        decoder_channels (int): number of channels in decoder
+        atrous_rates (tuple): atrous rates for ASPP
+        num_classes (int): number of classes
+        semantic_loss (nn.Module): loss function
+        semantic_loss_weight (float): loss weight
+    """
+
+    def __init__(self, backbone, in_channels, feature_key, decoder_channels, atrous_rates, num_classes,
+                 semantic_loss, semantic_loss_weight, **kwargs):
+        decoder = DeepLabV3Decoder(in_channels, feature_key, decoder_channels, atrous_rates, num_classes)
+        super(DeepLabV3, self).__init__(backbone, decoder)
+
+        self.semantic_loss = semantic_loss
+        self.semantic_loss_weight = semantic_loss_weight
+
+        self.loss_meter_dict = OrderedDict()
+        self.loss_meter_dict['Loss'] = AverageMeter()
+
+        # Initialize parameters.
+        self._init_params()
+
+    def loss(self, results, targets=None):
+        batch_size = results['semantic'].size(0)
+        if targets is not None:
+            semantic_loss = self.semantic_loss(results['semantic'], targets['semantic']) * self.semantic_loss_weight
+            self.loss_meter_dict['Loss'].update(semantic_loss.detach().cpu().item(), batch_size)
+            results['loss'] = semantic_loss
+        return results
diff --git a/CDARTS_segmentation/segmentation/model/meta_arch/deeplabv3plus.py b/CDARTS_segmentation/segmentation/model/meta_arch/deeplabv3plus.py
new file mode 100644
index 0000000..066d90b
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/model/meta_arch/deeplabv3plus.py
@@ -0,0 +1,63 @@
+# ------------------------------------------------------------------------------
+# DeepLabV3+ meta architecture.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+from collections import OrderedDict
+
+import torch
+from torch import nn
+
+from .base import BaseSegmentationModel
+from segmentation.model.decoder import DeepLabV3PlusDecoder
+from segmentation.utils import AverageMeter
+
+
+__all__ = ["DeepLabV3Plus"]
+
+
+class DeepLabV3Plus(BaseSegmentationModel):
+    """
+    Implements DeepLabV3+ model from
+    `"Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation"
+    <https://arxiv.org/abs/1802.02611>`_.
+    Arguments:
+        backbone (nn.Module): the network used to compute the features for the model.
+            The backbone should return an OrderedDict[Tensor], with the key being
+            "out" for the last feature map used, and "aux" if an auxiliary classifier
+            is used.
+        in_channels (int): number of input channels from the backbone
+        feature_key (str): name of input feature from backbone
+        low_level_channels (int): channels of low-level features
+        low_level_key (str): name of low-level features used in decoder
+        low_level_channels_project (int): channels of low-level features after projection in decoder
+        decoder_channels (int): number of channels in decoder
+        atrous_rates (tuple): atrous rates for ASPP
+        num_classes (int): number of classes
+        semantic_loss (nn.Module): loss function
+        semantic_loss_weight (float): loss weight
+    """
+
+    def __init__(self, backbone, in_channels, feature_key, low_level_channels, low_level_key,
+                 low_level_channels_project, decoder_channels, atrous_rates, num_classes,
+                 semantic_loss, semantic_loss_weight, **kwargs):
+        decoder = DeepLabV3PlusDecoder(in_channels, feature_key, low_level_channels, low_level_key,
+                                       low_level_channels_project, decoder_channels, atrous_rates, num_classes)
+        super(DeepLabV3Plus, self).__init__(backbone, decoder)
+
+        self.semantic_loss = semantic_loss
+        self.semantic_loss_weight = semantic_loss_weight
+
+        self.loss_meter_dict = OrderedDict()
+        self.loss_meter_dict['Loss'] = AverageMeter()
+
+        # Initialize parameters.
+        self._init_params()
+
+    def loss(self, results, targets=None):
+        batch_size = results['semantic'].size(0)
+        if targets is not None:
+            semantic_loss = self.semantic_loss(results['semantic'], targets['semantic']) * self.semantic_loss_weight
+            self.loss_meter_dict['Loss'].update(semantic_loss.detach().cpu().item(), batch_size)
+            results['loss'] = semantic_loss
+        return results
diff --git a/CDARTS_segmentation/segmentation/model/meta_arch/panoptic_deeplab.py b/CDARTS_segmentation/segmentation/model/meta_arch/panoptic_deeplab.py
new file mode 100644
index 0000000..4371a89
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/model/meta_arch/panoptic_deeplab.py
@@ -0,0 +1,135 @@
+# ------------------------------------------------------------------------------
+# Panoptic-DeepLab meta architecture.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+from collections import OrderedDict
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from .base import BaseSegmentationModel
+from segmentation.model.decoder import PanopticDeepLabDecoder
+from segmentation.utils import AverageMeter
+
+
+__all__ = ["PanopticDeepLab"]
+
+
+class PanopticDeepLab(BaseSegmentationModel):
+    """
+    Implements Panoptic-DeepLab model from
+    `"Panoptic-DeepLab: A Simple, Strong, and Fast Baseline for Bottom-Up Panoptic Segmentation"
+    <https://arxiv.org/abs/1911.10194>`_.
+    Arguments:
+        backbone (nn.Module): the network used to compute the features for the model.
+            The backbone should return an OrderedDict[Tensor], with the key being
+            "out" for the last feature map used, and "aux" if an auxiliary classifier
+            is used.
+        in_channels (int): number of input channels from the backbone
+        feature_key (str): name of input feature from backbone
+        low_level_channels (list): a list of channels of low-level features
+        low_level_key (list): a list of names of low-level features used in decoder
+        low_level_channels_project (list): a list of channels of low-level features after projection in decoder
+        decoder_channels (int): number of channels in decoder
+        atrous_rates (tuple): atrous rates for ASPP
+        num_classes (int): number of classes
+        semantic_loss (nn.Module): loss function
+        semantic_loss_weight (float): loss weight
+        center_loss (nn.Module): loss function
+        center_loss_weight (float): loss weight
+        offset_loss (nn.Module): loss function
+        offset_loss_weight (float): loss weight
+        **kwargs: arguments for instance head
+    """
+
+    def __init__(self, backbone, in_channels, feature_key, low_level_channels, low_level_key,
+                 low_level_channels_project, decoder_channels, atrous_rates, num_classes,
+                 semantic_loss, semantic_loss_weight, center_loss, center_loss_weight,
+                 offset_loss, offset_loss_weight, **kwargs):
+        decoder = PanopticDeepLabDecoder(in_channels, feature_key, low_level_channels, low_level_key,
+                                         low_level_channels_project, decoder_channels, atrous_rates, num_classes,
+                                         **kwargs)
+        super(PanopticDeepLab, self).__init__(backbone, decoder)
+
+        self.semantic_loss = semantic_loss
+        self.semantic_loss_weight = semantic_loss_weight
+        self.loss_meter_dict = OrderedDict()
+        self.loss_meter_dict['Loss'] = AverageMeter()
+        self.loss_meter_dict['Semantic loss'] = AverageMeter()
+
+        if kwargs.get('has_instance', False):
+            self.center_loss = center_loss
+            self.center_loss_weight = center_loss_weight
+            self.offset_loss = offset_loss
+            self.offset_loss_weight = offset_loss_weight
+            self.loss_meter_dict['Center loss'] = AverageMeter()
+            self.loss_meter_dict['Offset loss'] = AverageMeter()
+        else:
+            self.center_loss = None
+            self.center_loss_weight = 0
+            self.offset_loss = None
+            self.offset_loss_weight = 0
+
+        # Initialize parameters.
+        self._init_params()
+
+    def _upsample_predictions(self, pred, input_shape):
+        """Upsamples final prediction, with special handling for offsets.
+        Args:
+            pred (dict): stores all output of the segmentation model.
+            input_shape (tuple): spatial resolution of the desired shape.
+        Returns:
+            result (OrderedDict): upsampled dictionary.
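+
+            Note: offset vectors are additionally multiplied by the integer upsampling
+            factor; e.g. going from a 129x129 prediction to a 513x513 input gives
+            (513 - 1) // (129 - 1) = 4, so every offset is scaled by 4.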
+ """ + # Override upsample method to correctly handle `offset` + result = OrderedDict() + for key in pred.keys(): + out = F.interpolate(pred[key], size=input_shape, mode='bilinear', align_corners=True) + if 'offset' in key: + scale = (input_shape[0] - 1) // (pred[key].shape[2] - 1) + out *= scale + result[key] = out + return result + + def loss(self, results, targets=None): + batch_size = results['semantic'].size(0) + loss = 0 + if targets is not None: + if 'semantic_weights' in targets.keys(): + semantic_loss = self.semantic_loss( + results['semantic'], targets['semantic'], semantic_weights=targets['semantic_weights'] + ) * self.semantic_loss_weight + else: + semantic_loss = self.semantic_loss( + results['semantic'], targets['semantic']) * self.semantic_loss_weight + self.loss_meter_dict['Semantic loss'].update(semantic_loss.detach().cpu().item(), batch_size) + loss += semantic_loss + if self.center_loss is not None: + # Pixel-wise loss weight + center_loss_weights = targets['center_weights'][:, None, :, :].expand_as(results['center']) + center_loss = self.center_loss(results['center'], targets['center']) * center_loss_weights + # safe division + if center_loss_weights.sum() > 0: + center_loss = center_loss.sum() / center_loss_weights.sum() * self.center_loss_weight + else: + center_loss = center_loss.sum() * 0 + self.loss_meter_dict['Center loss'].update(center_loss.detach().cpu().item(), batch_size) + loss += center_loss + if self.offset_loss is not None: + # Pixel-wise loss weight + offset_loss_weights = targets['offset_weights'][:, None, :, :].expand_as(results['offset']) + offset_loss = self.offset_loss(results['offset'], targets['offset']) * offset_loss_weights + # safe division + if offset_loss_weights.sum() > 0: + offset_loss = offset_loss.sum() / offset_loss_weights.sum() * self.offset_loss_weight + else: + offset_loss = offset_loss.sum() * 0 + self.loss_meter_dict['Offset loss'].update(offset_loss.detach().cpu().item(), batch_size) + loss += offset_loss + # In distributed DataParallel, this is the loss on one machine, need to average the loss again + # in train loop. + results['loss'] = loss + self.loss_meter_dict['Loss'].update(loss.detach().cpu().item(), batch_size) + return results diff --git a/CDARTS_segmentation/segmentation/model/post_processing/__init__.py b/CDARTS_segmentation/segmentation/model/post_processing/__init__.py new file mode 100644 index 0000000..821efbf --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/post_processing/__init__.py @@ -0,0 +1,3 @@ +from .semantic_post_processing import get_semantic_segmentation +from .instance_post_processing import get_panoptic_segmentation +from .evaluation_format import get_cityscapes_instance_format diff --git a/CDARTS_segmentation/segmentation/model/post_processing/evaluation_format.py b/CDARTS_segmentation/segmentation/model/post_processing/evaluation_format.py new file mode 100644 index 0000000..16e6211 --- /dev/null +++ b/CDARTS_segmentation/segmentation/model/post_processing/evaluation_format.py @@ -0,0 +1,60 @@ +# ------------------------------------------------------------------------------ +# Generates the correct format for official evaluation code. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from collections import OrderedDict + +import numpy as np + + +def get_cityscapes_instance_format(panoptic, sem, ctr_hmp, label_divisor, score_type="semantic"): + """ + Get Cityscapes instance segmentation format. 
Arguments:
+        panoptic: A Numpy Ndarray of shape [H, W].
+        sem: A Numpy Ndarray of shape [C, H, W] of raw semantic output.
+        ctr_hmp: A Numpy Ndarray of shape [H, W] of raw center heatmap output.
+        label_divisor: An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id.
+        score_type: A string, how to calculate confidence scores for instance segmentation.
+            - "semantic": average of semantic segmentation confidence within the instance mask.
+            - "instance": confidence of heatmap at center point of the instance mask.
+            - "both": multiply "semantic" and "instance".
+    Returns:
+        A List containing instance segmentation in Cityscapes format.
+    """
+    instances = []
+
+    pan_labels = np.unique(panoptic)
+    for pan_lab in pan_labels:
+        if pan_lab % label_divisor == 0:
+            # This is either stuff or ignored region.
+            continue
+
+        ins = OrderedDict()
+
+        train_class_id = pan_lab // label_divisor
+        ins['pred_class'] = train_class_id
+
+        mask = panoptic == pan_lab
+        ins['pred_mask'] = np.array(mask, dtype='uint8')
+
+        sem_scores = sem[train_class_id, ...]
+        ins_score = np.mean(sem_scores[mask])
+        # mask center point
+        mask_index = np.where(panoptic == pan_lab)
+        center_y, center_x = np.mean(mask_index[0]), np.mean(mask_index[1])
+        ctr_score = ctr_hmp[int(center_y), int(center_x)]
+
+        if score_type == "semantic":
+            ins['score'] = ins_score
+        elif score_type == "instance":
+            ins['score'] = ctr_score
+        elif score_type == "both":
+            ins['score'] = ins_score * ctr_score
+        else:
+            raise ValueError("Unknown confidence score type: {}".format(score_type))
+
+        instances.append(ins)
+
+    return instances
diff --git a/CDARTS_segmentation/segmentation/model/post_processing/instance_post_processing.py b/CDARTS_segmentation/segmentation/model/post_processing/instance_post_processing.py
new file mode 100644
index 0000000..cf02f60
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/model/post_processing/instance_post_processing.py
@@ -0,0 +1,237 @@
+# ------------------------------------------------------------------------------
+# Post-processing to get instance and panoptic segmentation results.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import torch
+import torch.nn.functional as F
+
+from .semantic_post_processing import get_semantic_segmentation
+
+__all__ = ['find_instance_center', 'get_instance_segmentation', 'get_panoptic_segmentation']
+
+
+def find_instance_center(ctr_hmp, threshold=0.1, nms_kernel=3, top_k=None):
+    """
+    Find the center points from the center heatmap.
+    Arguments:
+        ctr_hmp: A Tensor of shape [N, 1, H, W] of raw center heatmap output, where N is the batch size;
+            for consistency, we only support N=1.
+        threshold: A Float, threshold applied to center heatmap score.
+        nms_kernel: An Integer, NMS max pooling kernel size.
+        top_k: An Integer, top k centers to keep.
+    Returns:
+        A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x).
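+
+    Example (an illustrative sketch; the heatmap values are assumptions):
+        >>> hmp = torch.zeros(1, 1, 16, 16)
+        >>> hmp[0, 0, 4, 7] = 0.9          # a single confident center
+        >>> find_instance_center(hmp, threshold=0.1, nms_kernel=3)
+        tensor([[4, 7]])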
+ """ + if ctr_hmp.size(0) != 1: + raise ValueError('Only supports inference for batch size = 1') + + # thresholding, setting values below threshold to -1 + ctr_hmp = F.threshold(ctr_hmp, threshold, -1) + + # NMS + nms_padding = (nms_kernel - 1) // 2 + ctr_hmp_max_pooled = F.max_pool2d(ctr_hmp, kernel_size=nms_kernel, stride=1, padding=nms_padding) + ctr_hmp[ctr_hmp != ctr_hmp_max_pooled] = -1 + + # squeeze first two dimensions + ctr_hmp = ctr_hmp.squeeze() + assert len(ctr_hmp.size()) == 2, 'Something is wrong with center heatmap dimension.' + + # find non-zero elements + ctr_all = torch.nonzero(ctr_hmp > 0) + if top_k is None: + return ctr_all + elif ctr_all.size(0) < top_k: + return ctr_all + else: + # find top k centers. + top_k_scores, _ = torch.topk(torch.flatten(ctr_hmp), top_k) + return torch.nonzero(ctr_hmp > top_k_scores[-1]) + + +def group_pixels(ctr, offsets): + """ + Gives each pixel in the image an instance id. + Arguments: + ctr: A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x). + offsets: A Tensor of shape [N, 2, H, W] of raw offset output, where N is the batch size, + for consistent, we only support N=1. The order of second dim is (offset_y, offset_x). + Returns: + A Tensor of shape [1, H, W] (to be gathered by distributed data parallel). + """ + if offsets.size(0) != 1: + raise ValueError('Only supports inference for batch size = 1') + + offsets = offsets.squeeze(0) + height, width = offsets.size()[1:] + + # generates a coordinate map, where each location is the coordinate of that loc + y_coord = torch.arange(height, dtype=offsets.dtype, device=offsets.device).repeat(1, width, 1).transpose(1, 2) + x_coord = torch.arange(width, dtype=offsets.dtype, device=offsets.device).repeat(1, height, 1) + coord = torch.cat((y_coord, x_coord), dim=0) + + ctr_loc = coord + offsets + ctr_loc = ctr_loc.reshape((2, height * width)).transpose(1, 0) + + # ctr: [K, 2] -> [K, 1, 2] + # ctr_loc = [H*W, 2] -> [1, H*W, 2] + ctr = ctr.unsqueeze(1) + ctr_loc = ctr_loc.unsqueeze(0) + + # distance: [K, H*W] + distance = torch.norm(ctr - ctr_loc, dim=-1) + + # finds center with minimum distance at each location, offset by 1, to reserve id=0 for stuff + instance_id = torch.argmin(distance, dim=0).reshape((1, height, width)) + 1 + return instance_id + + +def get_instance_segmentation(sem_seg, ctr_hmp, offsets, thing_list, threshold=0.1, nms_kernel=3, top_k=None, + thing_seg=None): + """ + Post-processing for instance segmentation, gets class agnostic instance id map. + Arguments: + sem_seg: A Tensor of shape [1, H, W], predicted semantic label. + ctr_hmp: A Tensor of shape [N, 1, H, W] of raw center heatmap output, where N is the batch size, + for consistent, we only support N=1. + offsets: A Tensor of shape [N, 2, H, W] of raw offset output, where N is the batch size, + for consistent, we only support N=1. The order of second dim is (offset_y, offset_x). + thing_list: A List of thing class id. + threshold: A Float, threshold applied to center heatmap score. + nms_kernel: An Integer, NMS max pooling kernel size. + top_k: An Integer, top k centers to keep. + thing_seg: A Tensor of shape [1, H, W], predicted foreground mask, if not provided, inference from + semantic prediction. + Returns: + A Tensor of shape [1, H, W] (to be gathered by distributed data parallel). + A Tensor of shape [1, K, 2] where K is the number of center points. The order of second dim is (y, x). 
+ """ + if thing_seg is None: + # gets foreground segmentation + thing_seg = torch.zeros_like(sem_seg) + for thing_class in thing_list: + thing_seg[sem_seg == thing_class] = 1 + + ctr = find_instance_center(ctr_hmp, threshold=threshold, nms_kernel=nms_kernel, top_k=top_k) + if ctr.size(0) == 0: + return torch.zeros_like(sem_seg), ctr.unsqueeze(0) + ins_seg = group_pixels(ctr, offsets) + return thing_seg * ins_seg, ctr.unsqueeze(0) + + +def merge_semantic_and_instance(sem_seg, ins_seg, label_divisor, thing_list, stuff_area, void_label): + """ + Post-processing for panoptic segmentation, by merging semantic segmentation label and class agnostic + instance segmentation label. + Arguments: + sem_seg: A Tensor of shape [1, H, W], predicted semantic label. + ins_seg: A Tensor of shape [1, H, W], predicted instance label. + label_divisor: An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id. + thing_list: A List of thing class id. + stuff_area: An Integer, remove stuff whose area is less tan stuff_area. + void_label: An Integer, indicates the region has no confident prediction. + Returns: + A Tensor of shape [1, H, W] (to be gathered by distributed data parallel). + Raises: + ValueError, if batch size is not 1. + """ + # In case thing mask does not align with semantic prediction + pan_seg = torch.zeros_like(sem_seg) + void_label + thing_seg = ins_seg > 0 + semantic_thing_seg = torch.zeros_like(sem_seg) + for thing_class in thing_list: + semantic_thing_seg[sem_seg == thing_class] = 1 + + # keep track of instance id for each class + class_id_tracker = {} + + # paste thing by majority voting + instance_ids = torch.unique(ins_seg) + for ins_id in instance_ids: + if ins_id == 0: + continue + # Make sure only do majority voting within semantic_thing_seg + thing_mask = (ins_seg == ins_id) & (semantic_thing_seg == 1) + if torch.nonzero(thing_mask).size(0) == 0: + continue + class_id, _ = torch.mode(sem_seg[thing_mask].view(-1, )) + if class_id.item() in class_id_tracker: + new_ins_id = class_id_tracker[class_id.item()] + else: + class_id_tracker[class_id.item()] = 1 + new_ins_id = 1 + class_id_tracker[class_id.item()] += 1 + pan_seg[thing_mask] = class_id * label_divisor + new_ins_id + + # paste stuff to unoccupied area + class_ids = torch.unique(sem_seg) + for class_id in class_ids: + if class_id.item() in thing_list: + # thing class + continue + # calculate stuff area + stuff_mask = (sem_seg == class_id) & (~thing_seg) + area = torch.nonzero(stuff_mask).size(0) + if area >= stuff_area: + pan_seg[stuff_mask] = class_id * label_divisor + + return pan_seg + + +def get_panoptic_segmentation(sem, ctr_hmp, offsets, thing_list, label_divisor, stuff_area, void_label, + threshold=0.1, nms_kernel=3, top_k=None, foreground_mask=None): + """ + Post-processing for panoptic segmentation. + Arguments: + sem: A Tensor of shape [N, C, H, W] of raw semantic output, where N is the batch size, for consistent, + we only support N=1. Or, a processed Tensor of shape [1, H, W]. + ctr_hmp: A Tensor of shape [N, 1, H, W] of raw center heatmap output, where N is the batch size, + for consistent, we only support N=1. + offsets: A Tensor of shape [N, 2, H, W] of raw offset output, where N is the batch size, + for consistent, we only support N=1. The order of second dim is (offset_y, offset_x). + thing_list: A List of thing class id. + label_divisor: An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id. 
+        stuff_area: An Integer, remove stuff whose area is less than stuff_area.
+        void_label: An Integer, indicates the region has no confident prediction.
+        threshold: A Float, threshold applied to center heatmap score.
+        nms_kernel: An Integer, NMS max pooling kernel size.
+        top_k: An Integer, top k centers to keep.
+        foreground_mask: A Tensor of shape [N, 2, H, W] of raw foreground mask, where N is the batch size;
+            we only support N=1. Or, a processed Tensor of shape [1, H, W].
+    Returns:
+        A Tensor of shape [1, H, W] (to be gathered by distributed data parallel), int64.
+    Raises:
+        ValueError, if batch size is not 1.
+    """
+    if sem.dim() != 4 and sem.dim() != 3:
+        raise ValueError('Semantic prediction with unsupported dimension: {}.'.format(sem.dim()))
+    if sem.dim() == 4 and sem.size(0) != 1:
+        raise ValueError('Only supports inference for batch size = 1')
+    if ctr_hmp.size(0) != 1:
+        raise ValueError('Only supports inference for batch size = 1')
+    if offsets.size(0) != 1:
+        raise ValueError('Only supports inference for batch size = 1')
+    if foreground_mask is not None:
+        if foreground_mask.dim() != 4 and foreground_mask.dim() != 3:
+            raise ValueError('Foreground prediction with unsupported dimension: {}.'.format(foreground_mask.dim()))
+
+    if sem.dim() == 4:
+        semantic = get_semantic_segmentation(sem)
+    else:
+        semantic = sem
+
+    if foreground_mask is not None:
+        if foreground_mask.dim() == 4:
+            thing_seg = get_semantic_segmentation(foreground_mask)
+        else:
+            thing_seg = foreground_mask
+    else:
+        thing_seg = None
+
+    instance, center = get_instance_segmentation(semantic, ctr_hmp, offsets, thing_list,
+                                                 threshold=threshold, nms_kernel=nms_kernel, top_k=top_k,
+                                                 thing_seg=thing_seg)
+    panoptic = merge_semantic_and_instance(semantic, instance, label_divisor, thing_list, stuff_area, void_label)
+
+    return panoptic, center
diff --git a/CDARTS_segmentation/segmentation/model/post_processing/semantic_post_processing.py b/CDARTS_segmentation/segmentation/model/post_processing/semantic_post_processing.py
new file mode 100644
index 0000000..7c700f2
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/model/post_processing/semantic_post_processing.py
@@ -0,0 +1,25 @@
+# ------------------------------------------------------------------------------
+# Post-processing to get semantic segmentation results.
+# Written by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import torch
+
+__all__ = ['get_semantic_segmentation']
+
+
+def get_semantic_segmentation(sem):
+    """
+    Post-processing for semantic segmentation branch.
+    Arguments:
+        sem: A Tensor of shape [N, C, H, W], where N is the batch size; for consistency, we only
+            support N=1.
+    Returns:
+        A Tensor of shape [1, H, W] (to be gathered by distributed data parallel).
+    Raises:
+        ValueError, if batch size is not 1.
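+
+    Example (an illustrative sketch; the class count is an assumption):
+        >>> sem = torch.randn(1, 19, 65, 65)      # raw logits for 19 classes
+        >>> get_semantic_segmentation(sem).shape  # argmax over the class dimension
+        torch.Size([1, 65, 65])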
+ """ + if sem.size(0) != 1: + raise ValueError('Only supports inference for batch size = 1') + sem = sem.squeeze(0) + return torch.argmax(sem, dim=0, keepdim=True) diff --git a/CDARTS_segmentation/segmentation/solver/__init__.py b/CDARTS_segmentation/segmentation/solver/__init__.py new file mode 100644 index 0000000..27b5240 --- /dev/null +++ b/CDARTS_segmentation/segmentation/solver/__init__.py @@ -0,0 +1,3 @@ +from .build import build_optimizer, build_lr_scheduler +from .lr_scheduler import WarmupMultiStepLR, WarmupCosineLR, WarmupPolyLR +from .utils import get_lr_group_id diff --git a/CDARTS_segmentation/segmentation/solver/build.py b/CDARTS_segmentation/segmentation/solver/build.py new file mode 100644 index 0000000..5e80f5c --- /dev/null +++ b/CDARTS_segmentation/segmentation/solver/build.py @@ -0,0 +1,185 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/solver/build.py +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +from enum import Enum +from typing import Any, Callable, Dict, Iterable, List, Set, Type, Union +import torch + +from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR, WarmupPolyLR + +_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]] +_GradientClipper = Callable[[_GradientClipperInput], None] + + +class GradientClipType(Enum): + VALUE = "value" + NORM = "norm" + + +def _create_gradient_clipper(config): + """ + Creates gradient clipping closure to clip by value or by norm, + according to the provided config. + """ + cfg = config.clone() + + def clip_grad_norm(p: _GradientClipperInput): + torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE) + + def clip_grad_value(p: _GradientClipperInput): + torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE) + + _GRADIENT_CLIP_TYPE_TO_CLIPPER = { + GradientClipType.VALUE: clip_grad_value, + GradientClipType.NORM: clip_grad_norm, + } + return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)] + + +def _generate_optimizer_class_with_gradient_clipping(optimizer_type, gradient_clipper): + """ + Dynamically creates a new type that inherits the type of a given instance + and overrides the `step` method to add gradient clipping + """ + + def optimizer_wgc_step(self, closure=None): + for group in self.param_groups: + for p in group["params"]: + gradient_clipper(p) + super(type(self), self).step(closure) + + OptimizerWithGradientClip = type( + optimizer_type.__name__ + "WithGradientClip", + (optimizer_type,), + {"step": optimizer_wgc_step}, + ) + return OptimizerWithGradientClip + + +def maybe_add_gradient_clipping(config, optimizer): + """ + If gradient clipping is enabled through config options, wraps the existing + optimizer instance of some type OptimizerType to become an instance + of the new dynamically created class OptimizerTypeWithGradientClip + that inherits OptimizerType and overrides the `step` method to + include gradient clipping. 
+ Args: + config: configuration options + optimizer: torch.optim.Optimizer + existing optimizer instance + Return: + optimizer: torch.optim.Optimizer + either the unmodified optimizer instance (if gradient clipping is + disabled), or the same instance with adjusted __class__ to override + the `step` method and include gradient clipping + """ + if not config.SOLVER.CLIP_GRADIENTS.ENABLED: + return optimizer + grad_clipper = _create_gradient_clipper(config.SOLVER.CLIP_GRADIENTS) + OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping( + type(optimizer), grad_clipper + ) + optimizer.__class__ = OptimizerWithGradientClip + return optimizer + + +def build_optimizer(config, model): + """Build an optimizer from config. + Args: + config: configuration file. + model: nn.Module, the model. + Returns: + A torch Optimizer. + Raises: + ValueError: optimizer type has unexpected value. + """ + norm_module_types = ( + torch.nn.BatchNorm1d, + torch.nn.BatchNorm2d, + torch.nn.BatchNorm3d, + torch.nn.SyncBatchNorm, + # NaiveSyncBatchNorm inherits from BatchNorm2d + torch.nn.GroupNorm, + torch.nn.InstanceNorm1d, + torch.nn.InstanceNorm2d, + torch.nn.InstanceNorm3d, + torch.nn.LayerNorm, + torch.nn.LocalResponseNorm, + ) + # A list of dict: List[Dict[str, Any]]. + params: List[Dict[str, Any]] = [] + memo: Set[torch.nn.parameter.Parameter] = set() + for module in model.modules(): + for key, value in module.named_parameters(recurse=False): + if not value.requires_grad: + continue + # Avoid duplicating parameters + if value in memo: + continue + memo.add(value) + lr = config.SOLVER.BASE_LR + weight_decay = config.SOLVER.WEIGHT_DECAY + if isinstance(module, norm_module_types): + weight_decay = config.SOLVER.WEIGHT_DECAY_NORM + elif key == "bias": + # NOTE: unlike Detectron v1, we now default BIAS_LR_FACTOR to 1.0 + # and WEIGHT_DECAY_BIAS to WEIGHT_DECAY so that bias optimizer + # hyperparameters are by default exactly the same as for regular + # weights. + lr = config.SOLVER.BASE_LR * config.SOLVER.BIAS_LR_FACTOR + weight_decay = config.SOLVER.WEIGHT_DECAY_BIAS + params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] + + if config.SOLVER.OPTIMIZER == "sgd": + optimizer = torch.optim.SGD(params, config.SOLVER.BASE_LR, momentum=config.SOLVER.MOMENTUM) + elif config.SOLVER.OPTIMIZER == "adam": + optimizer = torch.optim.Adam(params, config.SOLVER.BASE_LR, betas=config.SOLVER.ADAM_BETAS, + eps=config.SOLVER.ADAM_EPS) + else: + raise ValueError('Unknown optimizer: {}'.format(config.SOLVER.OPTIMIZER)) + optimizer = maybe_add_gradient_clipping(config, optimizer) + return optimizer + + +def build_lr_scheduler(config, optimizer): + """Build a LR scheduler from config. + Args: + config: configuration file. + optimizer: torch optimizer. + Returns: + A torch LRScheduler. + Raises: + ValueError: LRScheduler type has unexpected value. 
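+    Example (editor's sketch, not part of the original patch; assumes a yacs-style
+        config object carrying the SOLVER/TRAIN keys referenced in the body below):
+        config.SOLVER.LR_SCHEDULER_NAME = "WarmupPolyLR"
+        scheduler = build_lr_scheduler(config, optimizer)
+        for _ in range(config.TRAIN.MAX_ITER):
+            optimizer.step()
+            scheduler.step()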
+ """ + name = config.SOLVER.LR_SCHEDULER_NAME + if name == "WarmupMultiStepLR": + return WarmupMultiStepLR( + optimizer, + config.SOLVER.STEPS, + config.SOLVER.GAMMA, + warmup_factor=config.SOLVER.WARMUP_FACTOR, + warmup_iters=config.SOLVER.WARMUP_ITERS, + warmup_method=config.SOLVER.WARMUP_METHOD, + ) + elif name == "WarmupCosineLR": + return WarmupCosineLR( + optimizer, + config.TRAIN.MAX_ITER, + warmup_factor=config.SOLVER.WARMUP_FACTOR, + warmup_iters=config.SOLVER.WARMUP_ITERS, + warmup_method=config.SOLVER.WARMUP_METHOD, + ) + elif name == "WarmupPolyLR": + return WarmupPolyLR( + optimizer, + config.TRAIN.MAX_ITER, + warmup_factor=config.SOLVER.WARMUP_FACTOR, + warmup_iters=config.SOLVER.WARMUP_ITERS, + warmup_method=config.SOLVER.WARMUP_METHOD, + power=config.SOLVER.POLY_LR_POWER, + constant_ending=config.SOLVER.POLY_LR_CONSTANT_ENDING, + ) + else: + raise ValueError("Unknown LR scheduler: {}".format(name)) diff --git a/CDARTS_segmentation/segmentation/solver/lr_scheduler.py b/CDARTS_segmentation/segmentation/solver/lr_scheduler.py new file mode 100644 index 0000000..27333bc --- /dev/null +++ b/CDARTS_segmentation/segmentation/solver/lr_scheduler.py @@ -0,0 +1,162 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/solver/lr_scheduler.py +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import math +from bisect import bisect_right +from typing import List +import torch + +# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes +# only on epoch boundaries. We typically use iteration based schedules instead. +# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean +# "iteration" instead. + +# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating +# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it. + + +class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): + def __init__( + self, + optimizer: torch.optim.Optimizer, + milestones: List[int], + gamma: float = 0.1, + warmup_factor: float = 0.001, + warmup_iters: int = 1000, + warmup_method: str = "linear", + last_epoch: int = -1, + ): + if not list(milestones) == sorted(milestones): + raise ValueError( + "Milestones should be a list of" " increasing integers. 
Got {}", milestones + ) + self.milestones = milestones + self.gamma = gamma + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + super().__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + warmup_factor = _get_warmup_factor_at_iter( + self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor + ) + return [ + base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch) + for base_lr in self.base_lrs + ] + + def _compute_values(self) -> List[float]: + # The new interface + return self.get_lr() + + +class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler): + def __init__( + self, + optimizer: torch.optim.Optimizer, + max_iters: int, + warmup_factor: float = 0.001, + warmup_iters: int = 1000, + warmup_method: str = "linear", + last_epoch: int = -1, + ): + self.max_iters = max_iters + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + super().__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + warmup_factor = _get_warmup_factor_at_iter( + self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor + ) + # Different definitions of half-cosine with warmup are possible. For + # simplicity we multiply the standard half-cosine schedule by the warmup + # factor. An alternative is to start the period of the cosine at warmup_iters + # instead of at 0. In the case that warmup_iters << max_iters the two are + # very close to each other. + return [ + base_lr + * warmup_factor + * 0.5 + * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters)) + for base_lr in self.base_lrs + ] + + def _compute_values(self) -> List[float]: + # The new interface + return self.get_lr() + + +class WarmupPolyLR(torch.optim.lr_scheduler._LRScheduler): + def __init__( + self, + optimizer: torch.optim.Optimizer, + max_iters: int, + warmup_factor: float = 0.001, + warmup_iters: int = 1000, + warmup_method: str = "linear", + last_epoch: int = -1, + power: float = 0.9, + constant_ending: float = 0. + ): + self.max_iters = max_iters + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + self.power = power + self.constant_ending = constant_ending + super().__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + warmup_factor = _get_warmup_factor_at_iter( + self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor + ) + if self.constant_ending > 0 and warmup_factor == 1.: + # Constant ending lr. + if math.pow((1.0 - self.last_epoch / self.max_iters), self.power) < self.constant_ending: + return [ + base_lr + * self.constant_ending + for base_lr in self.base_lrs + ] + return [ + base_lr + * warmup_factor + * math.pow((1.0 - self.last_epoch / self.max_iters), self.power) + for base_lr in self.base_lrs + ] + + def _compute_values(self) -> List[float]: + # The new interface + return self.get_lr() + + +def _get_warmup_factor_at_iter( + method: str, iter: int, warmup_iters: int, warmup_factor: float +) -> float: + """ + Return the learning rate warmup factor at a specific iteration. + See https://arxiv.org/abs/1706.02677 for more details. + Args: + method (str): warmup method; either "constant" or "linear". + iter (int): iteration at which to calculate the warmup factor. + warmup_iters (int): the number of warmup iterations. + warmup_factor (float): the base warmup factor (the meaning changes according + to the method used). 
+    Returns:
+        float: the effective warmup factor at the given iteration.
+    """
+    if iter >= warmup_iters:
+        return 1.0
+
+    if method == "constant":
+        return warmup_factor
+    elif method == "linear":
+        alpha = iter / warmup_iters
+        return warmup_factor * (1 - alpha) + alpha
+    else:
+        raise ValueError("Unknown warmup method: {}".format(method))
\ No newline at end of file
diff --git a/CDARTS_segmentation/segmentation/solver/utils.py b/CDARTS_segmentation/segmentation/solver/utils.py
new file mode 100644
index 0000000..cfe61b3
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/solver/utils.py
@@ -0,0 +1,29 @@
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/hooks.py#L195
+# Modified by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+from collections import Counter
+
+
+def get_lr_group_id(optimizer):
+    """
+    Returns the id of the parameter group that holds the majority learning rate.
+    """
+    # Get the correct parameter group id to access to lr info.
+    largest_group = max(len(g["params"]) for g in optimizer.param_groups)
+    if largest_group == 1:
+        # If all groups have one parameter,
+        # then find the most common initial LR, and use it for summary
+        lr_count = Counter([g["lr"] for g in optimizer.param_groups])
+        lr = lr_count.most_common()[0][0]
+        for i, g in enumerate(optimizer.param_groups):
+            if g["lr"] == lr:
+                best_param_group_id = i
+                break
+    else:
+        for i, g in enumerate(optimizer.param_groups):
+            if len(g["params"]) == largest_group:
+                best_param_group_id = i
+                break
+    return best_param_group_id
diff --git a/CDARTS_segmentation/segmentation/utils/__init__.py b/CDARTS_segmentation/segmentation/utils/__init__.py
new file mode 100644
index 0000000..c2f22fd
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/utils/__init__.py
@@ -0,0 +1,6 @@
+from .save_annotation import (
+    save_annotation, save_instance_annotation, save_panoptic_annotation, save_center_image, save_heatmap_image,
+    save_heatmap_and_center_image, save_offset_image)
+from .flow_vis import flow_compute_color
+from .utils import AverageMeter
+from .debug import save_debug_images
diff --git a/CDARTS_segmentation/segmentation/utils/comm.py b/CDARTS_segmentation/segmentation/utils/comm.py
new file mode 100644
index 0000000..fad4cfd
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/utils/comm.py
@@ -0,0 +1,257 @@
+# ------------------------------------------------------------------------------
+# This file contains primitives for multi-gpu communication.
+# This is useful when doing distributed training.
+# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/comm.py
+# Modified by Bowen Cheng (bcheng9@illinois.edu)
+# ------------------------------------------------------------------------------
+
+import functools
+import logging
+import numpy as np
+import pickle
+import torch
+import torch.distributed as dist
+
+_LOCAL_PROCESS_GROUP = None
+"""
+A torch process group which only includes processes that are on the same machine
+as the current process. This variable is set when processes are spawned by
+`launch()` in "engine/launch.py".
+""" + + +def get_world_size() -> int: + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank() -> int: + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +def get_local_rank() -> int: + """ + Returns: + The rank of the current process within the local (per-machine) process group. + """ + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + assert _LOCAL_PROCESS_GROUP is not None + return dist.get_rank(group=_LOCAL_PROCESS_GROUP) + + +def get_local_size() -> int: + """ + Returns: + The size of the per-machine process group, + i.e. the number of processes per machine. + """ + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) + + +def is_main_process() -> bool: + return get_rank() == 0 + + +def synchronize(): + """ + Helper function to synchronize (barrier) among all processes when + using distributed training + """ + if not dist.is_available(): + return + if not dist.is_initialized(): + return + world_size = dist.get_world_size() + if world_size == 1: + return + dist.barrier() + + +@functools.lru_cache() +def _get_global_gloo_group(): + """ + Return a process group based on gloo backend, containing all the ranks + The result is cached. + """ + if dist.get_backend() == "nccl": + return dist.new_group(backend="gloo") + else: + return dist.group.WORLD + + +def _serialize_to_tensor(data, group): + backend = dist.get_backend(group) + assert backend in ["gloo", "nccl"] + device = torch.device("cpu" if backend == "gloo" else "cuda") + + buffer = pickle.dumps(data) + if len(buffer) > 1024 ** 3: + logger = logging.getLogger(__name__) + logger.warning( + "Rank {} trying to all-gather {:.2f} GB of data on device {}".format( + get_rank(), len(buffer) / (1024 ** 3), device + ) + ) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to(device=device) + return tensor + + +def _pad_to_largest_tensor(tensor, group): + """ + Returns: + list[int]: size of the tensor, on each rank + Tensor: padded tensor that has the max size + """ + world_size = dist.get_world_size(group=group) + assert ( + world_size >= 1 + ), "comm.gather/all_gather must be called from ranks within the given group!" + local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device) + size_list = [ + torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size) + ] + dist.all_gather(size_list, local_size, group=group) + size_list = [int(size.item()) for size in size_list] + + max_size = max(size_list) + + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + if local_size != max_size: + padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device) + tensor = torch.cat((tensor, padding), dim=0) + return size_list, tensor + + +def all_gather(data, group=None): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors). + Args: + data: any picklable object + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. 
+ Returns: + list[data]: list of data gathered from each rank + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + if dist.get_world_size(group) == 1: + return [data] + + tensor = _serialize_to_tensor(data, group) + + size_list, tensor = _pad_to_largest_tensor(tensor, group) + max_size = max(size_list) + + # receiving Tensor from all ranks + tensor_list = [ + torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list + ] + dist.all_gather(tensor_list, tensor, group=group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def gather(data, dst=0, group=None): + """ + Run gather on arbitrary picklable data (not necessarily tensors). + Args: + data: any picklable object + dst (int): destination rank + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + Returns: + list[data]: on dst, a list of data gathered from each rank. Otherwise, + an empty list. + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + if dist.get_world_size(group=group) == 1: + return [data] + rank = dist.get_rank(group=group) + + tensor = _serialize_to_tensor(data, group) + size_list, tensor = _pad_to_largest_tensor(tensor, group) + + # receiving Tensor from all ranks + if rank == dst: + max_size = max(size_list) + tensor_list = [ + torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list + ] + dist.gather(tensor, tensor_list, dst=dst, group=group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + return data_list + else: + dist.gather(tensor, [], dst=dst, group=group) + return [] + + +def shared_random_seed(): + """ + Returns: + int: a random number that is the same across all workers. + If workers need a shared RNG, they can use this shared seed to + create one. + All workers must call this function, otherwise it will deadlock. + """ + ints = np.random.randint(2 ** 31) + all_ints = all_gather(ints) + return all_ints[0] + + +def reduce_dict(input_dict, average=True): + """ + Reduce the values in the dictionary from all processes so that process with rank + 0 has the reduced results. + Args: + input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor. + average (bool): whether to do average or sum + Returns: + a dict with the same keys as input_dict, after reduction. 
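+        Example (editor's illustrative sketch; the loss names are invented):
+            loss_dict = {"loss_sem": sem_loss.detach(), "loss_center": ctr_loss.detach()}
+            reduced = reduce_dict(loss_dict)  # on rank 0: per-key mean across ranks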
+ """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.reduce(values, dst=0) + if dist.get_rank() == 0 and average: + # only main process gets accumulated, so only divide by + # world_size in this case + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict diff --git a/CDARTS_segmentation/segmentation/utils/debug.py b/CDARTS_segmentation/segmentation/utils/debug.py new file mode 100644 index 0000000..48d9f89 --- /dev/null +++ b/CDARTS_segmentation/segmentation/utils/debug.py @@ -0,0 +1,196 @@ +# ------------------------------------------------------------------------------ +# Saves raw outputs and targets. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import os + +import numpy as np +import PIL.Image as img + +import torch + +from .save_annotation import label_to_color_image +from .flow_vis import flow_compute_color + + +def save_debug_images(dataset, batch_images, batch_targets, batch_outputs, out_dir=None, iteration=0, + target_keys=('semantic', 'center', 'offset', 'center_weights', 'offset_weights'), + output_keys=('semantic', 'center', 'offset'), + iteration_to_remove=-1, is_train=True): + """Saves a mini-batch of images for debugging purpose. + - image: the augmented input image + - label: the augmented labels including + - semantic: semantic segmentation label + - center: center heatmap + - offset: offset field + - instance_ignore_mask: ignore mask + - prediction: the raw output of the model (without post-processing) + - semantic: semantic segmentation label + - center: center heatmap + - offset: offset field + Args: + dataset: The Dataset. + batch_images: Tensor of shape [N, 3, H, W], a batch of input images. + batch_targets: Dict, a dict containing batch of targets. + - semantic: a Tensor of shape [N, H, W] + - center: a Tensor of shape [N, 1, H, W] + - offset: a Tensor of shape [N, 2, H, W] + - semantic_weights: a Tensor of shape [N, H, W] + - center_weights: a Tensor of shape [N, H, W] + - offset_weights: a Tensor of shape [N, H, W] + batch_outputs: Dict, a dict containing batch of outputs. + - semantic: a Tensor of shape [N, H, W] + - center: a Tensor of shape [N, 1, H, W] + - offset: a Tensor of shape [N, 2, H, W] + out_dir: String, the directory to which the results will be saved. + iteration: Integer, iteration number. + target_keys: List, target keys to save. + output_keys: List, output keys to save. + iteration_to_remove: Integer, iteration number to remove. + is_train: Boolean, save train or test debugging image. 
+ """ + batch_size = batch_images.size(0) + map_height = batch_images.size(2) + map_width = batch_images.size(3) + + grid_image = np.zeros( + (map_height, batch_size * map_width, 3), dtype=np.uint8 + ) + + num_targets = len(target_keys) + grid_target = np.zeros( + (num_targets * map_height, batch_size * map_width, 3), dtype=np.uint8 + ) + + num_outputs = len(output_keys) + grid_output = np.zeros( + (num_outputs * map_height, batch_size * map_width, 3), dtype=np.uint8 + ) + + semantic_pred = torch.argmax(batch_outputs['semantic'].detach(), dim=1) + if 'foreground' in batch_outputs: + foreground_pred = torch.argmax(batch_outputs['foreground'].detach(), dim=1) + else: + foreground_pred = None + + for i in range(batch_size): + width_begin = map_width * i + width_end = map_width * (i + 1) + + # save images + image = dataset.reverse_transform(batch_images[i]) + grid_image[:, width_begin:width_end, :] = image + + if 'semantic' in target_keys: + # save gt semantic + gt_sem = batch_targets['semantic'][i].cpu().numpy() + gt_sem = label_to_color_image(gt_sem, dataset.create_label_colormap()) + grid_target[:map_height, width_begin:width_end, :] = gt_sem + + if 'center' in target_keys: + # save gt center + gt_ctr = batch_targets['center'][i].squeeze().cpu().numpy() + gt_ctr = gt_ctr[:, :, None] * np.array([255, 0, 0]).reshape((1, 1, 3)) + gt_ctr = gt_ctr.clip(0, 255) + # gt_ctr = 0.7 * gt_ctr + (1 - 0.3) * image + grid_target[map_height:2 * map_height, width_begin:width_end, :] = gt_ctr + + if 'offset' in target_keys: + # save gt offset + gt_off = batch_targets['offset'][i].permute(1, 2, 0).cpu().numpy() + gt_off = flow_compute_color(gt_off[:, :, 1], gt_off[:, :, 0]) + grid_target[2 * map_height:3 * map_height, width_begin:width_end, :] = gt_off + + if 'semantic_weights' in target_keys: + # save ignore mask + gt_ign = batch_targets['semantic_weights'][i].cpu().numpy() + gt_ign = gt_ign[:, :, None] / np.max(gt_ign) * 255 + gt_ign = np.tile(gt_ign, (1, 1, 3)) + grid_target[3 * map_height:4 * map_height, width_begin:width_end, :] = gt_ign + + if 'center_weights' in target_keys: + # save ignore mask + gt_ign = batch_targets['center_weights'][i].cpu().numpy() + gt_ign = gt_ign[:, :, None] * 255 + gt_ign = np.tile(gt_ign, (1, 1, 3)) + grid_target[4 * map_height:5 * map_height, width_begin:width_end, :] = gt_ign + + if 'offset_weights' in target_keys: + # save ignore mask + gt_ign = batch_targets['offset_weights'][i].cpu().numpy() + gt_ign = gt_ign[:, :, None] * 255 + gt_ign = np.tile(gt_ign, (1, 1, 3)) + grid_target[5 * map_height:6 * map_height, width_begin:width_end, :] = gt_ign + + if 'foreground' in target_keys: + # save gt foreground + gt_fg = batch_targets['foreground'][i].cpu().numpy() + gt_fg = gt_fg[:, :, None] * 255 + grid_target[6 * map_height:7 * map_height, width_begin:width_end, :] = gt_fg + + if 'semantic' in output_keys: + # save pred semantic + pred_sem = semantic_pred[i].cpu().numpy() + pred_sem = label_to_color_image(pred_sem, dataset.create_label_colormap()) + grid_output[:map_height, width_begin:width_end, :] = pred_sem + + if 'center' in output_keys: + # save pred center + pred_ctr = batch_outputs['center'][i].detach().squeeze().cpu().numpy() + pred_ctr = pred_ctr[:, :, None] * np.array([255, 0, 0]).reshape((1, 1, 3)) + pred_ctr = pred_ctr.clip(0, 255) + # pred_ctr = 0.7 * pred_ctr + (1 - 0.3) * image + grid_output[map_height:2 * map_height, width_begin:width_end, :] = pred_ctr + + if 'offset' in output_keys: + # save pred offset + pred_ctr = 
batch_outputs['offset'][i].detach().permute(1, 2, 0).cpu().numpy() + pred_ctr = flow_compute_color(pred_ctr[:, :, 1], pred_ctr[:, :, 0]) + grid_output[2 * map_height:3 * map_height, width_begin:width_end, :] = pred_ctr + + if 'foreground' in output_keys: + # save pred foreground + if foreground_pred is not None: + pred_fg = foreground_pred[i].cpu().numpy() + pred_fg = pred_fg[:, :, None] * 255 + grid_output[3 * map_height:4 * map_height, width_begin:width_end, :] = pred_fg + + if out_dir is not None: + if is_train: + pil_image = img.fromarray(grid_image.astype(dtype=np.uint8)) + with open('%s/%s_%d.png' % (out_dir, 'debug_batch_images', iteration), mode='wb') as f: + pil_image.save(f, 'PNG') + pil_image = img.fromarray(grid_target.astype(dtype=np.uint8)) + with open('%s/%s_%d.png' % (out_dir, 'debug_batch_targets', iteration), mode='wb') as f: + pil_image.save(f, 'PNG') + pil_image = img.fromarray(grid_output.astype(dtype=np.uint8)) + with open('%s/%s_%d.png' % (out_dir, 'debug_batch_outputs', iteration), mode='wb') as f: + pil_image.save(f, 'PNG') + else: + pil_image = img.fromarray(grid_image.astype(dtype=np.uint8)) + with open('%s/%s_%d.png' % (out_dir, 'debug_test_images', iteration), mode='wb') as f: + pil_image.save(f, 'PNG') + if grid_target.size: + pil_image = img.fromarray(grid_target.astype(dtype=np.uint8)) + with open('%s/%s_%d.png' % (out_dir, 'debug_test_targets', iteration), mode='wb') as f: + pil_image.save(f, 'PNG') + pil_image = img.fromarray(grid_output.astype(dtype=np.uint8)) + with open('%s/%s_%d.png' % (out_dir, 'debug_test_outputs', iteration), mode='wb') as f: + pil_image.save(f, 'PNG') + + if is_train: + if iteration_to_remove >= 0: + if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_images', iteration_to_remove)): + os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_images', iteration_to_remove)) + if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_targets', iteration_to_remove)): + os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_targets', iteration_to_remove)) + if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_outputs', iteration_to_remove)): + os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_outputs', iteration_to_remove)) + # 0 is a special iter + if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_images', 0)): + os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_images', 0)) + if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_targets', 0)): + os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_targets', 0)) + if os.path.exists('%s/%s_%d.png' % (out_dir, 'debug_batch_outputs', 0)): + os.remove('%s/%s_%d.png' % (out_dir, 'debug_batch_outputs', 0)) diff --git a/CDARTS_segmentation/segmentation/utils/env.py b/CDARTS_segmentation/segmentation/utils/env.py new file mode 100644 index 0000000..b724dcf --- /dev/null +++ b/CDARTS_segmentation/segmentation/utils/env.py @@ -0,0 +1,35 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/env.py#L15 +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import importlib +import importlib.util +import logging +import numpy as np +import os +import random +import sys +from datetime import datetime +import torch + +__all__ = ["seed_all_rng"] + + +def seed_all_rng(seed=None): + """ + Set the random seed for the RNG in torch, numpy and python. 
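+    For example, seed_all_rng(42) makes a run repeatable, while seed_all_rng()
+    derives a fresh seed from the PID, the current time and os.urandom, as the
+    body below shows. (Clarifying example added in editing.)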
+ Args: + seed (int): if None, will use a strong random seed. + """ + if seed is None: + seed = ( + os.getpid() + + int(datetime.now().strftime("%S%f")) + + int.from_bytes(os.urandom(2), "big") + ) + logger = logging.getLogger(__name__) + logger.info("Using a generated random seed {}".format(seed)) + np.random.seed(seed) + torch.set_rng_state(torch.manual_seed(seed).get_state()) + random.seed(seed) diff --git a/CDARTS_segmentation/segmentation/utils/flow_vis.py b/CDARTS_segmentation/segmentation/utils/flow_vis.py new file mode 100644 index 0000000..6f4305d --- /dev/null +++ b/CDARTS_segmentation/segmentation/utils/flow_vis.py @@ -0,0 +1,134 @@ +# MIT License +# +# Copyright (c) 2018 Tom Runia +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to conditions. +# +# Author: Tom Runia +# Date Created: 2018-08-03 +# Reference: https://github.com/tomrunia/OpticalFlow_Visualization + +import numpy as np + + +def make_colorwheel(): + ''' + Generates a color wheel for optical flow visualization as presented in: + Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) + URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf + According to the C++ source code of Daniel Scharstein + According to the Matlab source code of Deqing Sun + ''' + + RY = 15 + YG = 6 + GC = 4 + CB = 11 + BM = 13 + MR = 6 + + ncols = RY + YG + GC + CB + BM + MR + colorwheel = np.zeros((ncols, 3)) + col = 0 + + # RY + colorwheel[0:RY, 0] = 255 + colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY) + col = col+RY + # YG + colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG) + colorwheel[col:col+YG, 1] = 255 + col = col+YG + # GC + colorwheel[col:col+GC, 1] = 255 + colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC) + col = col+GC + # CB + colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB) + colorwheel[col:col+CB, 2] = 255 + col = col+CB + # BM + colorwheel[col:col+BM, 2] = 255 + colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM) + col = col+BM + # MR + colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR) + colorwheel[col:col+MR, 0] = 255 + return colorwheel + + +def flow_compute_color(u, v, convert_to_bgr=False): + ''' + Applies the flow color wheel to (possibly clipped) flow components u and v. 
+ According to the C++ source code of Daniel Scharstein + According to the Matlab source code of Deqing Sun + :param u: np.ndarray, input horizontal flow + :param v: np.ndarray, input vertical flow + :param convert_to_bgr: bool, whether to change ordering and output BGR instead of RGB + :return: + ''' + + flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) + + colorwheel = make_colorwheel() # shape [55x3] + ncols = colorwheel.shape[0] + + rad = np.sqrt(np.square(u) + np.square(v)) + a = np.arctan2(-v, -u)/np.pi + + fk = (a+1) / 2*(ncols-1) + k0 = np.floor(fk).astype(np.int32) + k1 = k0 + 1 + k1[k1 == ncols] = 0 + f = fk - k0 + + for i in range(colorwheel.shape[1]): + + tmp = colorwheel[:,i] + col0 = tmp[k0] / 255.0 + col1 = tmp[k1] / 255.0 + col = (1-f)*col0 + f*col1 + + idx = (rad <= 1) + col[idx] = 1 - rad[idx] * (1-col[idx]) + col[~idx] = col[~idx] * 0.75 # out of range? + + # Note the 2-i => BGR instead of RGB + ch_idx = 2-i if convert_to_bgr else i + flow_image[:,:,ch_idx] = np.floor(255 * col) + + return flow_image + + +def flow_to_color(flow_uv, clip_flow=None, convert_to_bgr=False): + ''' + Expects a two dimensional flow image of shape [H,W,2] + According to the C++ source code of Daniel Scharstein + According to the Matlab source code of Deqing Sun + :param flow_uv: np.ndarray of shape [H,W,2] + :param clip_flow: float, maximum clipping value for flow + :return: + ''' + + assert flow_uv.ndim == 3, 'input flow must have three dimensions' + assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]' + + if clip_flow is not None: + flow_uv = np.clip(flow_uv, 0, clip_flow) + + u = flow_uv[:,:,0] + v = flow_uv[:,:,1] + + rad = np.sqrt(np.square(u) + np.square(v)) + rad_max = np.max(rad) + + epsilon = 1e-5 + u = u / (rad_max + epsilon) + v = v / (rad_max + epsilon) + + return flow_compute_color(u, v, convert_to_bgr) diff --git a/CDARTS_segmentation/segmentation/utils/logger.py b/CDARTS_segmentation/segmentation/utils/logger.py new file mode 100644 index 0000000..9d3f021 --- /dev/null +++ b/CDARTS_segmentation/segmentation/utils/logger.py @@ -0,0 +1,219 @@ +# ------------------------------------------------------------------------------ +# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/logger.py +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import functools +import logging +import os +import sys +import time +from collections import Counter +from fvcore.common.file_io import PathManager +from tabulate import tabulate +from termcolor import colored + + +class _ColorfulFormatter(logging.Formatter): + def __init__(self, *args, **kwargs): + self._root_name = kwargs.pop("root_name") + "." + self._abbrev_name = kwargs.pop("abbrev_name", "") + if len(self._abbrev_name): + self._abbrev_name = self._abbrev_name + "." 
+ super(_ColorfulFormatter, self).__init__(*args, **kwargs) + + def formatMessage(self, record): + record.name = record.name.replace(self._root_name, self._abbrev_name) + log = super(_ColorfulFormatter, self).formatMessage(record) + if record.levelno == logging.WARNING: + prefix = colored("WARNING", "red", attrs=["blink"]) + elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: + prefix = colored("ERROR", "red", attrs=["blink", "underline"]) + else: + return log + return prefix + " " + log + + +@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers +def setup_logger( + output=None, distributed_rank=0, *, color=True, name="segmentation", abbrev_name=None +): + """ + Initialize the segmentation logger and set its verbosity level to "INFO". + Args: + output (str): a file name or a directory to save log. If None, will not save log file. + If ends with ".txt" or ".log", assumed to be a file name. + Otherwise, logs will be saved to `output/log.txt`. + name (str): the root module name of this logger + abbrev_name (str): an abbreviation of the module, to avoid long names in logs. + Set to "" to not log the root module in logs. + By default, will abbreviate "segmentation" to "seg" and leave other + modules unchanged. + Returns: + logging.Logger: a logger + """ + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + logger.propagate = False + + if abbrev_name is None: + abbrev_name = "seg" if name == "segmentation" else name + + plain_formatter = logging.Formatter( + "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S" + ) + # stdout logging: master only + if distributed_rank == 0: + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.DEBUG) + if color: + formatter = _ColorfulFormatter( + colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s", + datefmt="%m/%d %H:%M:%S", + root_name=name, + abbrev_name=str(abbrev_name), + ) + else: + formatter = plain_formatter + ch.setFormatter(formatter) + logger.addHandler(ch) + + # file logging: all workers + if output is not None: + if output.endswith(".txt") or output.endswith(".log"): + filename = output + else: + filename = os.path.join(output, "log.txt") + if distributed_rank > 0: + filename = filename + ".rank{}".format(distributed_rank) + # os.makedirs(os.path.dirname(filename)) + PathManager.mkdirs(os.path.dirname(filename)) + + fh = logging.StreamHandler(_cached_log_stream(filename)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(plain_formatter) + logger.addHandler(fh) + + return logger + + +# cache the opened file object, so that different calls to `setup_logger` +# with the same file name can safely write to the same file. +@functools.lru_cache(maxsize=None) +def _cached_log_stream(filename): + return open(filename, "a") + + +""" +Below are some other convenient logging methods. 
+They are mainly adopted from +https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py +""" + + +def _find_caller(): + """ + Returns: + str: module name of the caller + tuple: a hashable key to be used to identify different callers + """ + frame = sys._getframe(2) + while frame: + code = frame.f_code + if os.path.join("utils", "logger.") not in code.co_filename: + mod_name = frame.f_globals["__name__"] + if mod_name == "__main__": + mod_name = "segmentation" + return mod_name, (code.co_filename, frame.f_lineno, code.co_name) + frame = frame.f_back + + +_LOG_COUNTER = Counter() +_LOG_TIMER = {} + + +def log_first_n(lvl, msg, n=1, *, name=None, key="caller"): + """ + Log only for the first n times. + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + key (str or tuple[str]): the string(s) can be one of "caller" or + "message", which defines how to identify duplicated logs. + For example, if called with `n=1, key="caller"`, this function + will only log the first call from the same caller, regardless of + the message content. + If called with `n=1, key="message"`, this function will log the + same content only once, even if they are called from different places. + If called with `n=1, key=("caller", "message")`, this function + will not log only if the same caller has logged the same message before. + """ + if isinstance(key, str): + key = (key,) + assert len(key) > 0 + + caller_module, caller_key = _find_caller() + hash_key = () + if "caller" in key: + hash_key = hash_key + caller_key + if "message" in key: + hash_key = hash_key + (msg,) + + _LOG_COUNTER[hash_key] += 1 + if _LOG_COUNTER[hash_key] <= n: + logging.getLogger(name or caller_module).log(lvl, msg) + + +def log_every_n(lvl, msg, n=1, *, name=None): + """ + Log once per n times. + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + """ + caller_module, key = _find_caller() + _LOG_COUNTER[key] += 1 + if n == 1 or _LOG_COUNTER[key] % n == 1: + logging.getLogger(name or caller_module).log(lvl, msg) + + +def log_every_n_seconds(lvl, msg, n=1, *, name=None): + """ + Log no more than once per n seconds. + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + """ + caller_module, key = _find_caller() + last_logged = _LOG_TIMER.get(key, None) + current_time = time.time() + if last_logged is None or current_time - last_logged >= n: + logging.getLogger(name or caller_module).log(lvl, msg) + _LOG_TIMER[key] = current_time + + +def create_small_table(small_dict): + """ + Create a small table using the keys of small_dict as headers. This is only + suitable for small dictionaries. + Args: + small_dict (dict): a result dictionary of only a few items. + Returns: + str: the table as a string. 
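+    Example (illustrative; the exact output is whatever tabulate's "pipe" format renders):
+        create_small_table({"AP": 0.500, "AP50": 0.700})
+        # -> a one-row, pipe-delimited table with AP / AP50 as the headers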
+ """ + keys, values = tuple(zip(*small_dict.items())) + table = tabulate( + [values], + headers=keys, + tablefmt="pipe", + floatfmt=".3f", + stralign="center", + numalign="center", + ) + return table \ No newline at end of file diff --git a/CDARTS_segmentation/segmentation/utils/save_annotation.py b/CDARTS_segmentation/segmentation/utils/save_annotation.py new file mode 100644 index 0000000..9f98234 --- /dev/null +++ b/CDARTS_segmentation/segmentation/utils/save_annotation.py @@ -0,0 +1,360 @@ +# ------------------------------------------------------------------------------ +# Saves output to png image for visualization. +# Reference: https://github.com/tensorflow/models/blob/master/research/deeplab/utils/save_annotation.py +# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import numpy as np +import PIL.Image as img +from PIL import ImageDraw + +from .flow_vis import flow_compute_color + +# Refence: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L14 +_COLORS = np.array( + [ + 0.000, 0.447, 0.741, + 0.850, 0.325, 0.098, + 0.929, 0.694, 0.125, + 0.494, 0.184, 0.556, + 0.466, 0.674, 0.188, + 0.301, 0.745, 0.933, + 0.635, 0.078, 0.184, + 0.300, 0.300, 0.300, + 0.600, 0.600, 0.600, + 1.000, 0.000, 0.000, + 1.000, 0.500, 0.000, + 0.749, 0.749, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 1.000, + 0.667, 0.000, 1.000, + 0.333, 0.333, 0.000, + 0.333, 0.667, 0.000, + 0.333, 1.000, 0.000, + 0.667, 0.333, 0.000, + 0.667, 0.667, 0.000, + 0.667, 1.000, 0.000, + 1.000, 0.333, 0.000, + 1.000, 0.667, 0.000, + 1.000, 1.000, 0.000, + 0.000, 0.333, 0.500, + 0.000, 0.667, 0.500, + 0.000, 1.000, 0.500, + 0.333, 0.000, 0.500, + 0.333, 0.333, 0.500, + 0.333, 0.667, 0.500, + 0.333, 1.000, 0.500, + 0.667, 0.000, 0.500, + 0.667, 0.333, 0.500, + 0.667, 0.667, 0.500, + 0.667, 1.000, 0.500, + 1.000, 0.000, 0.500, + 1.000, 0.333, 0.500, + 1.000, 0.667, 0.500, + 1.000, 1.000, 0.500, + 0.000, 0.333, 1.000, + 0.000, 0.667, 1.000, + 0.000, 1.000, 1.000, + 0.333, 0.000, 1.000, + 0.333, 0.333, 1.000, + 0.333, 0.667, 1.000, + 0.333, 1.000, 1.000, + 0.667, 0.000, 1.000, + 0.667, 0.333, 1.000, + 0.667, 0.667, 1.000, + 0.667, 1.000, 1.000, + 1.000, 0.000, 1.000, + 1.000, 0.333, 1.000, + 1.000, 0.667, 1.000, + 0.333, 0.000, 0.000, + 0.500, 0.000, 0.000, + 0.667, 0.000, 0.000, + 0.833, 0.000, 0.000, + 1.000, 0.000, 0.000, + 0.000, 0.167, 0.000, + 0.000, 0.333, 0.000, + 0.000, 0.500, 0.000, + 0.000, 0.667, 0.000, + 0.000, 0.833, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 0.167, + 0.000, 0.000, 0.333, + 0.000, 0.000, 0.500, + 0.000, 0.000, 0.667, + 0.000, 0.000, 0.833, + 0.000, 0.000, 1.000, + 0.000, 0.000, 0.000, + 0.143, 0.143, 0.143, + 0.857, 0.857, 0.857, + 1.000, 1.000, 1.000 + ] +).astype(np.float32).reshape(-1, 3) + + +def random_color(rgb=False, maximum=255): + """ + Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L111 + Args: + rgb (bool): whether to return RGB colors or BGR colors. 
+        maximum (int): either 255 or 1
+    Returns:
+        ndarray: a vector of 3 numbers
+    """
+    idx = np.random.randint(0, len(_COLORS))
+    ret = _COLORS[idx] * maximum
+    if not rgb:
+        ret = ret[::-1]
+    return ret
+
+
+def save_annotation(label,
+                    save_dir,
+                    filename,
+                    add_colormap=True,
+                    normalize_to_unit_values=False,
+                    scale_values=False,
+                    colormap=None,
+                    image=None):
+    """Saves the given label to image on disk.
+    Args:
+        label: The numpy array to be saved. The data will be converted
+            to uint8 and saved as png image.
+        save_dir: String, the directory to which the results will be saved.
+        filename: String, the image filename.
+        add_colormap: Boolean, add color map to the label or not.
+        normalize_to_unit_values: Boolean, normalize the input values to [0, 1].
+        scale_values: Boolean, scale the input values to [0, 255] for visualization.
+        colormap: A colormap for visualizing segmentation results.
+        image: merge label with image if provided.
+    """
+    # Add colormap for visualizing the prediction.
+    if add_colormap:
+        colored_label = label_to_color_image(label, colormap)
+    else:
+        colored_label = label
+    if normalize_to_unit_values:
+        min_value = np.amin(colored_label)
+        max_value = np.amax(colored_label)
+        range_value = max_value - min_value
+        if range_value != 0:
+            colored_label = (colored_label - min_value) / range_value
+
+    if scale_values:
+        colored_label = 255. * colored_label
+
+    if image is not None:
+        colored_label = 0.5 * colored_label + 0.5 * image
+
+    pil_image = img.fromarray(colored_label.astype(dtype=np.uint8))
+    with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
+        pil_image.save(f, 'PNG')
+
+
+def label_to_color_image(label, colormap=None):
+    """Adds color defined by the dataset colormap to the label.
+    Args:
+        label: A 2D array with integer type, storing the segmentation label.
+        colormap: A colormap for visualizing segmentation results.
+    Returns:
+        result: A 2D array with floating type. The element of the array
+            is the color indexed by the corresponding element in the input label
+            to the dataset color map.
+    Raises:
+        ValueError: If label is not of rank 2 or its value is larger than color
+            map maximum entry.
+    """
+    if label.ndim != 2:
+        raise ValueError('Expect 2-D input label. Got {}'.format(label.shape))
+
+    if colormap is None:
+        raise ValueError('Expect a valid colormap.')
+
+    return colormap[label]
+
+
+def save_instance_annotation(label,
+                             save_dir,
+                             filename,
+                             stuff_id=0,
+                             image=None):
+    """Saves the given label to image on disk.
+    Args:
+        label: The numpy array to be saved. The data will be converted
+            to uint8 and saved as png image.
+        save_dir: String, the directory to which the results will be saved.
+        filename: String, the image filename.
+        stuff_id: Integer, the segment id that should not be plotted (it is painted black).
+        image: merge label with image if provided.
+    """
+    # Add colormap for visualizing the prediction.
+    ids = np.unique(label)
+    num_colors = len(ids)
+    colormap = np.zeros((num_colors, 3), dtype=np.uint8)
+    # Maps label to continuous value.
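+    # (Editor's note) The loop below rewrites `label` in place so that the i-th
+    # unique id becomes i; `colormap[label]` can then be used as a row-wise lookup,
+    # and the row belonging to stuff_id is forced to black.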
+    for i in range(num_colors):
+        label[label == ids[i]] = i
+        colormap[i, :] = random_color(rgb=True, maximum=255)
+        if ids[i] == stuff_id:
+            colormap[i, :] = np.array([0, 0, 0])
+    colored_label = colormap[label]
+
+    if image is not None:
+        colored_label = 0.5 * colored_label + 0.5 * image
+
+    pil_image = img.fromarray(colored_label.astype(dtype=np.uint8))
+    with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
+        pil_image.save(f, 'PNG')
+
+
+def save_panoptic_annotation(label,
+                             save_dir,
+                             filename,
+                             label_divisor,
+                             colormap=None,
+                             image=None):
+    """Saves the given label to image on disk.
+    Args:
+        label: The numpy array to be saved. The data will be converted
+            to uint8 and saved as png image.
+        save_dir: String, the directory to which the results will be saved.
+        filename: String, the image filename.
+        label_divisor: An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id.
+        colormap: A colormap for visualizing segmentation results.
+        image: merge label with image if provided.
+    """
+    if colormap is None:
+        raise ValueError('Expect a valid colormap.')
+
+    # Add colormap to label.
+    colored_label = np.zeros((label.shape[0], label.shape[1], 3), dtype=np.uint8)
+    taken_colors = {(0, 0, 0)}  # fixed: `set([0, 0, 0])` collapses to {0} and never matches the color tuples below
+
+    def _random_color(base, max_dist=30):
+        new_color = base + np.random.randint(low=-max_dist,
+                                             high=max_dist + 1,
+                                             size=3)
+        return tuple(np.maximum(0, np.minimum(255, new_color)))
+
+    for lab in np.unique(label):
+        mask = label == lab
+        base_color = colormap[lab // label_divisor]
+        if tuple(base_color) not in taken_colors:
+            taken_colors.add(tuple(base_color))
+            color = base_color
+        else:
+            while True:
+                color = _random_color(base_color)
+                if color not in taken_colors:
+                    taken_colors.add(color)
+                    break
+        colored_label[mask] = color
+
+    if image is not None:
+        colored_label = 0.5 * colored_label + 0.5 * image
+
+    pil_image = img.fromarray(colored_label.astype(dtype=np.uint8))
+    with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
+        pil_image.save(f, 'PNG')
+
+
+def save_center_image(image,
+                      center_points,
+                      save_dir,
+                      filename,
+                      radius=3):
+    """Saves image with center points.
+    Args:
+        image: The image.
+        center_points: List of tuple [(y, x)], center point coordinates.
+        save_dir: String, the directory to which the results will be saved.
+        filename: String, the image filename.
+        radius: Int, radius of the center point.
+    """
+    pil_image = img.fromarray(image.astype(dtype=np.uint8))
+    draw = ImageDraw.Draw(pil_image)
+    r = radius
+    assigned_colors = [list(random_color(rgb=True, maximum=255)) + [255] for _ in range(len(center_points))]
+    for i, point in enumerate(center_points):
+        leftUpPoint = (point[1] - r, point[0] - r)
+        rightDownPoint = (point[1] + r, point[0] + r)
+        twoPointList = [leftUpPoint, rightDownPoint]
+        draw.ellipse(twoPointList, fill=tuple(assigned_colors[i]))
+    with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
+        pil_image.save(f, 'PNG')
+
+
+def save_heatmap_image(image,
+                       center_heatmap,
+                       save_dir,
+                       filename,
+                       ratio=0.5):
+    """Saves image with heatmap.
+    Args:
+        image: The image.
+        center_heatmap: Ndarray, center heatmap.
+        save_dir: String, the directory to which the results will be saved.
+        filename: String, the image filename.
+        ratio: Float, ratio to mix heatmap and image, out = ratio * heatmap + (1 - ratio) * image.
+    """
+    center_heatmap = center_heatmap[:, :, None] * np.array([255, 0, 0]).reshape((1, 1, 3))
+    center_heatmap = center_heatmap.clip(0, 255)
+    image = ratio * center_heatmap + (1 - ratio) * image
+    pil_image = img.fromarray(image.astype(dtype=np.uint8))
+    with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
+        pil_image.save(f, 'PNG')
+
+
+def save_heatmap_and_center_image(image,
+                                  center_heatmap,
+                                  center_points,
+                                  save_dir,
+                                  filename,
+                                  ratio=0.5,
+                                  radius=25,
+                                  binarize_heatmap=True):
+    """Saves image with non-negative heatmap and center radius.
+    Args:
+        image: The image.
+        center_heatmap: Ndarray, center heatmap.
+        center_points: List of tuple [(y, x)], center point coordinates.
+        save_dir: String, the directory to which the results will be saved.
+        filename: String, the image filename.
+        ratio: Float, ratio to mix heatmap and image, out = ratio * heatmap + (1 - ratio) * image.
+        radius: Int, radius of the center point.
+    """
+    if binarize_heatmap:
+        center_heatmap = (center_heatmap[:, :, None] > 0) * np.array([255, 0, 0]).reshape((1, 1, 3))
+    else:
+        center_heatmap = center_heatmap[:, :, None] * np.array([255, 0, 0]).reshape((1, 1, 3))
+        center_heatmap = center_heatmap.clip(0, 255)
+    image = ratio * center_heatmap + (1 - ratio) * image
+    pil_image = img.fromarray(image.astype(dtype=np.uint8))
+    draw = ImageDraw.Draw(pil_image)
+    r = radius
+    assigned_colors = [list(random_color(rgb=True, maximum=255)) + [255] for _ in range(len(center_points))]
+    for i, point in enumerate(center_points):
+        leftUpPoint = (point[1] - r, point[0] - r)
+        rightDownPoint = (point[1] + r, point[0] + r)
+        twoPointList = [leftUpPoint, rightDownPoint]
+        if binarize_heatmap:
+            draw.ellipse(twoPointList, outline='blue')
+        else:
+            draw.ellipse(twoPointList, fill=tuple(assigned_colors[i]))
+    with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
+        pil_image.save(f, 'PNG')
+
+
+def save_offset_image(offset,
+                      save_dir,
+                      filename):
+    """Saves the offset field as a color-coded image.
+    Args:
+        offset: The offset field to save.
+        save_dir: String, the directory to which the results will be saved.
+        filename: String, the image filename.
+    """
+    offset_image = flow_compute_color(offset[:, :, 1], offset[:, :, 0])
+    pil_image = img.fromarray(offset_image.astype(dtype=np.uint8))
+    with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
+        pil_image.save(f, 'PNG')
diff --git a/CDARTS_segmentation/segmentation/utils/test_utils.py b/CDARTS_segmentation/segmentation/utils/test_utils.py
new file mode 100644
index 0000000..8277815
--- /dev/null
+++ b/CDARTS_segmentation/segmentation/utils/test_utils.py
@@ -0,0 +1,137 @@
+# ------------------------------------------------------------------------------
+# Utility functions for multi-scale testing.
+# Written by Pingjun (https://github.com/bowenc0221/panoptic-deeplab/issues/25) +# Modified by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ +import cv2 +from collections import OrderedDict + +import numpy as np +import torch +import torch.nn.functional as F + +import segmentation.data.transforms.transforms as T + + +def flip_tensor(x, dim): + """ + Flip Tensor along a dimension + """ + dim = x.dim() + dim if dim < 0 else dim + return x[tuple(slice(None, None) if i != dim + else torch.arange(x.size(i) - 1, -1, -1).long() + for i in range(x.dim()))] + + +def upsample_predictions(pred, input_shape,scale): + # Override upsample method to correctly handle `offset` + result = OrderedDict() + for key in pred.keys(): + out = F.interpolate(pred[key], size=input_shape, mode='bilinear', align_corners=True) + if 'offset' in key: #The order of second dim is (offset_y, offset_x) + out *= 1.0 / scale + result[key] = out + return result + +def get_semantic_segmentation(sem): + """ + Post-processing for semantic segmentation branch. + Arguments: + sem: A Tensor of shape [N, C, H, W], where N is the batch size, for consistent, we only + support N=1. + Returns: + A Tensor of shape [1, H, W] (to be gathered by distributed data parallel). + Raises: + ValueError, if batch size is not 1. + """ + if sem.size(0) != 1: + raise ValueError('Only supports inference for batch size = 1') + sem = sem.squeeze(0) + return torch.argmax(sem, dim=0, keepdim=True) + +def multi_scale_inference(config, model, raw_image, t_image, device): + scales = config.TEST.SCALE_LIST + flip = config.TEST.FLIP_TEST + # output_stride = 2 ** (5 - sum(config.MODEL.BACKBONE.DILATION)) + # train_crop_h, train_crop_w = config.TEST.CROP_SIZE + # scale = 1. / output_stride + # pool_h = int((float(train_crop_h) - 1.0) * scale + 1.0) + # pool_w = int((float(train_crop_w) - 1.0) * scale + 1.0) + # transforms + transforms = T.Compose( + [ + T.ToTensor(), + T.Normalize(config.DATASET.MEAN, config.DATASET.STD) + ] + ) + if flip: + flip_range = 2 + else: + flip_range = 1 + + # h,w,_ = raw_image.shape + _, _, h, w = t_image.shape + org_h_pad = (h + 31) // 32 * 32 + org_w_pad = (w + 31) // 32 * 32 + + sum_semantic_with_flip = 0 + sum_center_with_flip = 0 + sum_offset_with_flip = 0 + + for i in range(len(scales)): + image = raw_image + scale = scales[i] + raw_h = int(h * scale) + raw_w = int(w * scale) + + image = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR).astype(np.int32) + nh,nw,_ = image.shape + + # pad image + new_h = (raw_h + 31) // 32 * 32 + new_w = (raw_w + 31) // 32 * 32 + input_image = np.zeros((new_h, new_w, 3), dtype=np.uint8) + input_image[:, :] = config.DATASET.MEAN + # input_image[:raw_h, :raw_w, :] = image + input_image[:nh, :nw, :] = image + + image, _ = transforms(input_image, None) + image = image.unsqueeze(0).to(device) + + + model = model.to(device) + + for flip in range(flip_range): + if flip: + image = flip_tensor(image, 3) + out_dict = model(image) + for key in out_dict.keys(): # return to raw_input shape + out_dict[key] = out_dict[key][:, :, : raw_h, : raw_w] + + if raw_h != org_h_pad or raw_w != org_w_pad: + out_dict = upsample_predictions(out_dict, (org_h_pad, org_w_pad), scale) + + # average softmax or logit? 
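+            # (Editor's note) The code below accumulates raw logits across scales and
+            # flips; averaging softmax probabilities instead (the commented-out line)
+            # is the other common choice and weights scales more comparably, at the
+            # cost of one extra softmax per forward pass.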
+ semantic_pred = out_dict['semantic'] + # semantic_pred = F.softmax(out_dict['semantic'],dim=1) + + center_pred = out_dict['center'] + offset_pred = out_dict['offset'] + if flip: + semantic_pred = flip_tensor(semantic_pred,3) + center_pred = flip_tensor(center_pred,3) + offset_pred = flip_tensor(offset_pred,3) + offset_pred[:, 1, :, :] *= (-1) + + sum_semantic_with_flip += semantic_pred + sum_center_with_flip += center_pred + sum_offset_with_flip += offset_pred + + semantic_mean = sum_semantic_with_flip / (flip_range * len(scales)) + center_mean = sum_center_with_flip / (flip_range * len(scales)) + offset_mean = sum_offset_with_flip / (flip_range * len(scales)) + + out_dict['semantic'] = semantic_mean + out_dict['center'] = center_mean + out_dict['offset'] = offset_mean + return out_dict diff --git a/CDARTS_segmentation/segmentation/utils/utils.py b/CDARTS_segmentation/segmentation/utils/utils.py new file mode 100644 index 0000000..14742dd --- /dev/null +++ b/CDARTS_segmentation/segmentation/utils/utils.py @@ -0,0 +1,52 @@ +# ------------------------------------------------------------------------------ +# Utility functions. +# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ +import torch + + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count if self.count != 0 else 0 + + +def get_loss_info_str(loss_meter_dict): + msg = '' + for key in loss_meter_dict.keys(): + msg += '{name}: {meter.val:.3e} ({meter.avg:.3e})\t'.format( + name=key, meter=loss_meter_dict[key] + ) + + return msg + + +def to_cuda(batch, device): + if type(batch) == torch.Tensor: + batch = batch.to(device) + elif type(batch) == dict: + for key in batch.keys(): + batch[key] = to_cuda(batch[key], device) + elif type(batch) == list: + for i in range(len(batch)): + batch[i] = to_cuda(batch[i], device) + return batch + + +def get_module(model, distributed): + if distributed: + return model.module + else: + return model diff --git a/CDARTS_segmentation/tools/__init__.py b/CDARTS_segmentation/tools/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/CDARTS_segmentation/tools/datasets/BaseDataset.py b/CDARTS_segmentation/tools/datasets/BaseDataset.py new file mode 100644 index 0000000..3544e61 --- /dev/null +++ b/CDARTS_segmentation/tools/datasets/BaseDataset.py @@ -0,0 +1,161 @@ +import os +import cv2 +cv2.setNumThreads(0) +import torch +import numpy as np +from random import shuffle + +import torch.utils.data as data + + +class BaseDataset(data.Dataset): + def __init__(self, setting, split_name, preprocess=None, file_length=None): + super(BaseDataset, self).__init__() + self._split_name = split_name + self._img_path = setting['img_root'] + self._gt_path = setting['gt_root'] + self._portion = setting['portion'] if 'portion' in setting else None + self._train_source = setting['train_source'] + self._eval_source = setting['eval_source'] + self._test_source = setting['test_source'] if 'test_source' in setting else setting['eval_source'] + self._down_sampling = setting['down_sampling'] + print("using downsampling:", self._down_sampling) + self._file_names = self._get_file_names(split_name) + print("Found %d images"%len(self._file_names)) + self._file_length = 
diff --git a/CDARTS_segmentation/tools/__init__.py b/CDARTS_segmentation/tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/CDARTS_segmentation/tools/datasets/BaseDataset.py b/CDARTS_segmentation/tools/datasets/BaseDataset.py
new file mode 100644
index 0000000..3544e61
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/BaseDataset.py
@@ -0,0 +1,161 @@
+import os
+import cv2
+cv2.setNumThreads(0)
+import torch
+import numpy as np
+from random import shuffle
+
+import torch.utils.data as data
+
+
+class BaseDataset(data.Dataset):
+    def __init__(self, setting, split_name, preprocess=None, file_length=None):
+        super(BaseDataset, self).__init__()
+        self._split_name = split_name
+        self._img_path = setting['img_root']
+        self._gt_path = setting['gt_root']
+        self._portion = setting['portion'] if 'portion' in setting else None
+        self._train_source = setting['train_source']
+        self._eval_source = setting['eval_source']
+        self._test_source = setting['test_source'] if 'test_source' in setting else setting['eval_source']
+        self._down_sampling = setting['down_sampling']
+        print("using downsampling:", self._down_sampling)
+        self._file_names = self._get_file_names(split_name)
+        print("Found %d images" % len(self._file_names))
+        self._file_length = file_length
+        self.preprocess = preprocess
+
+    def __len__(self):
+        if self._file_length is not None:
+            return self._file_length
+        return len(self._file_names)
+
+    def __getitem__(self, index):
+        if self._file_length is not None:
+            names = self._construct_new_file_names(self._file_length)[index]
+        else:
+            names = self._file_names[index]
+        img_path = os.path.join(self._img_path, names[0])
+        gt_path = os.path.join(self._gt_path, names[1])
+        item_name = names[1].split("/")[-1].split(".")[0]
+
+        img, gt = self._fetch_data(img_path, gt_path)
+        img = img[:, :, ::-1]  # BGR (cv2) -> RGB
+        if self.preprocess is not None:
+            img, gt, extra_dict = self.preprocess(img, gt)
+
+        if self._split_name == 'train':
+            img = torch.from_numpy(np.ascontiguousarray(img)).float()
+            gt = torch.from_numpy(np.ascontiguousarray(gt)).long()
+            if self.preprocess is not None and extra_dict is not None:
+                for k, v in extra_dict.items():
+                    extra_dict[k] = torch.from_numpy(np.ascontiguousarray(v))
+                    if 'label' in k:
+                        extra_dict[k] = extra_dict[k].long()
+                    if 'img' in k:
+                        extra_dict[k] = extra_dict[k].float()
+
+        output_dict = dict(data=img, label=gt, fn=str(item_name),
+                           n=len(self._file_names))
+        if self.preprocess is not None and extra_dict is not None:
+            output_dict.update(**extra_dict)
+
+        return output_dict
+
+    def _fetch_data(self, img_path, gt_path, dtype=None):
+        img = self._open_image(img_path, down_sampling=self._down_sampling)
+        gt = self._open_image(gt_path, cv2.IMREAD_GRAYSCALE, dtype=dtype, down_sampling=self._down_sampling)
+
+        return img, gt
+
+    def _get_file_names(self, split_name):
+        assert split_name in ['train', 'val', 'test']
+        source = self._train_source
+        if split_name == "val":
+            source = self._eval_source
+        elif split_name == 'test':
+            source = self._test_source
+
+        file_names = []
+        with open(source) as f:
+            files = f.readlines()
+        # Optionally keep only a shuffled fraction of the list:
+        # portion > 0 keeps the first `portion` share, portion < 0 the complement.
+        if self._portion is not None:
+            shuffle(files)
+            num_files = len(files)
+            if self._portion > 0:
+                split = int(np.floor(self._portion * num_files))
+                files = files[:split]
+            elif self._portion < 0:
+                split = int(np.floor((1 + self._portion) * num_files))
+                files = files[split:]
+
+        for item in files:
+            img_name, gt_name = self._process_item_names(item)
+            file_names.append([img_name, gt_name])
+
+        return file_names
+
+    def _construct_new_file_names(self, length):
+        assert isinstance(length, int)
+        files_len = len(self._file_names)
+        # Repeat the list up to `length` items, topping up with a random subset.
+        new_file_names = self._file_names * (length // files_len)
+
+        rand_indices = torch.randperm(files_len).tolist()
+        new_indices = rand_indices[:length % files_len]
+
+        new_file_names += [self._file_names[i] for i in new_indices]
+
+        return new_file_names
+
+    @staticmethod
+    def _process_item_names(item):
+        # Each list line is "<img_path> <gt_path>".
+        item = item.strip()
+        # item = item.split('\t')
+        item = item.split(' ')
+        img_name = item[0]
+        gt_name = item[1]
+
+        return img_name, gt_name
+
+    def get_length(self):
+        return self.__len__()
+
+    @staticmethod
+    def _open_image(filepath, mode=cv2.IMREAD_COLOR, dtype=None, down_sampling=1):
+        # cv2 loads B G R with shape (h, w, c).
+        img = np.array(cv2.imread(filepath, mode), dtype=dtype)
+
+        if isinstance(down_sampling, int):
+            H, W = img.shape[:2]
+            if len(img.shape) == 3:
+                img = cv2.resize(img, (W // down_sampling, H // down_sampling), interpolation=cv2.INTER_LINEAR)
+            else:
+                img = cv2.resize(img, (W // down_sampling, H // down_sampling), interpolation=cv2.INTER_NEAREST)
+            assert img.shape[0] == H // down_sampling and img.shape[1] == W // down_sampling
+        else:
+            assert (isinstance(down_sampling, tuple) or isinstance(down_sampling, list)) and len(down_sampling) == 2
+            if len(img.shape) == 3:
+                img = cv2.resize(img, (down_sampling[1], down_sampling[0]), interpolation=cv2.INTER_LINEAR)
+            else:
+                img = cv2.resize(img, (down_sampling[1], down_sampling[0]), interpolation=cv2.INTER_NEAREST)
+            assert img.shape[0] == down_sampling[0] and img.shape[1] == down_sampling[1]
+
+        return img
+
+    @classmethod
+    def get_class_colors(*args):
+        raise NotImplementedError
+
+    @classmethod
+    def get_class_names(*args):
+        raise NotImplementedError
+
+
+if __name__ == "__main__":
+    data_setting = {'img_root': '',
+                    'gt_root': '',
+                    'train_source': '',
+                    'eval_source': ''}
+    bd = BaseDataset(data_setting, 'train', None)
+    print(bd.get_class_names())
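
A minimal sketch of wiring a concrete dataset into a loader, assuming a Cityscapes-style layout. Every path below is a placeholder, and Cityscapes is the subclass defined further down in this patch.

# Hypothetical usage; all paths are placeholders, and 'down_sampling' is the
# key BaseDataset.__init__ requires.
from torch.utils.data import DataLoader

data_setting = {
    'img_root': '/data/cityscapes',
    'gt_root': '/data/cityscapes',
    'train_source': '/data/cityscapes/cityscapes_train.txt',
    'eval_source': '/data/cityscapes/cityscapes_val.txt',
    'down_sampling': 1,  # integer divisor, or an (H, W) target size
}
dataset = Cityscapes(data_setting, 'train', preprocess=None)
loader = DataLoader(dataset, batch_size=2, shuffle=True, num_workers=4)

batch = next(iter(loader))
imgs, gts = batch['data'], batch['label']  # keys produced by __getitem__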
diff --git a/CDARTS_segmentation/tools/datasets/__init__.py b/CDARTS_segmentation/tools/datasets/__init__.py
new file mode 100644
index 0000000..a154495
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/__init__.py
@@ -0,0 +1,6 @@
+from .cityscapes import Cityscapes
+from .bdd import BDD
+from .coco import COCO
+from .camvid import CamVid
+
+__all__ = ['Cityscapes', 'BDD', 'CamVid', 'COCO']
diff --git a/CDARTS_segmentation/tools/datasets/bdd/__init__.py b/CDARTS_segmentation/tools/datasets/bdd/__init__.py
new file mode 100644
index 0000000..8304abb
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/bdd/__init__.py
@@ -0,0 +1,3 @@
+from .bdd import BDD
+
+__all__ = ['BDD']
diff --git a/CDARTS_segmentation/tools/datasets/bdd/bdd.py b/CDARTS_segmentation/tools/datasets/bdd/bdd.py
new file mode 100644
index 0000000..959e8f1
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/bdd/bdd.py
@@ -0,0 +1,41 @@
+# import numpy as np
+
+from datasets.BaseDataset import BaseDataset
+
+
+class BDD(BaseDataset):
+    # trans_labels = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
+
+    @classmethod
+    def get_class_colors(*args):
+        return [[128, 64, 128], [244, 35, 232], [70, 70, 70],
+                [102, 102, 156], [190, 153, 153], [153, 153, 153],
+                [250, 170, 30], [220, 220, 0], [107, 142, 35],
+                [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0],
+                [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],
+                [0, 0, 230], [119, 11, 32]]
+
+    @classmethod
+    def get_class_names(*args):
+        # class counting(gtFine)
+        # 2953 2811 2934 970 1296 2949 1658 2808 2891 1654 2686 2343 1023 2832
+        # 359 274 142 513 1646
+        return ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
+                'traffic light', 'traffic sign',
+                'vegetation', 'terrain', 'sky', 'person', 'rider', 'car',
+                'truck', 'bus', 'train', 'motorcycle', 'bicycle']
+
+    # @classmethod
+    # def transform_label(cls, pred, name):
+    #     label = np.zeros(pred.shape)
+    #     ids = np.unique(pred)
+    #     for id in ids:
+    #         label[np.where(pred == id)] = cls.trans_labels[id]
+
+    #     new_name = (name.split('.')[0]).split('_')[:-1]
+    #     new_name = '_'.join(new_name) + '.png'
+
+    #     print('Trans', name, 'to', new_name, ' ',
+    #           np.unique(np.array(pred, np.uint8)), ' ---------> ',
+    #           np.unique(np.array(label, np.uint8)))
+    #     return label, new_name
diff --git a/CDARTS_segmentation/tools/datasets/camvid/__init__.py b/CDARTS_segmentation/tools/datasets/camvid/__init__.py
new file mode 100644
index 0000000..a4b0174
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/camvid/__init__.py
@@ -0,0 +1,3 @@
+from .camvid import CamVid
+
+__all__ = ['CamVid']
\ No newline at end of file
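
Since these dataset classes only override the palette and name tables, here is a small sketch of the common consumption pattern, painting a train-id prediction with the BDD palette; `colorize` is a helper written here for illustration (note the `*args` classmethods still work as plain `Class.method()` calls, with `cls` arriving as `args[0]`).

# Illustrative only, assuming the BDD class above is in scope.
import numpy as np

def colorize(pred, colors):
    # pred: int array of train ids in [0, len(colors)); returns an RGB image.
    out = np.zeros((*pred.shape, 3), dtype=np.uint8)
    for train_id, color in enumerate(colors):
        out[pred == train_id] = color
    return out

palette = BDD.get_class_colors()
vis = colorize(np.random.randint(0, 19, (64, 64)), palette)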
diff --git a/CDARTS_segmentation/tools/datasets/camvid/camvid.py b/CDARTS_segmentation/tools/datasets/camvid/camvid.py
new file mode 100644
index 0000000..1b3cf16
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/camvid/camvid.py
@@ -0,0 +1,17 @@
+from datasets.BaseDataset import BaseDataset
+
+
+class CamVid(BaseDataset):
+    @classmethod
+    def get_class_colors(*args):
+        return [[128, 0, 0], [128, 128, 0], [128, 128, 128], [64, 0, 128],
+                [192, 128, 128], [128, 64, 128], [64, 64, 0], [64, 64, 128],
+                [192, 192, 128], [0, 0, 192], [0, 128, 192]]
+
+    @classmethod
+    def get_class_names(*args):
+        return ['Building', 'Tree', 'Sky', 'Car', 'Sign-Symbol', 'Road',
+                'Pedestrian', 'Fence', 'Column-Pole', 'Side-Walk', 'Bicyclist', 'Void']
diff --git a/CDARTS_segmentation/tools/datasets/cityscapes/__init__.py b/CDARTS_segmentation/tools/datasets/cityscapes/__init__.py
new file mode 100644
index 0000000..538cf4d
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/cityscapes/__init__.py
@@ -0,0 +1,3 @@
+from .cityscapes import Cityscapes
+
+__all__ = ['Cityscapes']
\ No newline at end of file
diff --git a/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes.py b/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes.py
new file mode 100644
index 0000000..47efdaf
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes.py
@@ -0,0 +1,42 @@
+import numpy as np
+
+from datasets.BaseDataset import BaseDataset
+
+
+class Cityscapes(BaseDataset):
+    # Maps contiguous train ids back to the official Cityscapes label ids.
+    trans_labels = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+                    28, 31, 32, 33]
+
+    @classmethod
+    def get_class_colors(*args):
+        return [[128, 64, 128], [244, 35, 232], [70, 70, 70],
+                [102, 102, 156], [190, 153, 153], [153, 153, 153],
+                [250, 170, 30], [220, 220, 0], [107, 142, 35],
+                [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0],
+                [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],
+                [0, 0, 230], [119, 11, 32]]
+
+    @classmethod
+    def get_class_names(*args):
+        # class counting(gtFine)
+        # 2953 2811 2934 970 1296 2949 1658 2808 2891 1654 2686 2343 1023 2832
+        # 359 274 142 513 1646
+        return ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
+                'traffic light', 'traffic sign',
+                'vegetation', 'terrain', 'sky', 'person', 'rider', 'car',
+                'truck', 'bus', 'train', 'motorcycle', 'bicycle']
+
+    @classmethod
+    def transform_label(cls, pred, name):
+        label = np.zeros(pred.shape)
+        ids = np.unique(pred)
+        for label_id in ids:
+            label[np.where(pred == label_id)] = cls.trans_labels[label_id]
+
+        new_name = (name.split('.')[0]).split('_')[:-1]
+        new_name = '_'.join(new_name) + '.png'
+
+        print('Trans', name, 'to', new_name, ' ',
+              np.unique(np.array(pred, np.uint8)), ' ---------> ',
+              np.unique(np.array(label, np.uint8)))
+        return label, new_name
\ No newline at end of file
diff --git a/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_test.txt b/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_test.txt
new file mode 100644
index 0000000..36db09e
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_test.txt
@@ -0,0 +1,1525 @@
+leftImg8bit/test/berlin/berlin_000000_000019_leftImg8bit.png gtFine/test/berlin/berlin_000000_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000001_000019_leftImg8bit.png gtFine/test/berlin/berlin_000001_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000002_000019_leftImg8bit.png gtFine/test/berlin/berlin_000002_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000003_000019_leftImg8bit.png gtFine/test/berlin/berlin_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000004_000019_leftImg8bit.png gtFine/test/berlin/berlin_000004_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000005_000019_leftImg8bit.png gtFine/test/berlin/berlin_000005_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000006_000019_leftImg8bit.png gtFine/test/berlin/berlin_000006_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000007_000019_leftImg8bit.png gtFine/test/berlin/berlin_000007_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000008_000019_leftImg8bit.png gtFine/test/berlin/berlin_000008_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000009_000019_leftImg8bit.png gtFine/test/berlin/berlin_000009_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000010_000019_leftImg8bit.png gtFine/test/berlin/berlin_000010_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000011_000019_leftImg8bit.png gtFine/test/berlin/berlin_000011_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000012_000019_leftImg8bit.png gtFine/test/berlin/berlin_000012_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000013_000019_leftImg8bit.png gtFine/test/berlin/berlin_000013_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000014_000019_leftImg8bit.png gtFine/test/berlin/berlin_000014_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000015_000019_leftImg8bit.png gtFine/test/berlin/berlin_000015_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000016_000019_leftImg8bit.png gtFine/test/berlin/berlin_000016_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000017_000019_leftImg8bit.png gtFine/test/berlin/berlin_000017_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000018_000019_leftImg8bit.png gtFine/test/berlin/berlin_000018_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000019_000019_leftImg8bit.png gtFine/test/berlin/berlin_000019_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000020_000019_leftImg8bit.png gtFine/test/berlin/berlin_000020_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000021_000019_leftImg8bit.png gtFine/test/berlin/berlin_000021_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000022_000019_leftImg8bit.png gtFine/test/berlin/berlin_000022_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000023_000019_leftImg8bit.png gtFine/test/berlin/berlin_000023_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000024_000019_leftImg8bit.png gtFine/test/berlin/berlin_000024_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000025_000019_leftImg8bit.png gtFine/test/berlin/berlin_000025_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000026_000019_leftImg8bit.png gtFine/test/berlin/berlin_000026_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000027_000019_leftImg8bit.png gtFine/test/berlin/berlin_000027_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000028_000019_leftImg8bit.png gtFine/test/berlin/berlin_000028_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000029_000019_leftImg8bit.png gtFine/test/berlin/berlin_000029_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000030_000019_leftImg8bit.png gtFine/test/berlin/berlin_000030_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000031_000019_leftImg8bit.png 
gtFine/test/berlin/berlin_000031_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000032_000019_leftImg8bit.png gtFine/test/berlin/berlin_000032_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000033_000019_leftImg8bit.png gtFine/test/berlin/berlin_000033_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000034_000019_leftImg8bit.png gtFine/test/berlin/berlin_000034_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000035_000019_leftImg8bit.png gtFine/test/berlin/berlin_000035_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000036_000019_leftImg8bit.png gtFine/test/berlin/berlin_000036_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000037_000019_leftImg8bit.png gtFine/test/berlin/berlin_000037_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000038_000019_leftImg8bit.png gtFine/test/berlin/berlin_000038_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000039_000019_leftImg8bit.png gtFine/test/berlin/berlin_000039_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000040_000019_leftImg8bit.png gtFine/test/berlin/berlin_000040_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000041_000019_leftImg8bit.png gtFine/test/berlin/berlin_000041_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000042_000019_leftImg8bit.png gtFine/test/berlin/berlin_000042_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000043_000019_leftImg8bit.png gtFine/test/berlin/berlin_000043_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000044_000019_leftImg8bit.png gtFine/test/berlin/berlin_000044_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000045_000019_leftImg8bit.png gtFine/test/berlin/berlin_000045_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000046_000019_leftImg8bit.png gtFine/test/berlin/berlin_000046_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000047_000019_leftImg8bit.png gtFine/test/berlin/berlin_000047_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000048_000019_leftImg8bit.png gtFine/test/berlin/berlin_000048_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000049_000019_leftImg8bit.png gtFine/test/berlin/berlin_000049_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000050_000019_leftImg8bit.png gtFine/test/berlin/berlin_000050_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000051_000019_leftImg8bit.png gtFine/test/berlin/berlin_000051_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000052_000019_leftImg8bit.png gtFine/test/berlin/berlin_000052_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000053_000019_leftImg8bit.png gtFine/test/berlin/berlin_000053_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000054_000019_leftImg8bit.png gtFine/test/berlin/berlin_000054_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000055_000019_leftImg8bit.png gtFine/test/berlin/berlin_000055_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000056_000019_leftImg8bit.png gtFine/test/berlin/berlin_000056_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000057_000019_leftImg8bit.png gtFine/test/berlin/berlin_000057_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000058_000019_leftImg8bit.png gtFine/test/berlin/berlin_000058_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/berlin/berlin_000059_000019_leftImg8bit.png gtFine/test/berlin/berlin_000059_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000060_000019_leftImg8bit.png gtFine/test/berlin/berlin_000060_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000061_000019_leftImg8bit.png gtFine/test/berlin/berlin_000061_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000062_000019_leftImg8bit.png gtFine/test/berlin/berlin_000062_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000063_000019_leftImg8bit.png gtFine/test/berlin/berlin_000063_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000064_000019_leftImg8bit.png gtFine/test/berlin/berlin_000064_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000065_000019_leftImg8bit.png gtFine/test/berlin/berlin_000065_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000066_000019_leftImg8bit.png gtFine/test/berlin/berlin_000066_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000067_000019_leftImg8bit.png gtFine/test/berlin/berlin_000067_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000068_000019_leftImg8bit.png gtFine/test/berlin/berlin_000068_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000069_000019_leftImg8bit.png gtFine/test/berlin/berlin_000069_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000070_000019_leftImg8bit.png gtFine/test/berlin/berlin_000070_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000071_000019_leftImg8bit.png gtFine/test/berlin/berlin_000071_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000072_000019_leftImg8bit.png gtFine/test/berlin/berlin_000072_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000073_000019_leftImg8bit.png gtFine/test/berlin/berlin_000073_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000074_000019_leftImg8bit.png gtFine/test/berlin/berlin_000074_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000075_000019_leftImg8bit.png gtFine/test/berlin/berlin_000075_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000076_000019_leftImg8bit.png gtFine/test/berlin/berlin_000076_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000077_000019_leftImg8bit.png gtFine/test/berlin/berlin_000077_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000078_000019_leftImg8bit.png gtFine/test/berlin/berlin_000078_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000079_000019_leftImg8bit.png gtFine/test/berlin/berlin_000079_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000080_000019_leftImg8bit.png gtFine/test/berlin/berlin_000080_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000081_000019_leftImg8bit.png gtFine/test/berlin/berlin_000081_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000082_000019_leftImg8bit.png gtFine/test/berlin/berlin_000082_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000083_000019_leftImg8bit.png gtFine/test/berlin/berlin_000083_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000084_000019_leftImg8bit.png gtFine/test/berlin/berlin_000084_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000085_000019_leftImg8bit.png gtFine/test/berlin/berlin_000085_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000086_000019_leftImg8bit.png 
gtFine/test/berlin/berlin_000086_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000087_000019_leftImg8bit.png gtFine/test/berlin/berlin_000087_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000088_000019_leftImg8bit.png gtFine/test/berlin/berlin_000088_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000089_000019_leftImg8bit.png gtFine/test/berlin/berlin_000089_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000090_000019_leftImg8bit.png gtFine/test/berlin/berlin_000090_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000091_000019_leftImg8bit.png gtFine/test/berlin/berlin_000091_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000092_000019_leftImg8bit.png gtFine/test/berlin/berlin_000092_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000093_000019_leftImg8bit.png gtFine/test/berlin/berlin_000093_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000094_000019_leftImg8bit.png gtFine/test/berlin/berlin_000094_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000095_000019_leftImg8bit.png gtFine/test/berlin/berlin_000095_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000096_000019_leftImg8bit.png gtFine/test/berlin/berlin_000096_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000097_000019_leftImg8bit.png gtFine/test/berlin/berlin_000097_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000098_000019_leftImg8bit.png gtFine/test/berlin/berlin_000098_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000099_000019_leftImg8bit.png gtFine/test/berlin/berlin_000099_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000100_000019_leftImg8bit.png gtFine/test/berlin/berlin_000100_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000101_000019_leftImg8bit.png gtFine/test/berlin/berlin_000101_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000102_000019_leftImg8bit.png gtFine/test/berlin/berlin_000102_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000103_000019_leftImg8bit.png gtFine/test/berlin/berlin_000103_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000104_000019_leftImg8bit.png gtFine/test/berlin/berlin_000104_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000105_000019_leftImg8bit.png gtFine/test/berlin/berlin_000105_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000106_000019_leftImg8bit.png gtFine/test/berlin/berlin_000106_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000107_000019_leftImg8bit.png gtFine/test/berlin/berlin_000107_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000108_000019_leftImg8bit.png gtFine/test/berlin/berlin_000108_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000109_000019_leftImg8bit.png gtFine/test/berlin/berlin_000109_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000110_000019_leftImg8bit.png gtFine/test/berlin/berlin_000110_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000111_000019_leftImg8bit.png gtFine/test/berlin/berlin_000111_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000112_000019_leftImg8bit.png gtFine/test/berlin/berlin_000112_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000113_000019_leftImg8bit.png gtFine/test/berlin/berlin_000113_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/berlin/berlin_000114_000019_leftImg8bit.png gtFine/test/berlin/berlin_000114_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000115_000019_leftImg8bit.png gtFine/test/berlin/berlin_000115_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000116_000019_leftImg8bit.png gtFine/test/berlin/berlin_000116_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000117_000019_leftImg8bit.png gtFine/test/berlin/berlin_000117_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000118_000019_leftImg8bit.png gtFine/test/berlin/berlin_000118_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000119_000019_leftImg8bit.png gtFine/test/berlin/berlin_000119_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000120_000019_leftImg8bit.png gtFine/test/berlin/berlin_000120_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000121_000019_leftImg8bit.png gtFine/test/berlin/berlin_000121_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000122_000019_leftImg8bit.png gtFine/test/berlin/berlin_000122_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000123_000019_leftImg8bit.png gtFine/test/berlin/berlin_000123_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000124_000019_leftImg8bit.png gtFine/test/berlin/berlin_000124_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000125_000019_leftImg8bit.png gtFine/test/berlin/berlin_000125_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000126_000019_leftImg8bit.png gtFine/test/berlin/berlin_000126_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000127_000019_leftImg8bit.png gtFine/test/berlin/berlin_000127_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000128_000019_leftImg8bit.png gtFine/test/berlin/berlin_000128_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000129_000019_leftImg8bit.png gtFine/test/berlin/berlin_000129_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000130_000019_leftImg8bit.png gtFine/test/berlin/berlin_000130_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000131_000019_leftImg8bit.png gtFine/test/berlin/berlin_000131_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000132_000019_leftImg8bit.png gtFine/test/berlin/berlin_000132_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000133_000019_leftImg8bit.png gtFine/test/berlin/berlin_000133_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000134_000019_leftImg8bit.png gtFine/test/berlin/berlin_000134_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000135_000019_leftImg8bit.png gtFine/test/berlin/berlin_000135_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000136_000019_leftImg8bit.png gtFine/test/berlin/berlin_000136_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000137_000019_leftImg8bit.png gtFine/test/berlin/berlin_000137_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000138_000019_leftImg8bit.png gtFine/test/berlin/berlin_000138_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000139_000019_leftImg8bit.png gtFine/test/berlin/berlin_000139_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000140_000019_leftImg8bit.png gtFine/test/berlin/berlin_000140_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000141_000019_leftImg8bit.png 
gtFine/test/berlin/berlin_000141_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000142_000019_leftImg8bit.png gtFine/test/berlin/berlin_000142_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000143_000019_leftImg8bit.png gtFine/test/berlin/berlin_000143_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000144_000019_leftImg8bit.png gtFine/test/berlin/berlin_000144_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000145_000019_leftImg8bit.png gtFine/test/berlin/berlin_000145_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000146_000019_leftImg8bit.png gtFine/test/berlin/berlin_000146_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000147_000019_leftImg8bit.png gtFine/test/berlin/berlin_000147_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000148_000019_leftImg8bit.png gtFine/test/berlin/berlin_000148_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000149_000019_leftImg8bit.png gtFine/test/berlin/berlin_000149_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000150_000019_leftImg8bit.png gtFine/test/berlin/berlin_000150_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000151_000019_leftImg8bit.png gtFine/test/berlin/berlin_000151_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000152_000019_leftImg8bit.png gtFine/test/berlin/berlin_000152_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000153_000019_leftImg8bit.png gtFine/test/berlin/berlin_000153_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000154_000019_leftImg8bit.png gtFine/test/berlin/berlin_000154_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000155_000019_leftImg8bit.png gtFine/test/berlin/berlin_000155_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000156_000019_leftImg8bit.png gtFine/test/berlin/berlin_000156_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000157_000019_leftImg8bit.png gtFine/test/berlin/berlin_000157_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000158_000019_leftImg8bit.png gtFine/test/berlin/berlin_000158_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000159_000019_leftImg8bit.png gtFine/test/berlin/berlin_000159_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000160_000019_leftImg8bit.png gtFine/test/berlin/berlin_000160_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000161_000019_leftImg8bit.png gtFine/test/berlin/berlin_000161_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000162_000019_leftImg8bit.png gtFine/test/berlin/berlin_000162_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000163_000019_leftImg8bit.png gtFine/test/berlin/berlin_000163_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000164_000019_leftImg8bit.png gtFine/test/berlin/berlin_000164_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000165_000019_leftImg8bit.png gtFine/test/berlin/berlin_000165_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000166_000019_leftImg8bit.png gtFine/test/berlin/berlin_000166_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000167_000019_leftImg8bit.png gtFine/test/berlin/berlin_000167_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000168_000019_leftImg8bit.png gtFine/test/berlin/berlin_000168_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/berlin/berlin_000169_000019_leftImg8bit.png gtFine/test/berlin/berlin_000169_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000170_000019_leftImg8bit.png gtFine/test/berlin/berlin_000170_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000171_000019_leftImg8bit.png gtFine/test/berlin/berlin_000171_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000172_000019_leftImg8bit.png gtFine/test/berlin/berlin_000172_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000173_000019_leftImg8bit.png gtFine/test/berlin/berlin_000173_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000174_000019_leftImg8bit.png gtFine/test/berlin/berlin_000174_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000175_000019_leftImg8bit.png gtFine/test/berlin/berlin_000175_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000176_000019_leftImg8bit.png gtFine/test/berlin/berlin_000176_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000177_000019_leftImg8bit.png gtFine/test/berlin/berlin_000177_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000178_000019_leftImg8bit.png gtFine/test/berlin/berlin_000178_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000179_000019_leftImg8bit.png gtFine/test/berlin/berlin_000179_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000180_000019_leftImg8bit.png gtFine/test/berlin/berlin_000180_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000181_000019_leftImg8bit.png gtFine/test/berlin/berlin_000181_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000182_000019_leftImg8bit.png gtFine/test/berlin/berlin_000182_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000183_000019_leftImg8bit.png gtFine/test/berlin/berlin_000183_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000184_000019_leftImg8bit.png gtFine/test/berlin/berlin_000184_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000185_000019_leftImg8bit.png gtFine/test/berlin/berlin_000185_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000186_000019_leftImg8bit.png gtFine/test/berlin/berlin_000186_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000187_000019_leftImg8bit.png gtFine/test/berlin/berlin_000187_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000188_000019_leftImg8bit.png gtFine/test/berlin/berlin_000188_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000189_000019_leftImg8bit.png gtFine/test/berlin/berlin_000189_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000190_000019_leftImg8bit.png gtFine/test/berlin/berlin_000190_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000191_000019_leftImg8bit.png gtFine/test/berlin/berlin_000191_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000192_000019_leftImg8bit.png gtFine/test/berlin/berlin_000192_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000193_000019_leftImg8bit.png gtFine/test/berlin/berlin_000193_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000194_000019_leftImg8bit.png gtFine/test/berlin/berlin_000194_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000195_000019_leftImg8bit.png gtFine/test/berlin/berlin_000195_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000196_000019_leftImg8bit.png 
gtFine/test/berlin/berlin_000196_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000197_000019_leftImg8bit.png gtFine/test/berlin/berlin_000197_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000198_000019_leftImg8bit.png gtFine/test/berlin/berlin_000198_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000199_000019_leftImg8bit.png gtFine/test/berlin/berlin_000199_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000200_000019_leftImg8bit.png gtFine/test/berlin/berlin_000200_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000201_000019_leftImg8bit.png gtFine/test/berlin/berlin_000201_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000202_000019_leftImg8bit.png gtFine/test/berlin/berlin_000202_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000203_000019_leftImg8bit.png gtFine/test/berlin/berlin_000203_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000204_000019_leftImg8bit.png gtFine/test/berlin/berlin_000204_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000205_000019_leftImg8bit.png gtFine/test/berlin/berlin_000205_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000206_000019_leftImg8bit.png gtFine/test/berlin/berlin_000206_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000207_000019_leftImg8bit.png gtFine/test/berlin/berlin_000207_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000208_000019_leftImg8bit.png gtFine/test/berlin/berlin_000208_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000209_000019_leftImg8bit.png gtFine/test/berlin/berlin_000209_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000210_000019_leftImg8bit.png gtFine/test/berlin/berlin_000210_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000211_000019_leftImg8bit.png gtFine/test/berlin/berlin_000211_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000212_000019_leftImg8bit.png gtFine/test/berlin/berlin_000212_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000213_000019_leftImg8bit.png gtFine/test/berlin/berlin_000213_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000214_000019_leftImg8bit.png gtFine/test/berlin/berlin_000214_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000215_000019_leftImg8bit.png gtFine/test/berlin/berlin_000215_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000216_000019_leftImg8bit.png gtFine/test/berlin/berlin_000216_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000217_000019_leftImg8bit.png gtFine/test/berlin/berlin_000217_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000218_000019_leftImg8bit.png gtFine/test/berlin/berlin_000218_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000219_000019_leftImg8bit.png gtFine/test/berlin/berlin_000219_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000220_000019_leftImg8bit.png gtFine/test/berlin/berlin_000220_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000221_000019_leftImg8bit.png gtFine/test/berlin/berlin_000221_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000222_000019_leftImg8bit.png gtFine/test/berlin/berlin_000222_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000223_000019_leftImg8bit.png gtFine/test/berlin/berlin_000223_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/berlin/berlin_000224_000019_leftImg8bit.png gtFine/test/berlin/berlin_000224_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000225_000019_leftImg8bit.png gtFine/test/berlin/berlin_000225_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000226_000019_leftImg8bit.png gtFine/test/berlin/berlin_000226_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000227_000019_leftImg8bit.png gtFine/test/berlin/berlin_000227_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000228_000019_leftImg8bit.png gtFine/test/berlin/berlin_000228_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000229_000019_leftImg8bit.png gtFine/test/berlin/berlin_000229_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000230_000019_leftImg8bit.png gtFine/test/berlin/berlin_000230_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000231_000019_leftImg8bit.png gtFine/test/berlin/berlin_000231_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000232_000019_leftImg8bit.png gtFine/test/berlin/berlin_000232_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000233_000019_leftImg8bit.png gtFine/test/berlin/berlin_000233_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000234_000019_leftImg8bit.png gtFine/test/berlin/berlin_000234_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000235_000019_leftImg8bit.png gtFine/test/berlin/berlin_000235_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000236_000019_leftImg8bit.png gtFine/test/berlin/berlin_000236_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000237_000019_leftImg8bit.png gtFine/test/berlin/berlin_000237_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000238_000019_leftImg8bit.png gtFine/test/berlin/berlin_000238_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000239_000019_leftImg8bit.png gtFine/test/berlin/berlin_000239_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000240_000019_leftImg8bit.png gtFine/test/berlin/berlin_000240_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000241_000019_leftImg8bit.png gtFine/test/berlin/berlin_000241_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000242_000019_leftImg8bit.png gtFine/test/berlin/berlin_000242_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000243_000019_leftImg8bit.png gtFine/test/berlin/berlin_000243_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000244_000019_leftImg8bit.png gtFine/test/berlin/berlin_000244_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000245_000019_leftImg8bit.png gtFine/test/berlin/berlin_000245_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000246_000019_leftImg8bit.png gtFine/test/berlin/berlin_000246_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000247_000019_leftImg8bit.png gtFine/test/berlin/berlin_000247_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000248_000019_leftImg8bit.png gtFine/test/berlin/berlin_000248_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000249_000019_leftImg8bit.png gtFine/test/berlin/berlin_000249_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000250_000019_leftImg8bit.png gtFine/test/berlin/berlin_000250_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000251_000019_leftImg8bit.png 
gtFine/test/berlin/berlin_000251_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000252_000019_leftImg8bit.png gtFine/test/berlin/berlin_000252_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000253_000019_leftImg8bit.png gtFine/test/berlin/berlin_000253_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000254_000019_leftImg8bit.png gtFine/test/berlin/berlin_000254_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000255_000019_leftImg8bit.png gtFine/test/berlin/berlin_000255_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000256_000019_leftImg8bit.png gtFine/test/berlin/berlin_000256_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000257_000019_leftImg8bit.png gtFine/test/berlin/berlin_000257_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000258_000019_leftImg8bit.png gtFine/test/berlin/berlin_000258_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000259_000019_leftImg8bit.png gtFine/test/berlin/berlin_000259_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000260_000019_leftImg8bit.png gtFine/test/berlin/berlin_000260_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000261_000019_leftImg8bit.png gtFine/test/berlin/berlin_000261_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000262_000019_leftImg8bit.png gtFine/test/berlin/berlin_000262_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000263_000019_leftImg8bit.png gtFine/test/berlin/berlin_000263_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000264_000019_leftImg8bit.png gtFine/test/berlin/berlin_000264_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000265_000019_leftImg8bit.png gtFine/test/berlin/berlin_000265_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000266_000019_leftImg8bit.png gtFine/test/berlin/berlin_000266_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000267_000019_leftImg8bit.png gtFine/test/berlin/berlin_000267_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000268_000019_leftImg8bit.png gtFine/test/berlin/berlin_000268_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000269_000019_leftImg8bit.png gtFine/test/berlin/berlin_000269_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000270_000019_leftImg8bit.png gtFine/test/berlin/berlin_000270_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000271_000019_leftImg8bit.png gtFine/test/berlin/berlin_000271_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000272_000019_leftImg8bit.png gtFine/test/berlin/berlin_000272_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000273_000019_leftImg8bit.png gtFine/test/berlin/berlin_000273_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000274_000019_leftImg8bit.png gtFine/test/berlin/berlin_000274_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000275_000019_leftImg8bit.png gtFine/test/berlin/berlin_000275_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000276_000019_leftImg8bit.png gtFine/test/berlin/berlin_000276_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000277_000019_leftImg8bit.png gtFine/test/berlin/berlin_000277_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000278_000019_leftImg8bit.png gtFine/test/berlin/berlin_000278_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/berlin/berlin_000279_000019_leftImg8bit.png gtFine/test/berlin/berlin_000279_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000280_000019_leftImg8bit.png gtFine/test/berlin/berlin_000280_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000281_000019_leftImg8bit.png gtFine/test/berlin/berlin_000281_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000282_000019_leftImg8bit.png gtFine/test/berlin/berlin_000282_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000283_000019_leftImg8bit.png gtFine/test/berlin/berlin_000283_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000284_000019_leftImg8bit.png gtFine/test/berlin/berlin_000284_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000285_000019_leftImg8bit.png gtFine/test/berlin/berlin_000285_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000286_000019_leftImg8bit.png gtFine/test/berlin/berlin_000286_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000287_000019_leftImg8bit.png gtFine/test/berlin/berlin_000287_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000288_000019_leftImg8bit.png gtFine/test/berlin/berlin_000288_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000289_000019_leftImg8bit.png gtFine/test/berlin/berlin_000289_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000290_000019_leftImg8bit.png gtFine/test/berlin/berlin_000290_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000291_000019_leftImg8bit.png gtFine/test/berlin/berlin_000291_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000292_000019_leftImg8bit.png gtFine/test/berlin/berlin_000292_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000293_000019_leftImg8bit.png gtFine/test/berlin/berlin_000293_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000294_000019_leftImg8bit.png gtFine/test/berlin/berlin_000294_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000295_000019_leftImg8bit.png gtFine/test/berlin/berlin_000295_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000296_000019_leftImg8bit.png gtFine/test/berlin/berlin_000296_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000297_000019_leftImg8bit.png gtFine/test/berlin/berlin_000297_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000298_000019_leftImg8bit.png gtFine/test/berlin/berlin_000298_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000299_000019_leftImg8bit.png gtFine/test/berlin/berlin_000299_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000300_000019_leftImg8bit.png gtFine/test/berlin/berlin_000300_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000301_000019_leftImg8bit.png gtFine/test/berlin/berlin_000301_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000302_000019_leftImg8bit.png gtFine/test/berlin/berlin_000302_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000303_000019_leftImg8bit.png gtFine/test/berlin/berlin_000303_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000304_000019_leftImg8bit.png gtFine/test/berlin/berlin_000304_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000305_000019_leftImg8bit.png gtFine/test/berlin/berlin_000305_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000306_000019_leftImg8bit.png 
gtFine/test/berlin/berlin_000306_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000307_000019_leftImg8bit.png gtFine/test/berlin/berlin_000307_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000308_000019_leftImg8bit.png gtFine/test/berlin/berlin_000308_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000309_000019_leftImg8bit.png gtFine/test/berlin/berlin_000309_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000310_000019_leftImg8bit.png gtFine/test/berlin/berlin_000310_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000311_000019_leftImg8bit.png gtFine/test/berlin/berlin_000311_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000312_000019_leftImg8bit.png gtFine/test/berlin/berlin_000312_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000313_000019_leftImg8bit.png gtFine/test/berlin/berlin_000313_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000314_000019_leftImg8bit.png gtFine/test/berlin/berlin_000314_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000315_000019_leftImg8bit.png gtFine/test/berlin/berlin_000315_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000316_000019_leftImg8bit.png gtFine/test/berlin/berlin_000316_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000317_000019_leftImg8bit.png gtFine/test/berlin/berlin_000317_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000318_000019_leftImg8bit.png gtFine/test/berlin/berlin_000318_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000319_000019_leftImg8bit.png gtFine/test/berlin/berlin_000319_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000320_000019_leftImg8bit.png gtFine/test/berlin/berlin_000320_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000321_000019_leftImg8bit.png gtFine/test/berlin/berlin_000321_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000322_000019_leftImg8bit.png gtFine/test/berlin/berlin_000322_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000323_000019_leftImg8bit.png gtFine/test/berlin/berlin_000323_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000324_000019_leftImg8bit.png gtFine/test/berlin/berlin_000324_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000325_000019_leftImg8bit.png gtFine/test/berlin/berlin_000325_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000326_000019_leftImg8bit.png gtFine/test/berlin/berlin_000326_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000327_000019_leftImg8bit.png gtFine/test/berlin/berlin_000327_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000328_000019_leftImg8bit.png gtFine/test/berlin/berlin_000328_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000329_000019_leftImg8bit.png gtFine/test/berlin/berlin_000329_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000330_000019_leftImg8bit.png gtFine/test/berlin/berlin_000330_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000331_000019_leftImg8bit.png gtFine/test/berlin/berlin_000331_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000332_000019_leftImg8bit.png gtFine/test/berlin/berlin_000332_000019_gtFine_labelTrainIds.png +leftImg8bit/test/berlin/berlin_000333_000019_leftImg8bit.png gtFine/test/berlin/berlin_000333_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/berlin/berlin_000334_000019_leftImg8bit.png gtFine/test/berlin/berlin_000334_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000335_000019_leftImg8bit.png gtFine/test/berlin/berlin_000335_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000336_000019_leftImg8bit.png gtFine/test/berlin/berlin_000336_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000337_000019_leftImg8bit.png gtFine/test/berlin/berlin_000337_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000338_000019_leftImg8bit.png gtFine/test/berlin/berlin_000338_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000339_000019_leftImg8bit.png gtFine/test/berlin/berlin_000339_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000340_000019_leftImg8bit.png gtFine/test/berlin/berlin_000340_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000341_000019_leftImg8bit.png gtFine/test/berlin/berlin_000341_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000342_000019_leftImg8bit.png gtFine/test/berlin/berlin_000342_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000343_000019_leftImg8bit.png gtFine/test/berlin/berlin_000343_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000344_000019_leftImg8bit.png gtFine/test/berlin/berlin_000344_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000345_000019_leftImg8bit.png gtFine/test/berlin/berlin_000345_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000346_000019_leftImg8bit.png gtFine/test/berlin/berlin_000346_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000347_000019_leftImg8bit.png gtFine/test/berlin/berlin_000347_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000348_000019_leftImg8bit.png gtFine/test/berlin/berlin_000348_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000349_000019_leftImg8bit.png gtFine/test/berlin/berlin_000349_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000350_000019_leftImg8bit.png gtFine/test/berlin/berlin_000350_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000351_000019_leftImg8bit.png gtFine/test/berlin/berlin_000351_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000352_000019_leftImg8bit.png gtFine/test/berlin/berlin_000352_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000353_000019_leftImg8bit.png gtFine/test/berlin/berlin_000353_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000354_000019_leftImg8bit.png gtFine/test/berlin/berlin_000354_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000355_000019_leftImg8bit.png gtFine/test/berlin/berlin_000355_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000356_000019_leftImg8bit.png gtFine/test/berlin/berlin_000356_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000357_000019_leftImg8bit.png gtFine/test/berlin/berlin_000357_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000358_000019_leftImg8bit.png gtFine/test/berlin/berlin_000358_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000359_000019_leftImg8bit.png gtFine/test/berlin/berlin_000359_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000360_000019_leftImg8bit.png gtFine/test/berlin/berlin_000360_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000361_000019_leftImg8bit.png gtFine/test/berlin/berlin_000361_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000362_000019_leftImg8bit.png gtFine/test/berlin/berlin_000362_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000363_000019_leftImg8bit.png gtFine/test/berlin/berlin_000363_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000364_000019_leftImg8bit.png gtFine/test/berlin/berlin_000364_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000365_000019_leftImg8bit.png gtFine/test/berlin/berlin_000365_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000366_000019_leftImg8bit.png gtFine/test/berlin/berlin_000366_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000367_000019_leftImg8bit.png gtFine/test/berlin/berlin_000367_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000368_000019_leftImg8bit.png gtFine/test/berlin/berlin_000368_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000369_000019_leftImg8bit.png gtFine/test/berlin/berlin_000369_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000370_000019_leftImg8bit.png gtFine/test/berlin/berlin_000370_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000371_000019_leftImg8bit.png gtFine/test/berlin/berlin_000371_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000372_000019_leftImg8bit.png gtFine/test/berlin/berlin_000372_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000373_000019_leftImg8bit.png gtFine/test/berlin/berlin_000373_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000374_000019_leftImg8bit.png gtFine/test/berlin/berlin_000374_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000375_000019_leftImg8bit.png gtFine/test/berlin/berlin_000375_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000376_000019_leftImg8bit.png gtFine/test/berlin/berlin_000376_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000377_000019_leftImg8bit.png gtFine/test/berlin/berlin_000377_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000378_000019_leftImg8bit.png gtFine/test/berlin/berlin_000378_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000379_000019_leftImg8bit.png gtFine/test/berlin/berlin_000379_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000380_000019_leftImg8bit.png gtFine/test/berlin/berlin_000380_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000381_000019_leftImg8bit.png gtFine/test/berlin/berlin_000381_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000382_000019_leftImg8bit.png gtFine/test/berlin/berlin_000382_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000383_000019_leftImg8bit.png gtFine/test/berlin/berlin_000383_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000384_000019_leftImg8bit.png gtFine/test/berlin/berlin_000384_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000385_000019_leftImg8bit.png gtFine/test/berlin/berlin_000385_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000386_000019_leftImg8bit.png gtFine/test/berlin/berlin_000386_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000387_000019_leftImg8bit.png gtFine/test/berlin/berlin_000387_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000388_000019_leftImg8bit.png gtFine/test/berlin/berlin_000388_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000389_000019_leftImg8bit.png gtFine/test/berlin/berlin_000389_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000390_000019_leftImg8bit.png gtFine/test/berlin/berlin_000390_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000391_000019_leftImg8bit.png gtFine/test/berlin/berlin_000391_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000392_000019_leftImg8bit.png gtFine/test/berlin/berlin_000392_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000393_000019_leftImg8bit.png gtFine/test/berlin/berlin_000393_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000394_000019_leftImg8bit.png gtFine/test/berlin/berlin_000394_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000395_000019_leftImg8bit.png gtFine/test/berlin/berlin_000395_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000396_000019_leftImg8bit.png gtFine/test/berlin/berlin_000396_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000397_000019_leftImg8bit.png gtFine/test/berlin/berlin_000397_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000398_000019_leftImg8bit.png gtFine/test/berlin/berlin_000398_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000399_000019_leftImg8bit.png gtFine/test/berlin/berlin_000399_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000400_000019_leftImg8bit.png gtFine/test/berlin/berlin_000400_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000401_000019_leftImg8bit.png gtFine/test/berlin/berlin_000401_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000402_000019_leftImg8bit.png gtFine/test/berlin/berlin_000402_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000403_000019_leftImg8bit.png gtFine/test/berlin/berlin_000403_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000404_000019_leftImg8bit.png gtFine/test/berlin/berlin_000404_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000405_000019_leftImg8bit.png gtFine/test/berlin/berlin_000405_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000406_000019_leftImg8bit.png gtFine/test/berlin/berlin_000406_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000407_000019_leftImg8bit.png gtFine/test/berlin/berlin_000407_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000408_000019_leftImg8bit.png gtFine/test/berlin/berlin_000408_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000409_000019_leftImg8bit.png gtFine/test/berlin/berlin_000409_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000410_000019_leftImg8bit.png gtFine/test/berlin/berlin_000410_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000411_000019_leftImg8bit.png gtFine/test/berlin/berlin_000411_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000412_000019_leftImg8bit.png gtFine/test/berlin/berlin_000412_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000413_000019_leftImg8bit.png gtFine/test/berlin/berlin_000413_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000414_000019_leftImg8bit.png gtFine/test/berlin/berlin_000414_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000415_000019_leftImg8bit.png gtFine/test/berlin/berlin_000415_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000416_000019_leftImg8bit.png gtFine/test/berlin/berlin_000416_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000417_000019_leftImg8bit.png gtFine/test/berlin/berlin_000417_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000418_000019_leftImg8bit.png gtFine/test/berlin/berlin_000418_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000419_000019_leftImg8bit.png gtFine/test/berlin/berlin_000419_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000420_000019_leftImg8bit.png gtFine/test/berlin/berlin_000420_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000421_000019_leftImg8bit.png gtFine/test/berlin/berlin_000421_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000422_000019_leftImg8bit.png gtFine/test/berlin/berlin_000422_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000423_000019_leftImg8bit.png gtFine/test/berlin/berlin_000423_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000424_000019_leftImg8bit.png gtFine/test/berlin/berlin_000424_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000425_000019_leftImg8bit.png gtFine/test/berlin/berlin_000425_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000426_000019_leftImg8bit.png gtFine/test/berlin/berlin_000426_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000427_000019_leftImg8bit.png gtFine/test/berlin/berlin_000427_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000428_000019_leftImg8bit.png gtFine/test/berlin/berlin_000428_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000429_000019_leftImg8bit.png gtFine/test/berlin/berlin_000429_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000430_000019_leftImg8bit.png gtFine/test/berlin/berlin_000430_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000431_000019_leftImg8bit.png gtFine/test/berlin/berlin_000431_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000432_000019_leftImg8bit.png gtFine/test/berlin/berlin_000432_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000433_000019_leftImg8bit.png gtFine/test/berlin/berlin_000433_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000434_000019_leftImg8bit.png gtFine/test/berlin/berlin_000434_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000435_000019_leftImg8bit.png gtFine/test/berlin/berlin_000435_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000436_000019_leftImg8bit.png gtFine/test/berlin/berlin_000436_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000437_000019_leftImg8bit.png gtFine/test/berlin/berlin_000437_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000438_000019_leftImg8bit.png gtFine/test/berlin/berlin_000438_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000439_000019_leftImg8bit.png gtFine/test/berlin/berlin_000439_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000440_000019_leftImg8bit.png gtFine/test/berlin/berlin_000440_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000441_000019_leftImg8bit.png gtFine/test/berlin/berlin_000441_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000442_000019_leftImg8bit.png gtFine/test/berlin/berlin_000442_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000443_000019_leftImg8bit.png gtFine/test/berlin/berlin_000443_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000444_000019_leftImg8bit.png gtFine/test/berlin/berlin_000444_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000445_000019_leftImg8bit.png gtFine/test/berlin/berlin_000445_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000446_000019_leftImg8bit.png gtFine/test/berlin/berlin_000446_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000447_000019_leftImg8bit.png gtFine/test/berlin/berlin_000447_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000448_000019_leftImg8bit.png gtFine/test/berlin/berlin_000448_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000449_000019_leftImg8bit.png gtFine/test/berlin/berlin_000449_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000450_000019_leftImg8bit.png gtFine/test/berlin/berlin_000450_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000451_000019_leftImg8bit.png gtFine/test/berlin/berlin_000451_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000452_000019_leftImg8bit.png gtFine/test/berlin/berlin_000452_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000453_000019_leftImg8bit.png gtFine/test/berlin/berlin_000453_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000454_000019_leftImg8bit.png gtFine/test/berlin/berlin_000454_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000455_000019_leftImg8bit.png gtFine/test/berlin/berlin_000455_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000456_000019_leftImg8bit.png gtFine/test/berlin/berlin_000456_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000457_000019_leftImg8bit.png gtFine/test/berlin/berlin_000457_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000458_000019_leftImg8bit.png gtFine/test/berlin/berlin_000458_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000459_000019_leftImg8bit.png gtFine/test/berlin/berlin_000459_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000460_000019_leftImg8bit.png gtFine/test/berlin/berlin_000460_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000461_000019_leftImg8bit.png gtFine/test/berlin/berlin_000461_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000462_000019_leftImg8bit.png gtFine/test/berlin/berlin_000462_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000463_000019_leftImg8bit.png gtFine/test/berlin/berlin_000463_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000464_000019_leftImg8bit.png gtFine/test/berlin/berlin_000464_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000465_000019_leftImg8bit.png gtFine/test/berlin/berlin_000465_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000466_000019_leftImg8bit.png gtFine/test/berlin/berlin_000466_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000467_000019_leftImg8bit.png gtFine/test/berlin/berlin_000467_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000468_000019_leftImg8bit.png gtFine/test/berlin/berlin_000468_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000469_000019_leftImg8bit.png gtFine/test/berlin/berlin_000469_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000470_000019_leftImg8bit.png gtFine/test/berlin/berlin_000470_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000471_000019_leftImg8bit.png gtFine/test/berlin/berlin_000471_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000472_000019_leftImg8bit.png gtFine/test/berlin/berlin_000472_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000473_000019_leftImg8bit.png gtFine/test/berlin/berlin_000473_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000474_000019_leftImg8bit.png gtFine/test/berlin/berlin_000474_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000475_000019_leftImg8bit.png gtFine/test/berlin/berlin_000475_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000476_000019_leftImg8bit.png gtFine/test/berlin/berlin_000476_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000477_000019_leftImg8bit.png gtFine/test/berlin/berlin_000477_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000478_000019_leftImg8bit.png gtFine/test/berlin/berlin_000478_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000479_000019_leftImg8bit.png gtFine/test/berlin/berlin_000479_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000480_000019_leftImg8bit.png gtFine/test/berlin/berlin_000480_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000481_000019_leftImg8bit.png gtFine/test/berlin/berlin_000481_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000482_000019_leftImg8bit.png gtFine/test/berlin/berlin_000482_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000483_000019_leftImg8bit.png gtFine/test/berlin/berlin_000483_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000484_000019_leftImg8bit.png gtFine/test/berlin/berlin_000484_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000485_000019_leftImg8bit.png gtFine/test/berlin/berlin_000485_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000486_000019_leftImg8bit.png gtFine/test/berlin/berlin_000486_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000487_000019_leftImg8bit.png gtFine/test/berlin/berlin_000487_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000488_000019_leftImg8bit.png gtFine/test/berlin/berlin_000488_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000489_000019_leftImg8bit.png gtFine/test/berlin/berlin_000489_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000490_000019_leftImg8bit.png gtFine/test/berlin/berlin_000490_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000491_000019_leftImg8bit.png gtFine/test/berlin/berlin_000491_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000492_000019_leftImg8bit.png gtFine/test/berlin/berlin_000492_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000493_000019_leftImg8bit.png gtFine/test/berlin/berlin_000493_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000494_000019_leftImg8bit.png gtFine/test/berlin/berlin_000494_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000495_000019_leftImg8bit.png gtFine/test/berlin/berlin_000495_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000496_000019_leftImg8bit.png gtFine/test/berlin/berlin_000496_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000497_000019_leftImg8bit.png gtFine/test/berlin/berlin_000497_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000498_000019_leftImg8bit.png gtFine/test/berlin/berlin_000498_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000499_000019_leftImg8bit.png gtFine/test/berlin/berlin_000499_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000500_000019_leftImg8bit.png gtFine/test/berlin/berlin_000500_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000501_000019_leftImg8bit.png gtFine/test/berlin/berlin_000501_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000502_000019_leftImg8bit.png gtFine/test/berlin/berlin_000502_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000503_000019_leftImg8bit.png gtFine/test/berlin/berlin_000503_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000504_000019_leftImg8bit.png gtFine/test/berlin/berlin_000504_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000505_000019_leftImg8bit.png gtFine/test/berlin/berlin_000505_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000506_000019_leftImg8bit.png gtFine/test/berlin/berlin_000506_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000507_000019_leftImg8bit.png gtFine/test/berlin/berlin_000507_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000508_000019_leftImg8bit.png gtFine/test/berlin/berlin_000508_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000509_000019_leftImg8bit.png gtFine/test/berlin/berlin_000509_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000510_000019_leftImg8bit.png gtFine/test/berlin/berlin_000510_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000511_000019_leftImg8bit.png gtFine/test/berlin/berlin_000511_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000512_000019_leftImg8bit.png gtFine/test/berlin/berlin_000512_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000513_000019_leftImg8bit.png gtFine/test/berlin/berlin_000513_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000514_000019_leftImg8bit.png gtFine/test/berlin/berlin_000514_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000515_000019_leftImg8bit.png gtFine/test/berlin/berlin_000515_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000516_000019_leftImg8bit.png gtFine/test/berlin/berlin_000516_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000517_000019_leftImg8bit.png gtFine/test/berlin/berlin_000517_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000518_000019_leftImg8bit.png gtFine/test/berlin/berlin_000518_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000519_000019_leftImg8bit.png gtFine/test/berlin/berlin_000519_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000520_000019_leftImg8bit.png gtFine/test/berlin/berlin_000520_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000521_000019_leftImg8bit.png gtFine/test/berlin/berlin_000521_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000522_000019_leftImg8bit.png gtFine/test/berlin/berlin_000522_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000523_000019_leftImg8bit.png gtFine/test/berlin/berlin_000523_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000524_000019_leftImg8bit.png gtFine/test/berlin/berlin_000524_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000525_000019_leftImg8bit.png gtFine/test/berlin/berlin_000525_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000526_000019_leftImg8bit.png gtFine/test/berlin/berlin_000526_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000527_000019_leftImg8bit.png gtFine/test/berlin/berlin_000527_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000528_000019_leftImg8bit.png gtFine/test/berlin/berlin_000528_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000529_000019_leftImg8bit.png gtFine/test/berlin/berlin_000529_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000530_000019_leftImg8bit.png gtFine/test/berlin/berlin_000530_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000531_000019_leftImg8bit.png gtFine/test/berlin/berlin_000531_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000532_000019_leftImg8bit.png gtFine/test/berlin/berlin_000532_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000533_000019_leftImg8bit.png gtFine/test/berlin/berlin_000533_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000534_000019_leftImg8bit.png gtFine/test/berlin/berlin_000534_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000535_000019_leftImg8bit.png gtFine/test/berlin/berlin_000535_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000536_000019_leftImg8bit.png gtFine/test/berlin/berlin_000536_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000537_000019_leftImg8bit.png gtFine/test/berlin/berlin_000537_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000538_000019_leftImg8bit.png gtFine/test/berlin/berlin_000538_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000539_000019_leftImg8bit.png gtFine/test/berlin/berlin_000539_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000540_000019_leftImg8bit.png gtFine/test/berlin/berlin_000540_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000541_000019_leftImg8bit.png gtFine/test/berlin/berlin_000541_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000542_000019_leftImg8bit.png gtFine/test/berlin/berlin_000542_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/berlin/berlin_000543_000019_leftImg8bit.png gtFine/test/berlin/berlin_000543_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_000321_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_000321_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_000856_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_000856_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_001011_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_001011_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_001187_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_001187_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_001505_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_001505_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_001705_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_001705_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_002308_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_002308_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_002528_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_002528_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_002735_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_002735_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_003080_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_003080_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_003406_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_003406_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_003546_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_003546_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_003731_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_003731_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_004345_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_004345_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_005068_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_005068_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_005260_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_005260_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_005372_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_005372_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_005584_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_005584_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_005741_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_005741_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_005942_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_005942_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_006239_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_006239_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_006603_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_006603_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_006802_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_006802_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_007030_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_007030_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_007186_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_007186_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_007545_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_007545_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_008279_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_008279_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_008581_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_008581_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_008800_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_008800_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_009728_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_009728_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_009928_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_009928_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_010156_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_010156_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_011367_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_011367_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_011831_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_011831_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_012080_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_012080_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_012584_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_012584_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_012788_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_012788_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_013570_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_013570_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_013665_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_013665_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_013814_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_013814_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_014068_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_014068_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_015301_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_015301_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_015411_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_015411_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_015587_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_015587_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_015867_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_015867_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_015942_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_015942_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_016019_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_016019_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_016718_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_016718_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_016924_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_016924_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_017051_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_017051_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_017279_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_017279_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_017438_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_017438_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_017774_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_017774_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_018102_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_018102_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_018345_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_018345_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_018644_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_018644_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_019416_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_019416_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_020757_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_020757_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_020900_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_020900_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_021221_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_021221_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_021341_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_021341_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_021381_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_021381_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_021625_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_021625_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_021826_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_021826_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_022261_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_022261_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_022835_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_022835_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_025061_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_025061_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_025426_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_025426_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_025748_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_025748_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_026053_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_026053_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_026296_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_026296_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_026550_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_026550_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_026660_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_026660_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_026823_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_026823_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_027221_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_027221_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_027586_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_027586_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_027928_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_027928_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_028046_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_028046_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_028148_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_028148_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_028414_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_028414_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_028550_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_028550_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_028747_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_028747_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_029148_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_029148_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_030038_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_030038_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_030366_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_030366_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_030958_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_030958_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_031244_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_031244_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_031510_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_031510_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_032388_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_032388_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_032766_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_032766_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_033675_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_033675_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_033770_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_033770_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_033979_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_033979_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_034705_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_034705_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_034929_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_034929_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_035223_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_035223_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_035537_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_035537_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_035879_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_035879_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_036362_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_036362_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_036732_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_036732_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_037016_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_037016_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_037159_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_037159_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_037422_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_037422_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_038924_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_038924_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_039082_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_039082_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_039221_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_039221_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_039596_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_039596_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_040035_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_040035_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_040472_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_040472_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_041014_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_041014_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_041142_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_041142_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_041223_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_041223_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_041444_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_041444_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_042403_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_042403_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_042571_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_042571_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_042717_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_042717_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_043100_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_043100_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_043389_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_043389_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_043610_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_043610_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_044085_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_044085_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_045117_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_045117_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_045232_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_045232_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_046023_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_046023_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_046212_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_046212_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_046495_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_046495_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_047542_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_047542_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_047918_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_047918_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_048227_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_048227_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_048518_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_048518_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_048754_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_048754_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_048864_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_048864_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_049313_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_049313_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_049446_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_049446_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_050021_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_050021_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_050426_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_050426_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_050586_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_050586_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_051102_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_051102_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_051223_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_051223_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_051894_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_051894_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_052155_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_052155_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_053028_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_053028_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_053384_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_053384_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_053583_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_053583_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_053779_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_053779_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_055003_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_055003_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_055145_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_055145_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_056175_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_056175_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_056226_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_056226_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_056310_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_056310_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_056493_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_056493_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_056603_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_056603_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_056866_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_056866_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_058374_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_058374_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_058776_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_058776_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_058934_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_058934_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_059119_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_059119_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_059303_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_059303_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_059355_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_059355_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_059501_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_059501_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_059651_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_059651_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_059729_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_059729_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_059766_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_059766_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_059842_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_059842_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_060786_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_060786_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_060861_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_060861_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_061094_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_061094_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_061341_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_061341_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_061975_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_061975_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_062121_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_062121_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_063427_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_063427_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_063623_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_063623_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_063939_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_063939_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_064271_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_064271_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_064583_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_064583_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_064805_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_064805_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_064910_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_064910_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_065023_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_065023_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_065154_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_065154_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_066195_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_066195_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_066405_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_066405_gtFine_labelTrainIds.png
+leftImg8bit/test/bielefeld/bielefeld_000000_066495_leftImg8bit.png gtFine/test/bielefeld/bielefeld_000000_066495_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000000_000019_leftImg8bit.png gtFine/test/bonn/bonn_000000_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000001_000019_leftImg8bit.png gtFine/test/bonn/bonn_000001_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000002_000019_leftImg8bit.png gtFine/test/bonn/bonn_000002_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000003_000019_leftImg8bit.png gtFine/test/bonn/bonn_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000004_000019_leftImg8bit.png gtFine/test/bonn/bonn_000004_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000005_000019_leftImg8bit.png gtFine/test/bonn/bonn_000005_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000006_000019_leftImg8bit.png gtFine/test/bonn/bonn_000006_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000007_000019_leftImg8bit.png gtFine/test/bonn/bonn_000007_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000008_000019_leftImg8bit.png gtFine/test/bonn/bonn_000008_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000009_000019_leftImg8bit.png gtFine/test/bonn/bonn_000009_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000010_000019_leftImg8bit.png gtFine/test/bonn/bonn_000010_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000011_000019_leftImg8bit.png gtFine/test/bonn/bonn_000011_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000012_000019_leftImg8bit.png gtFine/test/bonn/bonn_000012_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000013_000019_leftImg8bit.png gtFine/test/bonn/bonn_000013_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000014_000019_leftImg8bit.png gtFine/test/bonn/bonn_000014_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000015_000019_leftImg8bit.png gtFine/test/bonn/bonn_000015_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000016_000019_leftImg8bit.png gtFine/test/bonn/bonn_000016_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000017_000019_leftImg8bit.png gtFine/test/bonn/bonn_000017_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000018_000019_leftImg8bit.png gtFine/test/bonn/bonn_000018_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000019_000019_leftImg8bit.png gtFine/test/bonn/bonn_000019_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000020_000019_leftImg8bit.png gtFine/test/bonn/bonn_000020_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000021_000019_leftImg8bit.png gtFine/test/bonn/bonn_000021_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000022_000019_leftImg8bit.png gtFine/test/bonn/bonn_000022_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000023_000019_leftImg8bit.png gtFine/test/bonn/bonn_000023_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000024_000019_leftImg8bit.png gtFine/test/bonn/bonn_000024_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000025_000019_leftImg8bit.png gtFine/test/bonn/bonn_000025_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000026_000019_leftImg8bit.png gtFine/test/bonn/bonn_000026_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000027_000019_leftImg8bit.png gtFine/test/bonn/bonn_000027_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000028_000019_leftImg8bit.png gtFine/test/bonn/bonn_000028_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000029_000019_leftImg8bit.png gtFine/test/bonn/bonn_000029_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000030_000019_leftImg8bit.png gtFine/test/bonn/bonn_000030_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000031_000019_leftImg8bit.png gtFine/test/bonn/bonn_000031_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000032_000019_leftImg8bit.png gtFine/test/bonn/bonn_000032_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000033_000019_leftImg8bit.png gtFine/test/bonn/bonn_000033_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000034_000019_leftImg8bit.png gtFine/test/bonn/bonn_000034_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000035_000019_leftImg8bit.png gtFine/test/bonn/bonn_000035_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000036_000019_leftImg8bit.png gtFine/test/bonn/bonn_000036_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000037_000019_leftImg8bit.png gtFine/test/bonn/bonn_000037_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000038_000019_leftImg8bit.png gtFine/test/bonn/bonn_000038_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000039_000019_leftImg8bit.png gtFine/test/bonn/bonn_000039_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000040_000019_leftImg8bit.png gtFine/test/bonn/bonn_000040_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000041_000019_leftImg8bit.png gtFine/test/bonn/bonn_000041_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000042_000019_leftImg8bit.png gtFine/test/bonn/bonn_000042_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000043_000019_leftImg8bit.png gtFine/test/bonn/bonn_000043_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000044_000019_leftImg8bit.png gtFine/test/bonn/bonn_000044_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/bonn/bonn_000045_000019_leftImg8bit.png gtFine/test/bonn/bonn_000045_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000000_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000000_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000001_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000001_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000002_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000002_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000003_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000004_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000004_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000005_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000005_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000006_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000006_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000007_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000007_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000008_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000008_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000009_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000009_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000010_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000010_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000011_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000011_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000012_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000012_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000013_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000013_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000014_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000014_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000015_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000015_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000016_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000016_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000017_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000017_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000018_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000018_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000019_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000019_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000020_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000020_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000021_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000021_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000022_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000022_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000023_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000023_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000024_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000024_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000025_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000025_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000026_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000026_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000027_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000027_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000028_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000028_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000029_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000029_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000030_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000030_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000031_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000031_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000032_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000032_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000033_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000033_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000034_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000034_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000035_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000035_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000036_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000036_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000037_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000037_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000038_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000038_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000039_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000039_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000040_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000040_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000041_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000041_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000042_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000042_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000043_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000043_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000044_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000044_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000045_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000045_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000046_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000046_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000047_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000047_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000048_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000048_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000049_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000049_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000050_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000050_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000051_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000051_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000052_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000052_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000053_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000053_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000054_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000054_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000055_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000055_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000056_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000056_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/leverkusen/leverkusen_000057_000019_leftImg8bit.png gtFine/test/leverkusen/leverkusen_000057_000019_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_000093_leftImg8bit.png gtFine/test/mainz/mainz_000000_000093_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_000293_leftImg8bit.png gtFine/test/mainz/mainz_000000_000293_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_001003_leftImg8bit.png gtFine/test/mainz/mainz_000000_001003_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_001068_leftImg8bit.png gtFine/test/mainz/mainz_000000_001068_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_001265_leftImg8bit.png gtFine/test/mainz/mainz_000000_001265_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_001410_leftImg8bit.png gtFine/test/mainz/mainz_000000_001410_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_001601_leftImg8bit.png gtFine/test/mainz/mainz_000000_001601_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_001857_leftImg8bit.png gtFine/test/mainz/mainz_000000_001857_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_002212_leftImg8bit.png gtFine/test/mainz/mainz_000000_002212_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_002353_leftImg8bit.png gtFine/test/mainz/mainz_000000_002353_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_003049_leftImg8bit.png gtFine/test/mainz/mainz_000000_003049_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_003250_leftImg8bit.png gtFine/test/mainz/mainz_000000_003250_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_003506_leftImg8bit.png gtFine/test/mainz/mainz_000000_003506_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_003619_leftImg8bit.png gtFine/test/mainz/mainz_000000_003619_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_004000_leftImg8bit.png gtFine/test/mainz/mainz_000000_004000_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_004237_leftImg8bit.png gtFine/test/mainz/mainz_000000_004237_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_004542_leftImg8bit.png gtFine/test/mainz/mainz_000000_004542_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_004740_leftImg8bit.png gtFine/test/mainz/mainz_000000_004740_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_005403_leftImg8bit.png gtFine/test/mainz/mainz_000000_005403_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_005549_leftImg8bit.png gtFine/test/mainz/mainz_000000_005549_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_005817_leftImg8bit.png gtFine/test/mainz/mainz_000000_005817_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_006141_leftImg8bit.png gtFine/test/mainz/mainz_000000_006141_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_006263_leftImg8bit.png gtFine/test/mainz/mainz_000000_006263_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_006368_leftImg8bit.png gtFine/test/mainz/mainz_000000_006368_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_006649_leftImg8bit.png gtFine/test/mainz/mainz_000000_006649_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_007415_leftImg8bit.png gtFine/test/mainz/mainz_000000_007415_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_007813_leftImg8bit.png gtFine/test/mainz/mainz_000000_007813_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_008001_leftImg8bit.png gtFine/test/mainz/mainz_000000_008001_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_008165_leftImg8bit.png gtFine/test/mainz/mainz_000000_008165_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_008509_leftImg8bit.png gtFine/test/mainz/mainz_000000_008509_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_008645_leftImg8bit.png gtFine/test/mainz/mainz_000000_008645_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_008871_leftImg8bit.png gtFine/test/mainz/mainz_000000_008871_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_009751_leftImg8bit.png gtFine/test/mainz/mainz_000000_009751_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_009985_leftImg8bit.png gtFine/test/mainz/mainz_000000_009985_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_010171_leftImg8bit.png gtFine/test/mainz/mainz_000000_010171_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_010417_leftImg8bit.png gtFine/test/mainz/mainz_000000_010417_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_010550_leftImg8bit.png gtFine/test/mainz/mainz_000000_010550_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_011339_leftImg8bit.png gtFine/test/mainz/mainz_000000_011339_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_011879_leftImg8bit.png gtFine/test/mainz/mainz_000000_011879_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_011965_leftImg8bit.png gtFine/test/mainz/mainz_000000_011965_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_012392_leftImg8bit.png gtFine/test/mainz/mainz_000000_012392_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_012737_leftImg8bit.png gtFine/test/mainz/mainz_000000_012737_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_013095_leftImg8bit.png gtFine/test/mainz/mainz_000000_013095_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_013437_leftImg8bit.png gtFine/test/mainz/mainz_000000_013437_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_013671_leftImg8bit.png gtFine/test/mainz/mainz_000000_013671_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_013960_leftImg8bit.png gtFine/test/mainz/mainz_000000_013960_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_014193_leftImg8bit.png gtFine/test/mainz/mainz_000000_014193_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_014742_leftImg8bit.png gtFine/test/mainz/mainz_000000_014742_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_015052_leftImg8bit.png gtFine/test/mainz/mainz_000000_015052_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_015170_leftImg8bit.png gtFine/test/mainz/mainz_000000_015170_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_015760_leftImg8bit.png gtFine/test/mainz/mainz_000000_015760_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_016083_leftImg8bit.png gtFine/test/mainz/mainz_000000_016083_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_016281_leftImg8bit.png gtFine/test/mainz/mainz_000000_016281_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_016612_leftImg8bit.png gtFine/test/mainz/mainz_000000_016612_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_016651_leftImg8bit.png gtFine/test/mainz/mainz_000000_016651_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_016915_leftImg8bit.png gtFine/test/mainz/mainz_000000_016915_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_017927_leftImg8bit.png gtFine/test/mainz/mainz_000000_017927_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_018249_leftImg8bit.png gtFine/test/mainz/mainz_000000_018249_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_018883_leftImg8bit.png gtFine/test/mainz/mainz_000000_018883_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_019043_leftImg8bit.png gtFine/test/mainz/mainz_000000_019043_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_019227_leftImg8bit.png gtFine/test/mainz/mainz_000000_019227_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_019439_leftImg8bit.png gtFine/test/mainz/mainz_000000_019439_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_019686_leftImg8bit.png gtFine/test/mainz/mainz_000000_019686_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_019847_leftImg8bit.png gtFine/test/mainz/mainz_000000_019847_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_020139_leftImg8bit.png gtFine/test/mainz/mainz_000000_020139_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_020415_leftImg8bit.png gtFine/test/mainz/mainz_000000_020415_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_021457_leftImg8bit.png gtFine/test/mainz/mainz_000000_021457_gtFine_labelTrainIds.png
+leftImg8bit/test/mainz/mainz_000000_021524_leftImg8bit.png gtFine/test/mainz/mainz_000000_021524_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000000_021735_leftImg8bit.png gtFine/test/mainz/mainz_000000_021735_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000000_021833_leftImg8bit.png gtFine/test/mainz/mainz_000000_021833_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000000_022091_leftImg8bit.png gtFine/test/mainz/mainz_000000_022091_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000000_022417_leftImg8bit.png gtFine/test/mainz/mainz_000000_022417_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_000120_leftImg8bit.png gtFine/test/mainz/mainz_000001_000120_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_000428_leftImg8bit.png gtFine/test/mainz/mainz_000001_000428_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_001509_leftImg8bit.png gtFine/test/mainz/mainz_000001_001509_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_002033_leftImg8bit.png gtFine/test/mainz/mainz_000001_002033_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_002543_leftImg8bit.png gtFine/test/mainz/mainz_000001_002543_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_002884_leftImg8bit.png gtFine/test/mainz/mainz_000001_002884_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_003012_leftImg8bit.png gtFine/test/mainz/mainz_000001_003012_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_003624_leftImg8bit.png gtFine/test/mainz/mainz_000001_003624_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_003702_leftImg8bit.png gtFine/test/mainz/mainz_000001_003702_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_003907_leftImg8bit.png gtFine/test/mainz/mainz_000001_003907_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_004132_leftImg8bit.png gtFine/test/mainz/mainz_000001_004132_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_004219_leftImg8bit.png gtFine/test/mainz/mainz_000001_004219_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_004823_leftImg8bit.png gtFine/test/mainz/mainz_000001_004823_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_005016_leftImg8bit.png gtFine/test/mainz/mainz_000001_005016_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_005163_leftImg8bit.png gtFine/test/mainz/mainz_000001_005163_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_005366_leftImg8bit.png gtFine/test/mainz/mainz_000001_005366_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_005665_leftImg8bit.png gtFine/test/mainz/mainz_000001_005665_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_005815_leftImg8bit.png gtFine/test/mainz/mainz_000001_005815_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_005911_leftImg8bit.png gtFine/test/mainz/mainz_000001_005911_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_006194_leftImg8bit.png gtFine/test/mainz/mainz_000001_006194_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_006768_leftImg8bit.png gtFine/test/mainz/mainz_000001_006768_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_007171_leftImg8bit.png gtFine/test/mainz/mainz_000001_007171_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_007460_leftImg8bit.png gtFine/test/mainz/mainz_000001_007460_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_007595_leftImg8bit.png 
gtFine/test/mainz/mainz_000001_007595_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_007956_leftImg8bit.png gtFine/test/mainz/mainz_000001_007956_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_008056_leftImg8bit.png gtFine/test/mainz/mainz_000001_008056_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_008264_leftImg8bit.png gtFine/test/mainz/mainz_000001_008264_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_008540_leftImg8bit.png gtFine/test/mainz/mainz_000001_008540_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_008638_leftImg8bit.png gtFine/test/mainz/mainz_000001_008638_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_008771_leftImg8bit.png gtFine/test/mainz/mainz_000001_008771_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_009152_leftImg8bit.png gtFine/test/mainz/mainz_000001_009152_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_009328_leftImg8bit.png gtFine/test/mainz/mainz_000001_009328_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_009811_leftImg8bit.png gtFine/test/mainz/mainz_000001_009811_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_009867_leftImg8bit.png gtFine/test/mainz/mainz_000001_009867_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_009996_leftImg8bit.png gtFine/test/mainz/mainz_000001_009996_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_010853_leftImg8bit.png gtFine/test/mainz/mainz_000001_010853_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_011333_leftImg8bit.png gtFine/test/mainz/mainz_000001_011333_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_011736_leftImg8bit.png gtFine/test/mainz/mainz_000001_011736_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_011785_leftImg8bit.png gtFine/test/mainz/mainz_000001_011785_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_012186_leftImg8bit.png gtFine/test/mainz/mainz_000001_012186_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_012470_leftImg8bit.png gtFine/test/mainz/mainz_000001_012470_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_012541_leftImg8bit.png gtFine/test/mainz/mainz_000001_012541_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_012644_leftImg8bit.png gtFine/test/mainz/mainz_000001_012644_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_012950_leftImg8bit.png gtFine/test/mainz/mainz_000001_012950_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_013313_leftImg8bit.png gtFine/test/mainz/mainz_000001_013313_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_014073_leftImg8bit.png gtFine/test/mainz/mainz_000001_014073_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_014469_leftImg8bit.png gtFine/test/mainz/mainz_000001_014469_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_014626_leftImg8bit.png gtFine/test/mainz/mainz_000001_014626_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_015117_leftImg8bit.png gtFine/test/mainz/mainz_000001_015117_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_015235_leftImg8bit.png gtFine/test/mainz/mainz_000001_015235_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_015508_leftImg8bit.png gtFine/test/mainz/mainz_000001_015508_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_015724_leftImg8bit.png gtFine/test/mainz/mainz_000001_015724_gtFine_labelTrainIds.png 
+leftImg8bit/test/mainz/mainz_000001_016011_leftImg8bit.png gtFine/test/mainz/mainz_000001_016011_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_016391_leftImg8bit.png gtFine/test/mainz/mainz_000001_016391_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_016931_leftImg8bit.png gtFine/test/mainz/mainz_000001_016931_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_017618_leftImg8bit.png gtFine/test/mainz/mainz_000001_017618_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_017992_leftImg8bit.png gtFine/test/mainz/mainz_000001_017992_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_018145_leftImg8bit.png gtFine/test/mainz/mainz_000001_018145_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_018329_leftImg8bit.png gtFine/test/mainz/mainz_000001_018329_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_018670_leftImg8bit.png gtFine/test/mainz/mainz_000001_018670_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_018817_leftImg8bit.png gtFine/test/mainz/mainz_000001_018817_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_019061_leftImg8bit.png gtFine/test/mainz/mainz_000001_019061_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_019286_leftImg8bit.png gtFine/test/mainz/mainz_000001_019286_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_019593_leftImg8bit.png gtFine/test/mainz/mainz_000001_019593_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_020068_leftImg8bit.png gtFine/test/mainz/mainz_000001_020068_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_020193_leftImg8bit.png gtFine/test/mainz/mainz_000001_020193_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_020484_leftImg8bit.png gtFine/test/mainz/mainz_000001_020484_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_020829_leftImg8bit.png gtFine/test/mainz/mainz_000001_020829_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_021042_leftImg8bit.png gtFine/test/mainz/mainz_000001_021042_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_021547_leftImg8bit.png gtFine/test/mainz/mainz_000001_021547_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_021892_leftImg8bit.png gtFine/test/mainz/mainz_000001_021892_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_021946_leftImg8bit.png gtFine/test/mainz/mainz_000001_021946_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_022125_leftImg8bit.png gtFine/test/mainz/mainz_000001_022125_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_022630_leftImg8bit.png gtFine/test/mainz/mainz_000001_022630_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_023439_leftImg8bit.png gtFine/test/mainz/mainz_000001_023439_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_023604_leftImg8bit.png gtFine/test/mainz/mainz_000001_023604_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_024439_leftImg8bit.png gtFine/test/mainz/mainz_000001_024439_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_024489_leftImg8bit.png gtFine/test/mainz/mainz_000001_024489_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_024718_leftImg8bit.png gtFine/test/mainz/mainz_000001_024718_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_025161_leftImg8bit.png gtFine/test/mainz/mainz_000001_025161_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_025390_leftImg8bit.png 
gtFine/test/mainz/mainz_000001_025390_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_025623_leftImg8bit.png gtFine/test/mainz/mainz_000001_025623_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_026209_leftImg8bit.png gtFine/test/mainz/mainz_000001_026209_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_026837_leftImg8bit.png gtFine/test/mainz/mainz_000001_026837_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_026963_leftImg8bit.png gtFine/test/mainz/mainz_000001_026963_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_027053_leftImg8bit.png gtFine/test/mainz/mainz_000001_027053_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_027124_leftImg8bit.png gtFine/test/mainz/mainz_000001_027124_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_027377_leftImg8bit.png gtFine/test/mainz/mainz_000001_027377_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_027675_leftImg8bit.png gtFine/test/mainz/mainz_000001_027675_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_027751_leftImg8bit.png gtFine/test/mainz/mainz_000001_027751_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_028111_leftImg8bit.png gtFine/test/mainz/mainz_000001_028111_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_028326_leftImg8bit.png gtFine/test/mainz/mainz_000001_028326_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_028566_leftImg8bit.png gtFine/test/mainz/mainz_000001_028566_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_028847_leftImg8bit.png gtFine/test/mainz/mainz_000001_028847_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_029293_leftImg8bit.png gtFine/test/mainz/mainz_000001_029293_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_029521_leftImg8bit.png gtFine/test/mainz/mainz_000001_029521_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_029755_leftImg8bit.png gtFine/test/mainz/mainz_000001_029755_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_029950_leftImg8bit.png gtFine/test/mainz/mainz_000001_029950_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_030417_leftImg8bit.png gtFine/test/mainz/mainz_000001_030417_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_030630_leftImg8bit.png gtFine/test/mainz/mainz_000001_030630_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_031026_leftImg8bit.png gtFine/test/mainz/mainz_000001_031026_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_031350_leftImg8bit.png gtFine/test/mainz/mainz_000001_031350_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_031697_leftImg8bit.png gtFine/test/mainz/mainz_000001_031697_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_031946_leftImg8bit.png gtFine/test/mainz/mainz_000001_031946_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_032294_leftImg8bit.png gtFine/test/mainz/mainz_000001_032294_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_032401_leftImg8bit.png gtFine/test/mainz/mainz_000001_032401_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_032567_leftImg8bit.png gtFine/test/mainz/mainz_000001_032567_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_032691_leftImg8bit.png gtFine/test/mainz/mainz_000001_032691_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_032767_leftImg8bit.png gtFine/test/mainz/mainz_000001_032767_gtFine_labelTrainIds.png 
+leftImg8bit/test/mainz/mainz_000001_032911_leftImg8bit.png gtFine/test/mainz/mainz_000001_032911_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_033096_leftImg8bit.png gtFine/test/mainz/mainz_000001_033096_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_033329_leftImg8bit.png gtFine/test/mainz/mainz_000001_033329_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_033437_leftImg8bit.png gtFine/test/mainz/mainz_000001_033437_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_033603_leftImg8bit.png gtFine/test/mainz/mainz_000001_033603_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_033756_leftImg8bit.png gtFine/test/mainz/mainz_000001_033756_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_034033_leftImg8bit.png gtFine/test/mainz/mainz_000001_034033_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_034209_leftImg8bit.png gtFine/test/mainz/mainz_000001_034209_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_034394_leftImg8bit.png gtFine/test/mainz/mainz_000001_034394_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_034508_leftImg8bit.png gtFine/test/mainz/mainz_000001_034508_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_034681_leftImg8bit.png gtFine/test/mainz/mainz_000001_034681_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_035293_leftImg8bit.png gtFine/test/mainz/mainz_000001_035293_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_035585_leftImg8bit.png gtFine/test/mainz/mainz_000001_035585_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_035963_leftImg8bit.png gtFine/test/mainz/mainz_000001_035963_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_036115_leftImg8bit.png gtFine/test/mainz/mainz_000001_036115_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_036240_leftImg8bit.png gtFine/test/mainz/mainz_000001_036240_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_036412_leftImg8bit.png gtFine/test/mainz/mainz_000001_036412_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_037170_leftImg8bit.png gtFine/test/mainz/mainz_000001_037170_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_037411_leftImg8bit.png gtFine/test/mainz/mainz_000001_037411_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_037532_leftImg8bit.png gtFine/test/mainz/mainz_000001_037532_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_037736_leftImg8bit.png gtFine/test/mainz/mainz_000001_037736_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_037905_leftImg8bit.png gtFine/test/mainz/mainz_000001_037905_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_038026_leftImg8bit.png gtFine/test/mainz/mainz_000001_038026_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_038191_leftImg8bit.png gtFine/test/mainz/mainz_000001_038191_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_038347_leftImg8bit.png gtFine/test/mainz/mainz_000001_038347_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_038768_leftImg8bit.png gtFine/test/mainz/mainz_000001_038768_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_038955_leftImg8bit.png gtFine/test/mainz/mainz_000001_038955_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_039075_leftImg8bit.png gtFine/test/mainz/mainz_000001_039075_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_039470_leftImg8bit.png 
gtFine/test/mainz/mainz_000001_039470_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_039943_leftImg8bit.png gtFine/test/mainz/mainz_000001_039943_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_040195_leftImg8bit.png gtFine/test/mainz/mainz_000001_040195_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_040367_leftImg8bit.png gtFine/test/mainz/mainz_000001_040367_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_040839_leftImg8bit.png gtFine/test/mainz/mainz_000001_040839_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_041172_leftImg8bit.png gtFine/test/mainz/mainz_000001_041172_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_041284_leftImg8bit.png gtFine/test/mainz/mainz_000001_041284_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_041647_leftImg8bit.png gtFine/test/mainz/mainz_000001_041647_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_041797_leftImg8bit.png gtFine/test/mainz/mainz_000001_041797_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_041887_leftImg8bit.png gtFine/test/mainz/mainz_000001_041887_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_041923_leftImg8bit.png gtFine/test/mainz/mainz_000001_041923_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_042121_leftImg8bit.png gtFine/test/mainz/mainz_000001_042121_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_042400_leftImg8bit.png gtFine/test/mainz/mainz_000001_042400_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_042851_leftImg8bit.png gtFine/test/mainz/mainz_000001_042851_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_043656_leftImg8bit.png gtFine/test/mainz/mainz_000001_043656_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_043886_leftImg8bit.png gtFine/test/mainz/mainz_000001_043886_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_044366_leftImg8bit.png gtFine/test/mainz/mainz_000001_044366_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_044619_leftImg8bit.png gtFine/test/mainz/mainz_000001_044619_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_045197_leftImg8bit.png gtFine/test/mainz/mainz_000001_045197_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_045385_leftImg8bit.png gtFine/test/mainz/mainz_000001_045385_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_045651_leftImg8bit.png gtFine/test/mainz/mainz_000001_045651_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_045782_leftImg8bit.png gtFine/test/mainz/mainz_000001_045782_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_046381_leftImg8bit.png gtFine/test/mainz/mainz_000001_046381_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_046981_leftImg8bit.png gtFine/test/mainz/mainz_000001_046981_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_047546_leftImg8bit.png gtFine/test/mainz/mainz_000001_047546_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_047611_leftImg8bit.png gtFine/test/mainz/mainz_000001_047611_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_047888_leftImg8bit.png gtFine/test/mainz/mainz_000001_047888_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000001_048725_leftImg8bit.png gtFine/test/mainz/mainz_000001_048725_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000002_000061_leftImg8bit.png gtFine/test/mainz/mainz_000002_000061_gtFine_labelTrainIds.png 
+leftImg8bit/test/mainz/mainz_000002_000181_leftImg8bit.png gtFine/test/mainz/mainz_000002_000181_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000002_000381_leftImg8bit.png gtFine/test/mainz/mainz_000002_000381_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000002_000912_leftImg8bit.png gtFine/test/mainz/mainz_000002_000912_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000002_001747_leftImg8bit.png gtFine/test/mainz/mainz_000002_001747_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000002_001871_leftImg8bit.png gtFine/test/mainz/mainz_000002_001871_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000002_002279_leftImg8bit.png gtFine/test/mainz/mainz_000002_002279_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_000043_leftImg8bit.png gtFine/test/mainz/mainz_000003_000043_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_000968_leftImg8bit.png gtFine/test/mainz/mainz_000003_000968_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_001465_leftImg8bit.png gtFine/test/mainz/mainz_000003_001465_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_001694_leftImg8bit.png gtFine/test/mainz/mainz_000003_001694_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_001899_leftImg8bit.png gtFine/test/mainz/mainz_000003_001899_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_003042_leftImg8bit.png gtFine/test/mainz/mainz_000003_003042_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_003455_leftImg8bit.png gtFine/test/mainz/mainz_000003_003455_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_003558_leftImg8bit.png gtFine/test/mainz/mainz_000003_003558_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_003711_leftImg8bit.png gtFine/test/mainz/mainz_000003_003711_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_003791_leftImg8bit.png gtFine/test/mainz/mainz_000003_003791_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_003942_leftImg8bit.png gtFine/test/mainz/mainz_000003_003942_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_004144_leftImg8bit.png gtFine/test/mainz/mainz_000003_004144_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_004228_leftImg8bit.png gtFine/test/mainz/mainz_000003_004228_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_004576_leftImg8bit.png gtFine/test/mainz/mainz_000003_004576_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_004774_leftImg8bit.png gtFine/test/mainz/mainz_000003_004774_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_004883_leftImg8bit.png gtFine/test/mainz/mainz_000003_004883_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_005029_leftImg8bit.png gtFine/test/mainz/mainz_000003_005029_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_005088_leftImg8bit.png gtFine/test/mainz/mainz_000003_005088_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_005162_leftImg8bit.png gtFine/test/mainz/mainz_000003_005162_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_006478_leftImg8bit.png gtFine/test/mainz/mainz_000003_006478_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_006863_leftImg8bit.png gtFine/test/mainz/mainz_000003_006863_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_007024_leftImg8bit.png gtFine/test/mainz/mainz_000003_007024_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_007144_leftImg8bit.png 
gtFine/test/mainz/mainz_000003_007144_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_007255_leftImg8bit.png gtFine/test/mainz/mainz_000003_007255_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_007701_leftImg8bit.png gtFine/test/mainz/mainz_000003_007701_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_008258_leftImg8bit.png gtFine/test/mainz/mainz_000003_008258_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_008690_leftImg8bit.png gtFine/test/mainz/mainz_000003_008690_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_008876_leftImg8bit.png gtFine/test/mainz/mainz_000003_008876_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_009819_leftImg8bit.png gtFine/test/mainz/mainz_000003_009819_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_010019_leftImg8bit.png gtFine/test/mainz/mainz_000003_010019_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_010772_leftImg8bit.png gtFine/test/mainz/mainz_000003_010772_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_010880_leftImg8bit.png gtFine/test/mainz/mainz_000003_010880_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_010924_leftImg8bit.png gtFine/test/mainz/mainz_000003_010924_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_011182_leftImg8bit.png gtFine/test/mainz/mainz_000003_011182_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_011352_leftImg8bit.png gtFine/test/mainz/mainz_000003_011352_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_011949_leftImg8bit.png gtFine/test/mainz/mainz_000003_011949_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_012168_leftImg8bit.png gtFine/test/mainz/mainz_000003_012168_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_012341_leftImg8bit.png gtFine/test/mainz/mainz_000003_012341_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_012995_leftImg8bit.png gtFine/test/mainz/mainz_000003_012995_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_013348_leftImg8bit.png gtFine/test/mainz/mainz_000003_013348_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_013983_leftImg8bit.png gtFine/test/mainz/mainz_000003_013983_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_014083_leftImg8bit.png gtFine/test/mainz/mainz_000003_014083_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_014319_leftImg8bit.png gtFine/test/mainz/mainz_000003_014319_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_014457_leftImg8bit.png gtFine/test/mainz/mainz_000003_014457_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_014537_leftImg8bit.png gtFine/test/mainz/mainz_000003_014537_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_014959_leftImg8bit.png gtFine/test/mainz/mainz_000003_014959_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_015411_leftImg8bit.png gtFine/test/mainz/mainz_000003_015411_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_015649_leftImg8bit.png gtFine/test/mainz/mainz_000003_015649_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_015917_leftImg8bit.png gtFine/test/mainz/mainz_000003_015917_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_016360_leftImg8bit.png gtFine/test/mainz/mainz_000003_016360_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_016542_leftImg8bit.png gtFine/test/mainz/mainz_000003_016542_gtFine_labelTrainIds.png 
+leftImg8bit/test/mainz/mainz_000003_016708_leftImg8bit.png gtFine/test/mainz/mainz_000003_016708_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_016877_leftImg8bit.png gtFine/test/mainz/mainz_000003_016877_gtFine_labelTrainIds.png +leftImg8bit/test/mainz/mainz_000003_017171_leftImg8bit.png gtFine/test/mainz/mainz_000003_017171_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000000_000019_leftImg8bit.png gtFine/test/munich/munich_000000_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000001_000019_leftImg8bit.png gtFine/test/munich/munich_000001_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000002_000019_leftImg8bit.png gtFine/test/munich/munich_000002_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000003_000019_leftImg8bit.png gtFine/test/munich/munich_000003_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000004_000019_leftImg8bit.png gtFine/test/munich/munich_000004_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000005_000019_leftImg8bit.png gtFine/test/munich/munich_000005_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000006_000019_leftImg8bit.png gtFine/test/munich/munich_000006_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000007_000019_leftImg8bit.png gtFine/test/munich/munich_000007_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000008_000019_leftImg8bit.png gtFine/test/munich/munich_000008_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000009_000019_leftImg8bit.png gtFine/test/munich/munich_000009_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000010_000019_leftImg8bit.png gtFine/test/munich/munich_000010_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000011_000019_leftImg8bit.png gtFine/test/munich/munich_000011_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000012_000019_leftImg8bit.png gtFine/test/munich/munich_000012_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000013_000019_leftImg8bit.png gtFine/test/munich/munich_000013_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000014_000019_leftImg8bit.png gtFine/test/munich/munich_000014_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000015_000019_leftImg8bit.png gtFine/test/munich/munich_000015_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000016_000019_leftImg8bit.png gtFine/test/munich/munich_000016_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000017_000019_leftImg8bit.png gtFine/test/munich/munich_000017_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000018_000019_leftImg8bit.png gtFine/test/munich/munich_000018_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000019_000019_leftImg8bit.png gtFine/test/munich/munich_000019_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000020_000019_leftImg8bit.png gtFine/test/munich/munich_000020_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000021_000019_leftImg8bit.png gtFine/test/munich/munich_000021_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000022_000019_leftImg8bit.png gtFine/test/munich/munich_000022_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000023_000019_leftImg8bit.png gtFine/test/munich/munich_000023_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000024_000019_leftImg8bit.png gtFine/test/munich/munich_000024_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/munich/munich_000025_000019_leftImg8bit.png gtFine/test/munich/munich_000025_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000026_000019_leftImg8bit.png gtFine/test/munich/munich_000026_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000027_000019_leftImg8bit.png gtFine/test/munich/munich_000027_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000028_000019_leftImg8bit.png gtFine/test/munich/munich_000028_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000029_000019_leftImg8bit.png gtFine/test/munich/munich_000029_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000030_000019_leftImg8bit.png gtFine/test/munich/munich_000030_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000031_000019_leftImg8bit.png gtFine/test/munich/munich_000031_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000032_000019_leftImg8bit.png gtFine/test/munich/munich_000032_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000033_000019_leftImg8bit.png gtFine/test/munich/munich_000033_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000034_000019_leftImg8bit.png gtFine/test/munich/munich_000034_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000035_000019_leftImg8bit.png gtFine/test/munich/munich_000035_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000036_000019_leftImg8bit.png gtFine/test/munich/munich_000036_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000037_000019_leftImg8bit.png gtFine/test/munich/munich_000037_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000038_000019_leftImg8bit.png gtFine/test/munich/munich_000038_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000039_000019_leftImg8bit.png gtFine/test/munich/munich_000039_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000040_000019_leftImg8bit.png gtFine/test/munich/munich_000040_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000041_000019_leftImg8bit.png gtFine/test/munich/munich_000041_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000042_000019_leftImg8bit.png gtFine/test/munich/munich_000042_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000043_000019_leftImg8bit.png gtFine/test/munich/munich_000043_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000044_000019_leftImg8bit.png gtFine/test/munich/munich_000044_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000045_000019_leftImg8bit.png gtFine/test/munich/munich_000045_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000046_000019_leftImg8bit.png gtFine/test/munich/munich_000046_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000047_000019_leftImg8bit.png gtFine/test/munich/munich_000047_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000048_000019_leftImg8bit.png gtFine/test/munich/munich_000048_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000049_000019_leftImg8bit.png gtFine/test/munich/munich_000049_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000050_000019_leftImg8bit.png gtFine/test/munich/munich_000050_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000051_000019_leftImg8bit.png gtFine/test/munich/munich_000051_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000052_000019_leftImg8bit.png 
gtFine/test/munich/munich_000052_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000053_000019_leftImg8bit.png gtFine/test/munich/munich_000053_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000054_000019_leftImg8bit.png gtFine/test/munich/munich_000054_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000055_000019_leftImg8bit.png gtFine/test/munich/munich_000055_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000056_000019_leftImg8bit.png gtFine/test/munich/munich_000056_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000057_000019_leftImg8bit.png gtFine/test/munich/munich_000057_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000058_000019_leftImg8bit.png gtFine/test/munich/munich_000058_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000059_000019_leftImg8bit.png gtFine/test/munich/munich_000059_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000060_000019_leftImg8bit.png gtFine/test/munich/munich_000060_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000061_000019_leftImg8bit.png gtFine/test/munich/munich_000061_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000062_000019_leftImg8bit.png gtFine/test/munich/munich_000062_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000063_000019_leftImg8bit.png gtFine/test/munich/munich_000063_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000064_000019_leftImg8bit.png gtFine/test/munich/munich_000064_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000065_000019_leftImg8bit.png gtFine/test/munich/munich_000065_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000066_000019_leftImg8bit.png gtFine/test/munich/munich_000066_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000067_000019_leftImg8bit.png gtFine/test/munich/munich_000067_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000068_000019_leftImg8bit.png gtFine/test/munich/munich_000068_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000069_000019_leftImg8bit.png gtFine/test/munich/munich_000069_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000070_000019_leftImg8bit.png gtFine/test/munich/munich_000070_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000071_000019_leftImg8bit.png gtFine/test/munich/munich_000071_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000072_000019_leftImg8bit.png gtFine/test/munich/munich_000072_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000073_000019_leftImg8bit.png gtFine/test/munich/munich_000073_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000074_000019_leftImg8bit.png gtFine/test/munich/munich_000074_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000075_000019_leftImg8bit.png gtFine/test/munich/munich_000075_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000076_000019_leftImg8bit.png gtFine/test/munich/munich_000076_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000077_000019_leftImg8bit.png gtFine/test/munich/munich_000077_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000078_000019_leftImg8bit.png gtFine/test/munich/munich_000078_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000079_000019_leftImg8bit.png gtFine/test/munich/munich_000079_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/munich/munich_000080_000019_leftImg8bit.png gtFine/test/munich/munich_000080_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000081_000019_leftImg8bit.png gtFine/test/munich/munich_000081_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000082_000019_leftImg8bit.png gtFine/test/munich/munich_000082_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000083_000019_leftImg8bit.png gtFine/test/munich/munich_000083_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000084_000019_leftImg8bit.png gtFine/test/munich/munich_000084_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000085_000019_leftImg8bit.png gtFine/test/munich/munich_000085_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000086_000019_leftImg8bit.png gtFine/test/munich/munich_000086_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000087_000019_leftImg8bit.png gtFine/test/munich/munich_000087_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000088_000019_leftImg8bit.png gtFine/test/munich/munich_000088_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000089_000019_leftImg8bit.png gtFine/test/munich/munich_000089_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000090_000019_leftImg8bit.png gtFine/test/munich/munich_000090_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000091_000019_leftImg8bit.png gtFine/test/munich/munich_000091_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000092_000019_leftImg8bit.png gtFine/test/munich/munich_000092_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000093_000019_leftImg8bit.png gtFine/test/munich/munich_000093_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000094_000019_leftImg8bit.png gtFine/test/munich/munich_000094_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000095_000019_leftImg8bit.png gtFine/test/munich/munich_000095_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000096_000019_leftImg8bit.png gtFine/test/munich/munich_000096_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000097_000019_leftImg8bit.png gtFine/test/munich/munich_000097_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000098_000019_leftImg8bit.png gtFine/test/munich/munich_000098_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000099_000019_leftImg8bit.png gtFine/test/munich/munich_000099_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000100_000019_leftImg8bit.png gtFine/test/munich/munich_000100_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000101_000019_leftImg8bit.png gtFine/test/munich/munich_000101_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000102_000019_leftImg8bit.png gtFine/test/munich/munich_000102_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000103_000019_leftImg8bit.png gtFine/test/munich/munich_000103_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000104_000019_leftImg8bit.png gtFine/test/munich/munich_000104_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000105_000019_leftImg8bit.png gtFine/test/munich/munich_000105_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000106_000019_leftImg8bit.png gtFine/test/munich/munich_000106_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000107_000019_leftImg8bit.png 
gtFine/test/munich/munich_000107_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000108_000019_leftImg8bit.png gtFine/test/munich/munich_000108_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000109_000019_leftImg8bit.png gtFine/test/munich/munich_000109_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000110_000019_leftImg8bit.png gtFine/test/munich/munich_000110_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000111_000019_leftImg8bit.png gtFine/test/munich/munich_000111_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000112_000019_leftImg8bit.png gtFine/test/munich/munich_000112_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000113_000019_leftImg8bit.png gtFine/test/munich/munich_000113_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000114_000019_leftImg8bit.png gtFine/test/munich/munich_000114_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000115_000019_leftImg8bit.png gtFine/test/munich/munich_000115_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000116_000019_leftImg8bit.png gtFine/test/munich/munich_000116_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000117_000019_leftImg8bit.png gtFine/test/munich/munich_000117_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000118_000019_leftImg8bit.png gtFine/test/munich/munich_000118_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000119_000019_leftImg8bit.png gtFine/test/munich/munich_000119_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000120_000019_leftImg8bit.png gtFine/test/munich/munich_000120_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000121_000019_leftImg8bit.png gtFine/test/munich/munich_000121_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000122_000019_leftImg8bit.png gtFine/test/munich/munich_000122_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000123_000019_leftImg8bit.png gtFine/test/munich/munich_000123_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000124_000019_leftImg8bit.png gtFine/test/munich/munich_000124_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000125_000019_leftImg8bit.png gtFine/test/munich/munich_000125_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000126_000019_leftImg8bit.png gtFine/test/munich/munich_000126_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000127_000019_leftImg8bit.png gtFine/test/munich/munich_000127_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000128_000019_leftImg8bit.png gtFine/test/munich/munich_000128_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000129_000019_leftImg8bit.png gtFine/test/munich/munich_000129_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000130_000019_leftImg8bit.png gtFine/test/munich/munich_000130_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000131_000019_leftImg8bit.png gtFine/test/munich/munich_000131_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000132_000019_leftImg8bit.png gtFine/test/munich/munich_000132_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000133_000019_leftImg8bit.png gtFine/test/munich/munich_000133_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000134_000019_leftImg8bit.png gtFine/test/munich/munich_000134_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/munich/munich_000135_000019_leftImg8bit.png gtFine/test/munich/munich_000135_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000136_000019_leftImg8bit.png gtFine/test/munich/munich_000136_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000137_000019_leftImg8bit.png gtFine/test/munich/munich_000137_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000138_000019_leftImg8bit.png gtFine/test/munich/munich_000138_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000139_000019_leftImg8bit.png gtFine/test/munich/munich_000139_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000140_000019_leftImg8bit.png gtFine/test/munich/munich_000140_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000141_000019_leftImg8bit.png gtFine/test/munich/munich_000141_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000142_000019_leftImg8bit.png gtFine/test/munich/munich_000142_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000143_000019_leftImg8bit.png gtFine/test/munich/munich_000143_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000144_000019_leftImg8bit.png gtFine/test/munich/munich_000144_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000145_000019_leftImg8bit.png gtFine/test/munich/munich_000145_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000146_000019_leftImg8bit.png gtFine/test/munich/munich_000146_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000147_000019_leftImg8bit.png gtFine/test/munich/munich_000147_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000148_000019_leftImg8bit.png gtFine/test/munich/munich_000148_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000149_000019_leftImg8bit.png gtFine/test/munich/munich_000149_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000150_000019_leftImg8bit.png gtFine/test/munich/munich_000150_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000151_000019_leftImg8bit.png gtFine/test/munich/munich_000151_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000152_000019_leftImg8bit.png gtFine/test/munich/munich_000152_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000153_000019_leftImg8bit.png gtFine/test/munich/munich_000153_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000154_000019_leftImg8bit.png gtFine/test/munich/munich_000154_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000155_000019_leftImg8bit.png gtFine/test/munich/munich_000155_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000156_000019_leftImg8bit.png gtFine/test/munich/munich_000156_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000157_000019_leftImg8bit.png gtFine/test/munich/munich_000157_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000158_000019_leftImg8bit.png gtFine/test/munich/munich_000158_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000159_000019_leftImg8bit.png gtFine/test/munich/munich_000159_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000160_000019_leftImg8bit.png gtFine/test/munich/munich_000160_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000161_000019_leftImg8bit.png gtFine/test/munich/munich_000161_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000162_000019_leftImg8bit.png 
gtFine/test/munich/munich_000162_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000163_000019_leftImg8bit.png gtFine/test/munich/munich_000163_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000164_000019_leftImg8bit.png gtFine/test/munich/munich_000164_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000165_000019_leftImg8bit.png gtFine/test/munich/munich_000165_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000166_000019_leftImg8bit.png gtFine/test/munich/munich_000166_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000167_000019_leftImg8bit.png gtFine/test/munich/munich_000167_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000168_000019_leftImg8bit.png gtFine/test/munich/munich_000168_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000169_000019_leftImg8bit.png gtFine/test/munich/munich_000169_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000170_000019_leftImg8bit.png gtFine/test/munich/munich_000170_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000171_000019_leftImg8bit.png gtFine/test/munich/munich_000171_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000172_000019_leftImg8bit.png gtFine/test/munich/munich_000172_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000173_000019_leftImg8bit.png gtFine/test/munich/munich_000173_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000174_000019_leftImg8bit.png gtFine/test/munich/munich_000174_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000175_000019_leftImg8bit.png gtFine/test/munich/munich_000175_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000176_000019_leftImg8bit.png gtFine/test/munich/munich_000176_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000177_000019_leftImg8bit.png gtFine/test/munich/munich_000177_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000178_000019_leftImg8bit.png gtFine/test/munich/munich_000178_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000179_000019_leftImg8bit.png gtFine/test/munich/munich_000179_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000180_000019_leftImg8bit.png gtFine/test/munich/munich_000180_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000181_000019_leftImg8bit.png gtFine/test/munich/munich_000181_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000182_000019_leftImg8bit.png gtFine/test/munich/munich_000182_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000183_000019_leftImg8bit.png gtFine/test/munich/munich_000183_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000184_000019_leftImg8bit.png gtFine/test/munich/munich_000184_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000185_000019_leftImg8bit.png gtFine/test/munich/munich_000185_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000186_000019_leftImg8bit.png gtFine/test/munich/munich_000186_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000187_000019_leftImg8bit.png gtFine/test/munich/munich_000187_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000188_000019_leftImg8bit.png gtFine/test/munich/munich_000188_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000189_000019_leftImg8bit.png gtFine/test/munich/munich_000189_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/munich/munich_000190_000019_leftImg8bit.png gtFine/test/munich/munich_000190_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000191_000019_leftImg8bit.png gtFine/test/munich/munich_000191_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000192_000019_leftImg8bit.png gtFine/test/munich/munich_000192_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000193_000019_leftImg8bit.png gtFine/test/munich/munich_000193_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000194_000019_leftImg8bit.png gtFine/test/munich/munich_000194_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000195_000019_leftImg8bit.png gtFine/test/munich/munich_000195_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000196_000019_leftImg8bit.png gtFine/test/munich/munich_000196_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000197_000019_leftImg8bit.png gtFine/test/munich/munich_000197_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000198_000019_leftImg8bit.png gtFine/test/munich/munich_000198_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000199_000019_leftImg8bit.png gtFine/test/munich/munich_000199_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000200_000019_leftImg8bit.png gtFine/test/munich/munich_000200_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000201_000019_leftImg8bit.png gtFine/test/munich/munich_000201_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000202_000019_leftImg8bit.png gtFine/test/munich/munich_000202_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000203_000019_leftImg8bit.png gtFine/test/munich/munich_000203_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000204_000019_leftImg8bit.png gtFine/test/munich/munich_000204_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000205_000019_leftImg8bit.png gtFine/test/munich/munich_000205_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000206_000019_leftImg8bit.png gtFine/test/munich/munich_000206_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000207_000019_leftImg8bit.png gtFine/test/munich/munich_000207_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000208_000019_leftImg8bit.png gtFine/test/munich/munich_000208_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000209_000019_leftImg8bit.png gtFine/test/munich/munich_000209_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000210_000019_leftImg8bit.png gtFine/test/munich/munich_000210_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000211_000019_leftImg8bit.png gtFine/test/munich/munich_000211_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000212_000019_leftImg8bit.png gtFine/test/munich/munich_000212_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000213_000019_leftImg8bit.png gtFine/test/munich/munich_000213_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000214_000019_leftImg8bit.png gtFine/test/munich/munich_000214_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000215_000019_leftImg8bit.png gtFine/test/munich/munich_000215_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000216_000019_leftImg8bit.png gtFine/test/munich/munich_000216_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000217_000019_leftImg8bit.png 
gtFine/test/munich/munich_000217_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000218_000019_leftImg8bit.png gtFine/test/munich/munich_000218_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000219_000019_leftImg8bit.png gtFine/test/munich/munich_000219_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000220_000019_leftImg8bit.png gtFine/test/munich/munich_000220_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000221_000019_leftImg8bit.png gtFine/test/munich/munich_000221_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000222_000019_leftImg8bit.png gtFine/test/munich/munich_000222_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000223_000019_leftImg8bit.png gtFine/test/munich/munich_000223_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000224_000019_leftImg8bit.png gtFine/test/munich/munich_000224_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000225_000019_leftImg8bit.png gtFine/test/munich/munich_000225_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000226_000019_leftImg8bit.png gtFine/test/munich/munich_000226_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000227_000019_leftImg8bit.png gtFine/test/munich/munich_000227_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000228_000019_leftImg8bit.png gtFine/test/munich/munich_000228_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000229_000019_leftImg8bit.png gtFine/test/munich/munich_000229_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000230_000019_leftImg8bit.png gtFine/test/munich/munich_000230_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000231_000019_leftImg8bit.png gtFine/test/munich/munich_000231_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000232_000019_leftImg8bit.png gtFine/test/munich/munich_000232_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000233_000019_leftImg8bit.png gtFine/test/munich/munich_000233_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000234_000019_leftImg8bit.png gtFine/test/munich/munich_000234_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000235_000019_leftImg8bit.png gtFine/test/munich/munich_000235_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000236_000019_leftImg8bit.png gtFine/test/munich/munich_000236_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000237_000019_leftImg8bit.png gtFine/test/munich/munich_000237_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000238_000019_leftImg8bit.png gtFine/test/munich/munich_000238_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000239_000019_leftImg8bit.png gtFine/test/munich/munich_000239_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000240_000019_leftImg8bit.png gtFine/test/munich/munich_000240_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000241_000019_leftImg8bit.png gtFine/test/munich/munich_000241_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000242_000019_leftImg8bit.png gtFine/test/munich/munich_000242_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000243_000019_leftImg8bit.png gtFine/test/munich/munich_000243_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000244_000019_leftImg8bit.png gtFine/test/munich/munich_000244_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/munich/munich_000245_000019_leftImg8bit.png gtFine/test/munich/munich_000245_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000246_000019_leftImg8bit.png gtFine/test/munich/munich_000246_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000247_000019_leftImg8bit.png gtFine/test/munich/munich_000247_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000248_000019_leftImg8bit.png gtFine/test/munich/munich_000248_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000249_000019_leftImg8bit.png gtFine/test/munich/munich_000249_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000250_000019_leftImg8bit.png gtFine/test/munich/munich_000250_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000251_000019_leftImg8bit.png gtFine/test/munich/munich_000251_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000252_000019_leftImg8bit.png gtFine/test/munich/munich_000252_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000253_000019_leftImg8bit.png gtFine/test/munich/munich_000253_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000254_000019_leftImg8bit.png gtFine/test/munich/munich_000254_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000255_000019_leftImg8bit.png gtFine/test/munich/munich_000255_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000256_000019_leftImg8bit.png gtFine/test/munich/munich_000256_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000257_000019_leftImg8bit.png gtFine/test/munich/munich_000257_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000258_000019_leftImg8bit.png gtFine/test/munich/munich_000258_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000259_000019_leftImg8bit.png gtFine/test/munich/munich_000259_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000260_000019_leftImg8bit.png gtFine/test/munich/munich_000260_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000261_000019_leftImg8bit.png gtFine/test/munich/munich_000261_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000262_000019_leftImg8bit.png gtFine/test/munich/munich_000262_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000263_000019_leftImg8bit.png gtFine/test/munich/munich_000263_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000264_000019_leftImg8bit.png gtFine/test/munich/munich_000264_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000265_000019_leftImg8bit.png gtFine/test/munich/munich_000265_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000266_000019_leftImg8bit.png gtFine/test/munich/munich_000266_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000267_000019_leftImg8bit.png gtFine/test/munich/munich_000267_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000268_000019_leftImg8bit.png gtFine/test/munich/munich_000268_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000269_000019_leftImg8bit.png gtFine/test/munich/munich_000269_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000270_000019_leftImg8bit.png gtFine/test/munich/munich_000270_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000271_000019_leftImg8bit.png gtFine/test/munich/munich_000271_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000272_000019_leftImg8bit.png 
gtFine/test/munich/munich_000272_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000273_000019_leftImg8bit.png gtFine/test/munich/munich_000273_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000274_000019_leftImg8bit.png gtFine/test/munich/munich_000274_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000275_000019_leftImg8bit.png gtFine/test/munich/munich_000275_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000276_000019_leftImg8bit.png gtFine/test/munich/munich_000276_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000277_000019_leftImg8bit.png gtFine/test/munich/munich_000277_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000278_000019_leftImg8bit.png gtFine/test/munich/munich_000278_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000279_000019_leftImg8bit.png gtFine/test/munich/munich_000279_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000280_000019_leftImg8bit.png gtFine/test/munich/munich_000280_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000281_000019_leftImg8bit.png gtFine/test/munich/munich_000281_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000282_000019_leftImg8bit.png gtFine/test/munich/munich_000282_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000283_000019_leftImg8bit.png gtFine/test/munich/munich_000283_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000284_000019_leftImg8bit.png gtFine/test/munich/munich_000284_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000285_000019_leftImg8bit.png gtFine/test/munich/munich_000285_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000286_000019_leftImg8bit.png gtFine/test/munich/munich_000286_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000287_000019_leftImg8bit.png gtFine/test/munich/munich_000287_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000288_000019_leftImg8bit.png gtFine/test/munich/munich_000288_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000289_000019_leftImg8bit.png gtFine/test/munich/munich_000289_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000290_000019_leftImg8bit.png gtFine/test/munich/munich_000290_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000291_000019_leftImg8bit.png gtFine/test/munich/munich_000291_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000292_000019_leftImg8bit.png gtFine/test/munich/munich_000292_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000293_000019_leftImg8bit.png gtFine/test/munich/munich_000293_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000294_000019_leftImg8bit.png gtFine/test/munich/munich_000294_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000295_000019_leftImg8bit.png gtFine/test/munich/munich_000295_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000296_000019_leftImg8bit.png gtFine/test/munich/munich_000296_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000297_000019_leftImg8bit.png gtFine/test/munich/munich_000297_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000298_000019_leftImg8bit.png gtFine/test/munich/munich_000298_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000299_000019_leftImg8bit.png gtFine/test/munich/munich_000299_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/munich/munich_000300_000019_leftImg8bit.png gtFine/test/munich/munich_000300_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000301_000019_leftImg8bit.png gtFine/test/munich/munich_000301_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000302_000019_leftImg8bit.png gtFine/test/munich/munich_000302_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000303_000019_leftImg8bit.png gtFine/test/munich/munich_000303_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000304_000019_leftImg8bit.png gtFine/test/munich/munich_000304_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000305_000019_leftImg8bit.png gtFine/test/munich/munich_000305_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000306_000019_leftImg8bit.png gtFine/test/munich/munich_000306_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000307_000019_leftImg8bit.png gtFine/test/munich/munich_000307_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000308_000019_leftImg8bit.png gtFine/test/munich/munich_000308_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000309_000019_leftImg8bit.png gtFine/test/munich/munich_000309_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000310_000019_leftImg8bit.png gtFine/test/munich/munich_000310_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000311_000019_leftImg8bit.png gtFine/test/munich/munich_000311_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000312_000019_leftImg8bit.png gtFine/test/munich/munich_000312_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000313_000019_leftImg8bit.png gtFine/test/munich/munich_000313_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000314_000019_leftImg8bit.png gtFine/test/munich/munich_000314_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000315_000019_leftImg8bit.png gtFine/test/munich/munich_000315_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000316_000019_leftImg8bit.png gtFine/test/munich/munich_000316_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000317_000019_leftImg8bit.png gtFine/test/munich/munich_000317_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000318_000019_leftImg8bit.png gtFine/test/munich/munich_000318_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000319_000019_leftImg8bit.png gtFine/test/munich/munich_000319_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000320_000019_leftImg8bit.png gtFine/test/munich/munich_000320_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000321_000019_leftImg8bit.png gtFine/test/munich/munich_000321_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000322_000019_leftImg8bit.png gtFine/test/munich/munich_000322_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000323_000019_leftImg8bit.png gtFine/test/munich/munich_000323_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000324_000019_leftImg8bit.png gtFine/test/munich/munich_000324_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000325_000019_leftImg8bit.png gtFine/test/munich/munich_000325_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000326_000019_leftImg8bit.png gtFine/test/munich/munich_000326_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000327_000019_leftImg8bit.png 
gtFine/test/munich/munich_000327_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000328_000019_leftImg8bit.png gtFine/test/munich/munich_000328_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000329_000019_leftImg8bit.png gtFine/test/munich/munich_000329_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000330_000019_leftImg8bit.png gtFine/test/munich/munich_000330_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000331_000019_leftImg8bit.png gtFine/test/munich/munich_000331_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000332_000019_leftImg8bit.png gtFine/test/munich/munich_000332_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000333_000019_leftImg8bit.png gtFine/test/munich/munich_000333_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000334_000019_leftImg8bit.png gtFine/test/munich/munich_000334_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000335_000019_leftImg8bit.png gtFine/test/munich/munich_000335_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000336_000019_leftImg8bit.png gtFine/test/munich/munich_000336_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000337_000019_leftImg8bit.png gtFine/test/munich/munich_000337_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000338_000019_leftImg8bit.png gtFine/test/munich/munich_000338_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000339_000019_leftImg8bit.png gtFine/test/munich/munich_000339_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000340_000019_leftImg8bit.png gtFine/test/munich/munich_000340_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000341_000019_leftImg8bit.png gtFine/test/munich/munich_000341_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000342_000019_leftImg8bit.png gtFine/test/munich/munich_000342_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000343_000019_leftImg8bit.png gtFine/test/munich/munich_000343_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000344_000019_leftImg8bit.png gtFine/test/munich/munich_000344_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000345_000019_leftImg8bit.png gtFine/test/munich/munich_000345_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000346_000019_leftImg8bit.png gtFine/test/munich/munich_000346_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000347_000019_leftImg8bit.png gtFine/test/munich/munich_000347_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000348_000019_leftImg8bit.png gtFine/test/munich/munich_000348_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000349_000019_leftImg8bit.png gtFine/test/munich/munich_000349_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000350_000019_leftImg8bit.png gtFine/test/munich/munich_000350_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000351_000019_leftImg8bit.png gtFine/test/munich/munich_000351_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000352_000019_leftImg8bit.png gtFine/test/munich/munich_000352_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000353_000019_leftImg8bit.png gtFine/test/munich/munich_000353_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000354_000019_leftImg8bit.png gtFine/test/munich/munich_000354_000019_gtFine_labelTrainIds.png 
+leftImg8bit/test/munich/munich_000355_000019_leftImg8bit.png gtFine/test/munich/munich_000355_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000356_000019_leftImg8bit.png gtFine/test/munich/munich_000356_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000357_000019_leftImg8bit.png gtFine/test/munich/munich_000357_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000358_000019_leftImg8bit.png gtFine/test/munich/munich_000358_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000359_000019_leftImg8bit.png gtFine/test/munich/munich_000359_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000360_000019_leftImg8bit.png gtFine/test/munich/munich_000360_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000361_000019_leftImg8bit.png gtFine/test/munich/munich_000361_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000362_000019_leftImg8bit.png gtFine/test/munich/munich_000362_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000363_000019_leftImg8bit.png gtFine/test/munich/munich_000363_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000364_000019_leftImg8bit.png gtFine/test/munich/munich_000364_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000365_000019_leftImg8bit.png gtFine/test/munich/munich_000365_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000366_000019_leftImg8bit.png gtFine/test/munich/munich_000366_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000367_000019_leftImg8bit.png gtFine/test/munich/munich_000367_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000368_000019_leftImg8bit.png gtFine/test/munich/munich_000368_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000369_000019_leftImg8bit.png gtFine/test/munich/munich_000369_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000370_000019_leftImg8bit.png gtFine/test/munich/munich_000370_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000371_000019_leftImg8bit.png gtFine/test/munich/munich_000371_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000372_000019_leftImg8bit.png gtFine/test/munich/munich_000372_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000373_000019_leftImg8bit.png gtFine/test/munich/munich_000373_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000374_000019_leftImg8bit.png gtFine/test/munich/munich_000374_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000375_000019_leftImg8bit.png gtFine/test/munich/munich_000375_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000376_000019_leftImg8bit.png gtFine/test/munich/munich_000376_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000377_000019_leftImg8bit.png gtFine/test/munich/munich_000377_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000378_000019_leftImg8bit.png gtFine/test/munich/munich_000378_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000379_000019_leftImg8bit.png gtFine/test/munich/munich_000379_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000380_000019_leftImg8bit.png gtFine/test/munich/munich_000380_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000381_000019_leftImg8bit.png gtFine/test/munich/munich_000381_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000382_000019_leftImg8bit.png 
gtFine/test/munich/munich_000382_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000383_000019_leftImg8bit.png gtFine/test/munich/munich_000383_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000384_000019_leftImg8bit.png gtFine/test/munich/munich_000384_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000385_000019_leftImg8bit.png gtFine/test/munich/munich_000385_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000386_000019_leftImg8bit.png gtFine/test/munich/munich_000386_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000387_000019_leftImg8bit.png gtFine/test/munich/munich_000387_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000388_000019_leftImg8bit.png gtFine/test/munich/munich_000388_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000389_000019_leftImg8bit.png gtFine/test/munich/munich_000389_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000390_000019_leftImg8bit.png gtFine/test/munich/munich_000390_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000391_000019_leftImg8bit.png gtFine/test/munich/munich_000391_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000392_000019_leftImg8bit.png gtFine/test/munich/munich_000392_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000393_000019_leftImg8bit.png gtFine/test/munich/munich_000393_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000394_000019_leftImg8bit.png gtFine/test/munich/munich_000394_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000395_000019_leftImg8bit.png gtFine/test/munich/munich_000395_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000396_000019_leftImg8bit.png gtFine/test/munich/munich_000396_000019_gtFine_labelTrainIds.png +leftImg8bit/test/munich/munich_000397_000019_leftImg8bit.png gtFine/test/munich/munich_000397_000019_gtFine_labelTrainIds.png diff --git a/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_train_fine.txt b/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_train_fine.txt new file mode 100644 index 0000000..1e7045a --- /dev/null +++ b/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_train_fine.txt @@ -0,0 +1,2975 @@ +leftImg8bit/train/aachen/aachen_000000_000019_leftImg8bit.png gtFine/train/aachen/aachen_000000_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000001_000019_leftImg8bit.png gtFine/train/aachen/aachen_000001_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000002_000019_leftImg8bit.png gtFine/train/aachen/aachen_000002_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000003_000019_leftImg8bit.png gtFine/train/aachen/aachen_000003_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000004_000019_leftImg8bit.png gtFine/train/aachen/aachen_000004_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000005_000019_leftImg8bit.png gtFine/train/aachen/aachen_000005_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000006_000019_leftImg8bit.png gtFine/train/aachen/aachen_000006_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000007_000019_leftImg8bit.png gtFine/train/aachen/aachen_000007_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000008_000019_leftImg8bit.png gtFine/train/aachen/aachen_000008_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000009_000019_leftImg8bit.png 
gtFine/train/aachen/aachen_000009_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000010_000019_leftImg8bit.png gtFine/train/aachen/aachen_000010_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000011_000019_leftImg8bit.png gtFine/train/aachen/aachen_000011_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000012_000019_leftImg8bit.png gtFine/train/aachen/aachen_000012_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000013_000019_leftImg8bit.png gtFine/train/aachen/aachen_000013_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000014_000019_leftImg8bit.png gtFine/train/aachen/aachen_000014_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000015_000019_leftImg8bit.png gtFine/train/aachen/aachen_000015_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000016_000019_leftImg8bit.png gtFine/train/aachen/aachen_000016_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000017_000019_leftImg8bit.png gtFine/train/aachen/aachen_000017_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000018_000019_leftImg8bit.png gtFine/train/aachen/aachen_000018_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000019_000019_leftImg8bit.png gtFine/train/aachen/aachen_000019_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000020_000019_leftImg8bit.png gtFine/train/aachen/aachen_000020_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000021_000019_leftImg8bit.png gtFine/train/aachen/aachen_000021_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000022_000019_leftImg8bit.png gtFine/train/aachen/aachen_000022_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000023_000019_leftImg8bit.png gtFine/train/aachen/aachen_000023_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000024_000019_leftImg8bit.png gtFine/train/aachen/aachen_000024_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000025_000019_leftImg8bit.png gtFine/train/aachen/aachen_000025_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000026_000019_leftImg8bit.png gtFine/train/aachen/aachen_000026_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000027_000019_leftImg8bit.png gtFine/train/aachen/aachen_000027_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000028_000019_leftImg8bit.png gtFine/train/aachen/aachen_000028_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000029_000019_leftImg8bit.png gtFine/train/aachen/aachen_000029_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000030_000019_leftImg8bit.png gtFine/train/aachen/aachen_000030_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000031_000019_leftImg8bit.png gtFine/train/aachen/aachen_000031_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000032_000019_leftImg8bit.png gtFine/train/aachen/aachen_000032_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000033_000019_leftImg8bit.png gtFine/train/aachen/aachen_000033_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000034_000019_leftImg8bit.png gtFine/train/aachen/aachen_000034_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000035_000019_leftImg8bit.png gtFine/train/aachen/aachen_000035_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000036_000019_leftImg8bit.png gtFine/train/aachen/aachen_000036_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/aachen/aachen_000037_000019_leftImg8bit.png gtFine/train/aachen/aachen_000037_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000038_000019_leftImg8bit.png gtFine/train/aachen/aachen_000038_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000039_000019_leftImg8bit.png gtFine/train/aachen/aachen_000039_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000040_000019_leftImg8bit.png gtFine/train/aachen/aachen_000040_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000041_000019_leftImg8bit.png gtFine/train/aachen/aachen_000041_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000042_000019_leftImg8bit.png gtFine/train/aachen/aachen_000042_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000043_000019_leftImg8bit.png gtFine/train/aachen/aachen_000043_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000044_000019_leftImg8bit.png gtFine/train/aachen/aachen_000044_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000045_000019_leftImg8bit.png gtFine/train/aachen/aachen_000045_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000046_000019_leftImg8bit.png gtFine/train/aachen/aachen_000046_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000047_000019_leftImg8bit.png gtFine/train/aachen/aachen_000047_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000048_000019_leftImg8bit.png gtFine/train/aachen/aachen_000048_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000049_000019_leftImg8bit.png gtFine/train/aachen/aachen_000049_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000050_000019_leftImg8bit.png gtFine/train/aachen/aachen_000050_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000051_000019_leftImg8bit.png gtFine/train/aachen/aachen_000051_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000052_000019_leftImg8bit.png gtFine/train/aachen/aachen_000052_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000053_000019_leftImg8bit.png gtFine/train/aachen/aachen_000053_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000054_000019_leftImg8bit.png gtFine/train/aachen/aachen_000054_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000055_000019_leftImg8bit.png gtFine/train/aachen/aachen_000055_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000056_000019_leftImg8bit.png gtFine/train/aachen/aachen_000056_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000057_000019_leftImg8bit.png gtFine/train/aachen/aachen_000057_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000058_000019_leftImg8bit.png gtFine/train/aachen/aachen_000058_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000059_000019_leftImg8bit.png gtFine/train/aachen/aachen_000059_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000060_000019_leftImg8bit.png gtFine/train/aachen/aachen_000060_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000061_000019_leftImg8bit.png gtFine/train/aachen/aachen_000061_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000062_000019_leftImg8bit.png gtFine/train/aachen/aachen_000062_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000063_000019_leftImg8bit.png gtFine/train/aachen/aachen_000063_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000064_000019_leftImg8bit.png 
gtFine/train/aachen/aachen_000064_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000065_000019_leftImg8bit.png gtFine/train/aachen/aachen_000065_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000066_000019_leftImg8bit.png gtFine/train/aachen/aachen_000066_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000067_000019_leftImg8bit.png gtFine/train/aachen/aachen_000067_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000068_000019_leftImg8bit.png gtFine/train/aachen/aachen_000068_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000069_000019_leftImg8bit.png gtFine/train/aachen/aachen_000069_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000070_000019_leftImg8bit.png gtFine/train/aachen/aachen_000070_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000071_000019_leftImg8bit.png gtFine/train/aachen/aachen_000071_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000072_000019_leftImg8bit.png gtFine/train/aachen/aachen_000072_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000073_000019_leftImg8bit.png gtFine/train/aachen/aachen_000073_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000074_000019_leftImg8bit.png gtFine/train/aachen/aachen_000074_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000075_000019_leftImg8bit.png gtFine/train/aachen/aachen_000075_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000076_000019_leftImg8bit.png gtFine/train/aachen/aachen_000076_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000077_000019_leftImg8bit.png gtFine/train/aachen/aachen_000077_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000078_000019_leftImg8bit.png gtFine/train/aachen/aachen_000078_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000079_000019_leftImg8bit.png gtFine/train/aachen/aachen_000079_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000080_000019_leftImg8bit.png gtFine/train/aachen/aachen_000080_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000081_000019_leftImg8bit.png gtFine/train/aachen/aachen_000081_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000082_000019_leftImg8bit.png gtFine/train/aachen/aachen_000082_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000083_000019_leftImg8bit.png gtFine/train/aachen/aachen_000083_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000084_000019_leftImg8bit.png gtFine/train/aachen/aachen_000084_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000085_000019_leftImg8bit.png gtFine/train/aachen/aachen_000085_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000086_000019_leftImg8bit.png gtFine/train/aachen/aachen_000086_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000087_000019_leftImg8bit.png gtFine/train/aachen/aachen_000087_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000088_000019_leftImg8bit.png gtFine/train/aachen/aachen_000088_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000089_000019_leftImg8bit.png gtFine/train/aachen/aachen_000089_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000090_000019_leftImg8bit.png gtFine/train/aachen/aachen_000090_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000091_000019_leftImg8bit.png gtFine/train/aachen/aachen_000091_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/aachen/aachen_000092_000019_leftImg8bit.png gtFine/train/aachen/aachen_000092_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000093_000019_leftImg8bit.png gtFine/train/aachen/aachen_000093_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000094_000019_leftImg8bit.png gtFine/train/aachen/aachen_000094_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000095_000019_leftImg8bit.png gtFine/train/aachen/aachen_000095_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000096_000019_leftImg8bit.png gtFine/train/aachen/aachen_000096_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000097_000019_leftImg8bit.png gtFine/train/aachen/aachen_000097_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000098_000019_leftImg8bit.png gtFine/train/aachen/aachen_000098_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000099_000019_leftImg8bit.png gtFine/train/aachen/aachen_000099_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000100_000019_leftImg8bit.png gtFine/train/aachen/aachen_000100_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000101_000019_leftImg8bit.png gtFine/train/aachen/aachen_000101_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000102_000019_leftImg8bit.png gtFine/train/aachen/aachen_000102_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000103_000019_leftImg8bit.png gtFine/train/aachen/aachen_000103_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000104_000019_leftImg8bit.png gtFine/train/aachen/aachen_000104_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000105_000019_leftImg8bit.png gtFine/train/aachen/aachen_000105_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000106_000019_leftImg8bit.png gtFine/train/aachen/aachen_000106_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000107_000019_leftImg8bit.png gtFine/train/aachen/aachen_000107_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000108_000019_leftImg8bit.png gtFine/train/aachen/aachen_000108_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000109_000019_leftImg8bit.png gtFine/train/aachen/aachen_000109_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000110_000019_leftImg8bit.png gtFine/train/aachen/aachen_000110_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000111_000019_leftImg8bit.png gtFine/train/aachen/aachen_000111_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000112_000019_leftImg8bit.png gtFine/train/aachen/aachen_000112_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000113_000019_leftImg8bit.png gtFine/train/aachen/aachen_000113_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000114_000019_leftImg8bit.png gtFine/train/aachen/aachen_000114_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000115_000019_leftImg8bit.png gtFine/train/aachen/aachen_000115_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000116_000019_leftImg8bit.png gtFine/train/aachen/aachen_000116_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000117_000019_leftImg8bit.png gtFine/train/aachen/aachen_000117_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000118_000019_leftImg8bit.png gtFine/train/aachen/aachen_000118_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000119_000019_leftImg8bit.png 
gtFine/train/aachen/aachen_000119_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000120_000019_leftImg8bit.png gtFine/train/aachen/aachen_000120_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000121_000019_leftImg8bit.png gtFine/train/aachen/aachen_000121_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000122_000019_leftImg8bit.png gtFine/train/aachen/aachen_000122_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000123_000019_leftImg8bit.png gtFine/train/aachen/aachen_000123_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000124_000019_leftImg8bit.png gtFine/train/aachen/aachen_000124_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000125_000019_leftImg8bit.png gtFine/train/aachen/aachen_000125_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000126_000019_leftImg8bit.png gtFine/train/aachen/aachen_000126_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000127_000019_leftImg8bit.png gtFine/train/aachen/aachen_000127_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000128_000019_leftImg8bit.png gtFine/train/aachen/aachen_000128_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000129_000019_leftImg8bit.png gtFine/train/aachen/aachen_000129_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000130_000019_leftImg8bit.png gtFine/train/aachen/aachen_000130_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000131_000019_leftImg8bit.png gtFine/train/aachen/aachen_000131_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000132_000019_leftImg8bit.png gtFine/train/aachen/aachen_000132_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000133_000019_leftImg8bit.png gtFine/train/aachen/aachen_000133_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000134_000019_leftImg8bit.png gtFine/train/aachen/aachen_000134_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000135_000019_leftImg8bit.png gtFine/train/aachen/aachen_000135_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000136_000019_leftImg8bit.png gtFine/train/aachen/aachen_000136_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000137_000019_leftImg8bit.png gtFine/train/aachen/aachen_000137_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000138_000019_leftImg8bit.png gtFine/train/aachen/aachen_000138_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000139_000019_leftImg8bit.png gtFine/train/aachen/aachen_000139_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000140_000019_leftImg8bit.png gtFine/train/aachen/aachen_000140_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000141_000019_leftImg8bit.png gtFine/train/aachen/aachen_000141_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000142_000019_leftImg8bit.png gtFine/train/aachen/aachen_000142_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000143_000019_leftImg8bit.png gtFine/train/aachen/aachen_000143_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000144_000019_leftImg8bit.png gtFine/train/aachen/aachen_000144_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000145_000019_leftImg8bit.png gtFine/train/aachen/aachen_000145_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000146_000019_leftImg8bit.png gtFine/train/aachen/aachen_000146_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/aachen/aachen_000147_000019_leftImg8bit.png gtFine/train/aachen/aachen_000147_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000148_000019_leftImg8bit.png gtFine/train/aachen/aachen_000148_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000149_000019_leftImg8bit.png gtFine/train/aachen/aachen_000149_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000150_000019_leftImg8bit.png gtFine/train/aachen/aachen_000150_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000151_000019_leftImg8bit.png gtFine/train/aachen/aachen_000151_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000152_000019_leftImg8bit.png gtFine/train/aachen/aachen_000152_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000153_000019_leftImg8bit.png gtFine/train/aachen/aachen_000153_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000154_000019_leftImg8bit.png gtFine/train/aachen/aachen_000154_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000155_000019_leftImg8bit.png gtFine/train/aachen/aachen_000155_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000156_000019_leftImg8bit.png gtFine/train/aachen/aachen_000156_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000157_000019_leftImg8bit.png gtFine/train/aachen/aachen_000157_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000158_000019_leftImg8bit.png gtFine/train/aachen/aachen_000158_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000159_000019_leftImg8bit.png gtFine/train/aachen/aachen_000159_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000160_000019_leftImg8bit.png gtFine/train/aachen/aachen_000160_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000161_000019_leftImg8bit.png gtFine/train/aachen/aachen_000161_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000162_000019_leftImg8bit.png gtFine/train/aachen/aachen_000162_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000163_000019_leftImg8bit.png gtFine/train/aachen/aachen_000163_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000164_000019_leftImg8bit.png gtFine/train/aachen/aachen_000164_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000165_000019_leftImg8bit.png gtFine/train/aachen/aachen_000165_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000166_000019_leftImg8bit.png gtFine/train/aachen/aachen_000166_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000167_000019_leftImg8bit.png gtFine/train/aachen/aachen_000167_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000168_000019_leftImg8bit.png gtFine/train/aachen/aachen_000168_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000169_000019_leftImg8bit.png gtFine/train/aachen/aachen_000169_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000170_000019_leftImg8bit.png gtFine/train/aachen/aachen_000170_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000171_000019_leftImg8bit.png gtFine/train/aachen/aachen_000171_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000172_000019_leftImg8bit.png gtFine/train/aachen/aachen_000172_000019_gtFine_labelTrainIds.png +leftImg8bit/train/aachen/aachen_000173_000019_leftImg8bit.png gtFine/train/aachen/aachen_000173_000019_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_000313_leftImg8bit.png 
gtFine/train/bochum/bochum_000000_000313_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_000600_leftImg8bit.png gtFine/train/bochum/bochum_000000_000600_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_000885_leftImg8bit.png gtFine/train/bochum/bochum_000000_000885_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_001097_leftImg8bit.png gtFine/train/bochum/bochum_000000_001097_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_001519_leftImg8bit.png gtFine/train/bochum/bochum_000000_001519_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_001828_leftImg8bit.png gtFine/train/bochum/bochum_000000_001828_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_002293_leftImg8bit.png gtFine/train/bochum/bochum_000000_002293_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_002562_leftImg8bit.png gtFine/train/bochum/bochum_000000_002562_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_003005_leftImg8bit.png gtFine/train/bochum/bochum_000000_003005_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_003245_leftImg8bit.png gtFine/train/bochum/bochum_000000_003245_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_003674_leftImg8bit.png gtFine/train/bochum/bochum_000000_003674_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_004032_leftImg8bit.png gtFine/train/bochum/bochum_000000_004032_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_004229_leftImg8bit.png gtFine/train/bochum/bochum_000000_004229_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_004748_leftImg8bit.png gtFine/train/bochum/bochum_000000_004748_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_005537_leftImg8bit.png gtFine/train/bochum/bochum_000000_005537_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_005936_leftImg8bit.png gtFine/train/bochum/bochum_000000_005936_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_006026_leftImg8bit.png gtFine/train/bochum/bochum_000000_006026_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_006484_leftImg8bit.png gtFine/train/bochum/bochum_000000_006484_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_006746_leftImg8bit.png gtFine/train/bochum/bochum_000000_006746_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_007150_leftImg8bit.png gtFine/train/bochum/bochum_000000_007150_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_007651_leftImg8bit.png gtFine/train/bochum/bochum_000000_007651_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_007950_leftImg8bit.png gtFine/train/bochum/bochum_000000_007950_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_008162_leftImg8bit.png gtFine/train/bochum/bochum_000000_008162_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_008448_leftImg8bit.png gtFine/train/bochum/bochum_000000_008448_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_008804_leftImg8bit.png gtFine/train/bochum/bochum_000000_008804_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_009554_leftImg8bit.png gtFine/train/bochum/bochum_000000_009554_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_009951_leftImg8bit.png gtFine/train/bochum/bochum_000000_009951_gtFine_labelTrainIds.png +leftImg8bit/train/bochum/bochum_000000_010562_leftImg8bit.png gtFine/train/bochum/bochum_000000_010562_gtFine_labelTrainIds.png 
+leftImg8bit/train/bochum/bochum_000000_010700_leftImg8bit.png gtFine/train/bochum/bochum_000000_010700_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_011255_leftImg8bit.png gtFine/train/bochum/bochum_000000_011255_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_011711_leftImg8bit.png gtFine/train/bochum/bochum_000000_011711_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_013209_leftImg8bit.png gtFine/train/bochum/bochum_000000_013209_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_013705_leftImg8bit.png gtFine/train/bochum/bochum_000000_013705_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_014332_leftImg8bit.png gtFine/train/bochum/bochum_000000_014332_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_014658_leftImg8bit.png gtFine/train/bochum/bochum_000000_014658_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_014803_leftImg8bit.png gtFine/train/bochum/bochum_000000_014803_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_015038_leftImg8bit.png gtFine/train/bochum/bochum_000000_015038_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_015321_leftImg8bit.png gtFine/train/bochum/bochum_000000_015321_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_015645_leftImg8bit.png gtFine/train/bochum/bochum_000000_015645_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_015880_leftImg8bit.png gtFine/train/bochum/bochum_000000_015880_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_016125_leftImg8bit.png gtFine/train/bochum/bochum_000000_016125_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_016260_leftImg8bit.png gtFine/train/bochum/bochum_000000_016260_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_016591_leftImg8bit.png gtFine/train/bochum/bochum_000000_016591_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_016758_leftImg8bit.png gtFine/train/bochum/bochum_000000_016758_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_017216_leftImg8bit.png gtFine/train/bochum/bochum_000000_017216_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_017453_leftImg8bit.png gtFine/train/bochum/bochum_000000_017453_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_018195_leftImg8bit.png gtFine/train/bochum/bochum_000000_018195_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_019188_leftImg8bit.png gtFine/train/bochum/bochum_000000_019188_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_020673_leftImg8bit.png gtFine/train/bochum/bochum_000000_020673_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_020776_leftImg8bit.png gtFine/train/bochum/bochum_000000_020776_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_020899_leftImg8bit.png gtFine/train/bochum/bochum_000000_020899_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_021070_leftImg8bit.png gtFine/train/bochum/bochum_000000_021070_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_021325_leftImg8bit.png gtFine/train/bochum/bochum_000000_021325_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_021393_leftImg8bit.png gtFine/train/bochum/bochum_000000_021393_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_021479_leftImg8bit.png gtFine/train/bochum/bochum_000000_021479_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_021606_leftImg8bit.png gtFine/train/bochum/bochum_000000_021606_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_022210_leftImg8bit.png gtFine/train/bochum/bochum_000000_022210_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_022414_leftImg8bit.png gtFine/train/bochum/bochum_000000_022414_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_023040_leftImg8bit.png gtFine/train/bochum/bochum_000000_023040_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_023174_leftImg8bit.png gtFine/train/bochum/bochum_000000_023174_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_023435_leftImg8bit.png gtFine/train/bochum/bochum_000000_023435_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_023648_leftImg8bit.png gtFine/train/bochum/bochum_000000_023648_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_024196_leftImg8bit.png gtFine/train/bochum/bochum_000000_024196_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_024343_leftImg8bit.png gtFine/train/bochum/bochum_000000_024343_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_024524_leftImg8bit.png gtFine/train/bochum/bochum_000000_024524_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_024717_leftImg8bit.png gtFine/train/bochum/bochum_000000_024717_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_024855_leftImg8bit.png gtFine/train/bochum/bochum_000000_024855_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_025746_leftImg8bit.png gtFine/train/bochum/bochum_000000_025746_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_025833_leftImg8bit.png gtFine/train/bochum/bochum_000000_025833_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_026056_leftImg8bit.png gtFine/train/bochum/bochum_000000_026056_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_026634_leftImg8bit.png gtFine/train/bochum/bochum_000000_026634_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_027057_leftImg8bit.png gtFine/train/bochum/bochum_000000_027057_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_027699_leftImg8bit.png gtFine/train/bochum/bochum_000000_027699_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_027951_leftImg8bit.png gtFine/train/bochum/bochum_000000_027951_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_028297_leftImg8bit.png gtFine/train/bochum/bochum_000000_028297_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_028764_leftImg8bit.png gtFine/train/bochum/bochum_000000_028764_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_029203_leftImg8bit.png gtFine/train/bochum/bochum_000000_029203_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_029721_leftImg8bit.png gtFine/train/bochum/bochum_000000_029721_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_030913_leftImg8bit.png gtFine/train/bochum/bochum_000000_030913_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_031152_leftImg8bit.png gtFine/train/bochum/bochum_000000_031152_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_031477_leftImg8bit.png gtFine/train/bochum/bochum_000000_031477_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_031687_leftImg8bit.png gtFine/train/bochum/bochum_000000_031687_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_031922_leftImg8bit.png gtFine/train/bochum/bochum_000000_031922_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_032169_leftImg8bit.png gtFine/train/bochum/bochum_000000_032169_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_033056_leftImg8bit.png gtFine/train/bochum/bochum_000000_033056_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_033331_leftImg8bit.png gtFine/train/bochum/bochum_000000_033331_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_033531_leftImg8bit.png gtFine/train/bochum/bochum_000000_033531_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_033714_leftImg8bit.png gtFine/train/bochum/bochum_000000_033714_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_034936_leftImg8bit.png gtFine/train/bochum/bochum_000000_034936_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_035958_leftImg8bit.png gtFine/train/bochum/bochum_000000_035958_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_036606_leftImg8bit.png gtFine/train/bochum/bochum_000000_036606_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_037039_leftImg8bit.png gtFine/train/bochum/bochum_000000_037039_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_037223_leftImg8bit.png gtFine/train/bochum/bochum_000000_037223_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_037829_leftImg8bit.png gtFine/train/bochum/bochum_000000_037829_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_038022_leftImg8bit.png gtFine/train/bochum/bochum_000000_038022_gtFine_labelTrainIds.png
+leftImg8bit/train/bochum/bochum_000000_038150_leftImg8bit.png gtFine/train/bochum/bochum_000000_038150_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000000_000019_leftImg8bit.png gtFine/train/bremen/bremen_000000_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000001_000019_leftImg8bit.png gtFine/train/bremen/bremen_000001_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000002_000019_leftImg8bit.png gtFine/train/bremen/bremen_000002_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000003_000019_leftImg8bit.png gtFine/train/bremen/bremen_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000004_000019_leftImg8bit.png gtFine/train/bremen/bremen_000004_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000005_000019_leftImg8bit.png gtFine/train/bremen/bremen_000005_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000006_000019_leftImg8bit.png gtFine/train/bremen/bremen_000006_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000007_000019_leftImg8bit.png gtFine/train/bremen/bremen_000007_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000008_000019_leftImg8bit.png gtFine/train/bremen/bremen_000008_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000009_000019_leftImg8bit.png gtFine/train/bremen/bremen_000009_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000010_000019_leftImg8bit.png gtFine/train/bremen/bremen_000010_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000011_000019_leftImg8bit.png gtFine/train/bremen/bremen_000011_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000012_000019_leftImg8bit.png gtFine/train/bremen/bremen_000012_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000013_000019_leftImg8bit.png gtFine/train/bremen/bremen_000013_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000014_000019_leftImg8bit.png gtFine/train/bremen/bremen_000014_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000015_000019_leftImg8bit.png gtFine/train/bremen/bremen_000015_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000016_000019_leftImg8bit.png gtFine/train/bremen/bremen_000016_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000017_000019_leftImg8bit.png gtFine/train/bremen/bremen_000017_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000018_000019_leftImg8bit.png gtFine/train/bremen/bremen_000018_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000019_000019_leftImg8bit.png gtFine/train/bremen/bremen_000019_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000020_000019_leftImg8bit.png gtFine/train/bremen/bremen_000020_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000021_000019_leftImg8bit.png gtFine/train/bremen/bremen_000021_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000022_000019_leftImg8bit.png gtFine/train/bremen/bremen_000022_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000023_000019_leftImg8bit.png gtFine/train/bremen/bremen_000023_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000024_000019_leftImg8bit.png gtFine/train/bremen/bremen_000024_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000025_000019_leftImg8bit.png gtFine/train/bremen/bremen_000025_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000026_000019_leftImg8bit.png gtFine/train/bremen/bremen_000026_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000027_000019_leftImg8bit.png gtFine/train/bremen/bremen_000027_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000028_000019_leftImg8bit.png gtFine/train/bremen/bremen_000028_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000029_000019_leftImg8bit.png gtFine/train/bremen/bremen_000029_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000030_000019_leftImg8bit.png gtFine/train/bremen/bremen_000030_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000031_000019_leftImg8bit.png gtFine/train/bremen/bremen_000031_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000032_000019_leftImg8bit.png gtFine/train/bremen/bremen_000032_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000033_000019_leftImg8bit.png gtFine/train/bremen/bremen_000033_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000034_000019_leftImg8bit.png gtFine/train/bremen/bremen_000034_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000035_000019_leftImg8bit.png gtFine/train/bremen/bremen_000035_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000036_000019_leftImg8bit.png gtFine/train/bremen/bremen_000036_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000037_000019_leftImg8bit.png gtFine/train/bremen/bremen_000037_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000038_000019_leftImg8bit.png gtFine/train/bremen/bremen_000038_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000039_000019_leftImg8bit.png gtFine/train/bremen/bremen_000039_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000040_000019_leftImg8bit.png gtFine/train/bremen/bremen_000040_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000041_000019_leftImg8bit.png gtFine/train/bremen/bremen_000041_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000042_000019_leftImg8bit.png gtFine/train/bremen/bremen_000042_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000043_000019_leftImg8bit.png gtFine/train/bremen/bremen_000043_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000044_000019_leftImg8bit.png gtFine/train/bremen/bremen_000044_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000045_000019_leftImg8bit.png gtFine/train/bremen/bremen_000045_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000046_000019_leftImg8bit.png gtFine/train/bremen/bremen_000046_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000047_000019_leftImg8bit.png gtFine/train/bremen/bremen_000047_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000048_000019_leftImg8bit.png gtFine/train/bremen/bremen_000048_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000049_000019_leftImg8bit.png gtFine/train/bremen/bremen_000049_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000050_000019_leftImg8bit.png gtFine/train/bremen/bremen_000050_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000051_000019_leftImg8bit.png gtFine/train/bremen/bremen_000051_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000052_000019_leftImg8bit.png gtFine/train/bremen/bremen_000052_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000053_000019_leftImg8bit.png gtFine/train/bremen/bremen_000053_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000054_000019_leftImg8bit.png gtFine/train/bremen/bremen_000054_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000055_000019_leftImg8bit.png gtFine/train/bremen/bremen_000055_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000056_000019_leftImg8bit.png gtFine/train/bremen/bremen_000056_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000057_000019_leftImg8bit.png gtFine/train/bremen/bremen_000057_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000058_000019_leftImg8bit.png gtFine/train/bremen/bremen_000058_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000059_000019_leftImg8bit.png gtFine/train/bremen/bremen_000059_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000060_000019_leftImg8bit.png gtFine/train/bremen/bremen_000060_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000061_000019_leftImg8bit.png gtFine/train/bremen/bremen_000061_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000062_000019_leftImg8bit.png gtFine/train/bremen/bremen_000062_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000063_000019_leftImg8bit.png gtFine/train/bremen/bremen_000063_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000064_000019_leftImg8bit.png gtFine/train/bremen/bremen_000064_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000065_000019_leftImg8bit.png gtFine/train/bremen/bremen_000065_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000066_000019_leftImg8bit.png gtFine/train/bremen/bremen_000066_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000067_000019_leftImg8bit.png gtFine/train/bremen/bremen_000067_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000068_000019_leftImg8bit.png gtFine/train/bremen/bremen_000068_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000069_000019_leftImg8bit.png gtFine/train/bremen/bremen_000069_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000070_000019_leftImg8bit.png gtFine/train/bremen/bremen_000070_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000071_000019_leftImg8bit.png gtFine/train/bremen/bremen_000071_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000072_000019_leftImg8bit.png gtFine/train/bremen/bremen_000072_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000073_000019_leftImg8bit.png gtFine/train/bremen/bremen_000073_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000074_000019_leftImg8bit.png gtFine/train/bremen/bremen_000074_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000075_000019_leftImg8bit.png gtFine/train/bremen/bremen_000075_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000076_000019_leftImg8bit.png gtFine/train/bremen/bremen_000076_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000077_000019_leftImg8bit.png gtFine/train/bremen/bremen_000077_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000078_000019_leftImg8bit.png gtFine/train/bremen/bremen_000078_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000079_000019_leftImg8bit.png gtFine/train/bremen/bremen_000079_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000080_000019_leftImg8bit.png gtFine/train/bremen/bremen_000080_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000081_000019_leftImg8bit.png gtFine/train/bremen/bremen_000081_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000082_000019_leftImg8bit.png gtFine/train/bremen/bremen_000082_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000083_000019_leftImg8bit.png gtFine/train/bremen/bremen_000083_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000084_000019_leftImg8bit.png gtFine/train/bremen/bremen_000084_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000085_000019_leftImg8bit.png gtFine/train/bremen/bremen_000085_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000086_000019_leftImg8bit.png gtFine/train/bremen/bremen_000086_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000087_000019_leftImg8bit.png gtFine/train/bremen/bremen_000087_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000088_000019_leftImg8bit.png gtFine/train/bremen/bremen_000088_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000089_000019_leftImg8bit.png gtFine/train/bremen/bremen_000089_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000090_000019_leftImg8bit.png gtFine/train/bremen/bremen_000090_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000091_000019_leftImg8bit.png gtFine/train/bremen/bremen_000091_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000092_000019_leftImg8bit.png gtFine/train/bremen/bremen_000092_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000093_000019_leftImg8bit.png gtFine/train/bremen/bremen_000093_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000094_000019_leftImg8bit.png gtFine/train/bremen/bremen_000094_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000095_000019_leftImg8bit.png gtFine/train/bremen/bremen_000095_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000096_000019_leftImg8bit.png gtFine/train/bremen/bremen_000096_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000097_000019_leftImg8bit.png gtFine/train/bremen/bremen_000097_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000098_000019_leftImg8bit.png gtFine/train/bremen/bremen_000098_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000099_000019_leftImg8bit.png gtFine/train/bremen/bremen_000099_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000100_000019_leftImg8bit.png gtFine/train/bremen/bremen_000100_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000101_000019_leftImg8bit.png gtFine/train/bremen/bremen_000101_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000102_000019_leftImg8bit.png gtFine/train/bremen/bremen_000102_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000103_000019_leftImg8bit.png gtFine/train/bremen/bremen_000103_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000104_000019_leftImg8bit.png gtFine/train/bremen/bremen_000104_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000105_000019_leftImg8bit.png gtFine/train/bremen/bremen_000105_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000106_000019_leftImg8bit.png gtFine/train/bremen/bremen_000106_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000107_000019_leftImg8bit.png gtFine/train/bremen/bremen_000107_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000108_000019_leftImg8bit.png gtFine/train/bremen/bremen_000108_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000109_000019_leftImg8bit.png gtFine/train/bremen/bremen_000109_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000110_000019_leftImg8bit.png gtFine/train/bremen/bremen_000110_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000111_000019_leftImg8bit.png gtFine/train/bremen/bremen_000111_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000112_000019_leftImg8bit.png gtFine/train/bremen/bremen_000112_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000113_000019_leftImg8bit.png gtFine/train/bremen/bremen_000113_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000114_000019_leftImg8bit.png gtFine/train/bremen/bremen_000114_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000115_000019_leftImg8bit.png gtFine/train/bremen/bremen_000115_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000116_000019_leftImg8bit.png gtFine/train/bremen/bremen_000116_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000117_000019_leftImg8bit.png gtFine/train/bremen/bremen_000117_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000118_000019_leftImg8bit.png gtFine/train/bremen/bremen_000118_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000119_000019_leftImg8bit.png gtFine/train/bremen/bremen_000119_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000120_000019_leftImg8bit.png gtFine/train/bremen/bremen_000120_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000121_000019_leftImg8bit.png gtFine/train/bremen/bremen_000121_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000122_000019_leftImg8bit.png gtFine/train/bremen/bremen_000122_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000123_000019_leftImg8bit.png gtFine/train/bremen/bremen_000123_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000124_000019_leftImg8bit.png gtFine/train/bremen/bremen_000124_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000125_000019_leftImg8bit.png gtFine/train/bremen/bremen_000125_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000126_000019_leftImg8bit.png gtFine/train/bremen/bremen_000126_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000127_000019_leftImg8bit.png gtFine/train/bremen/bremen_000127_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000128_000019_leftImg8bit.png gtFine/train/bremen/bremen_000128_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000129_000019_leftImg8bit.png gtFine/train/bremen/bremen_000129_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000130_000019_leftImg8bit.png gtFine/train/bremen/bremen_000130_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000131_000019_leftImg8bit.png gtFine/train/bremen/bremen_000131_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000132_000019_leftImg8bit.png gtFine/train/bremen/bremen_000132_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000133_000019_leftImg8bit.png gtFine/train/bremen/bremen_000133_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000134_000019_leftImg8bit.png gtFine/train/bremen/bremen_000134_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000135_000019_leftImg8bit.png gtFine/train/bremen/bremen_000135_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000136_000019_leftImg8bit.png gtFine/train/bremen/bremen_000136_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000137_000019_leftImg8bit.png gtFine/train/bremen/bremen_000137_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000138_000019_leftImg8bit.png gtFine/train/bremen/bremen_000138_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000139_000019_leftImg8bit.png gtFine/train/bremen/bremen_000139_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000140_000019_leftImg8bit.png gtFine/train/bremen/bremen_000140_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000141_000019_leftImg8bit.png gtFine/train/bremen/bremen_000141_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000142_000019_leftImg8bit.png gtFine/train/bremen/bremen_000142_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000143_000019_leftImg8bit.png gtFine/train/bremen/bremen_000143_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000144_000019_leftImg8bit.png gtFine/train/bremen/bremen_000144_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000145_000019_leftImg8bit.png gtFine/train/bremen/bremen_000145_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000146_000019_leftImg8bit.png gtFine/train/bremen/bremen_000146_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000147_000019_leftImg8bit.png gtFine/train/bremen/bremen_000147_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000148_000019_leftImg8bit.png gtFine/train/bremen/bremen_000148_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000149_000019_leftImg8bit.png gtFine/train/bremen/bremen_000149_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000150_000019_leftImg8bit.png gtFine/train/bremen/bremen_000150_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000151_000019_leftImg8bit.png gtFine/train/bremen/bremen_000151_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000152_000019_leftImg8bit.png gtFine/train/bremen/bremen_000152_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000153_000019_leftImg8bit.png gtFine/train/bremen/bremen_000153_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000154_000019_leftImg8bit.png gtFine/train/bremen/bremen_000154_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000155_000019_leftImg8bit.png gtFine/train/bremen/bremen_000155_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000156_000019_leftImg8bit.png gtFine/train/bremen/bremen_000156_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000157_000019_leftImg8bit.png gtFine/train/bremen/bremen_000157_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000158_000019_leftImg8bit.png gtFine/train/bremen/bremen_000158_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000159_000019_leftImg8bit.png gtFine/train/bremen/bremen_000159_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000160_000019_leftImg8bit.png gtFine/train/bremen/bremen_000160_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000161_000019_leftImg8bit.png gtFine/train/bremen/bremen_000161_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000162_000019_leftImg8bit.png gtFine/train/bremen/bremen_000162_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000163_000019_leftImg8bit.png gtFine/train/bremen/bremen_000163_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000164_000019_leftImg8bit.png gtFine/train/bremen/bremen_000164_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000165_000019_leftImg8bit.png gtFine/train/bremen/bremen_000165_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000166_000019_leftImg8bit.png gtFine/train/bremen/bremen_000166_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000167_000019_leftImg8bit.png gtFine/train/bremen/bremen_000167_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000168_000019_leftImg8bit.png gtFine/train/bremen/bremen_000168_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000169_000019_leftImg8bit.png gtFine/train/bremen/bremen_000169_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000170_000019_leftImg8bit.png gtFine/train/bremen/bremen_000170_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000171_000019_leftImg8bit.png gtFine/train/bremen/bremen_000171_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000172_000019_leftImg8bit.png gtFine/train/bremen/bremen_000172_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000173_000019_leftImg8bit.png gtFine/train/bremen/bremen_000173_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000174_000019_leftImg8bit.png gtFine/train/bremen/bremen_000174_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000175_000019_leftImg8bit.png gtFine/train/bremen/bremen_000175_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000176_000019_leftImg8bit.png gtFine/train/bremen/bremen_000176_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000177_000019_leftImg8bit.png gtFine/train/bremen/bremen_000177_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000178_000019_leftImg8bit.png gtFine/train/bremen/bremen_000178_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000179_000019_leftImg8bit.png gtFine/train/bremen/bremen_000179_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000180_000019_leftImg8bit.png gtFine/train/bremen/bremen_000180_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000181_000019_leftImg8bit.png gtFine/train/bremen/bremen_000181_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000182_000019_leftImg8bit.png gtFine/train/bremen/bremen_000182_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000183_000019_leftImg8bit.png gtFine/train/bremen/bremen_000183_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000184_000019_leftImg8bit.png gtFine/train/bremen/bremen_000184_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000185_000019_leftImg8bit.png gtFine/train/bremen/bremen_000185_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000186_000019_leftImg8bit.png gtFine/train/bremen/bremen_000186_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000187_000019_leftImg8bit.png gtFine/train/bremen/bremen_000187_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000188_000019_leftImg8bit.png gtFine/train/bremen/bremen_000188_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000189_000019_leftImg8bit.png gtFine/train/bremen/bremen_000189_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000190_000019_leftImg8bit.png gtFine/train/bremen/bremen_000190_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000191_000019_leftImg8bit.png gtFine/train/bremen/bremen_000191_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000192_000019_leftImg8bit.png gtFine/train/bremen/bremen_000192_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000193_000019_leftImg8bit.png gtFine/train/bremen/bremen_000193_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000194_000019_leftImg8bit.png gtFine/train/bremen/bremen_000194_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000195_000019_leftImg8bit.png gtFine/train/bremen/bremen_000195_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000196_000019_leftImg8bit.png gtFine/train/bremen/bremen_000196_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000197_000019_leftImg8bit.png gtFine/train/bremen/bremen_000197_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000198_000019_leftImg8bit.png gtFine/train/bremen/bremen_000198_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000199_000019_leftImg8bit.png gtFine/train/bremen/bremen_000199_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000200_000019_leftImg8bit.png gtFine/train/bremen/bremen_000200_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000201_000019_leftImg8bit.png gtFine/train/bremen/bremen_000201_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000202_000019_leftImg8bit.png gtFine/train/bremen/bremen_000202_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000203_000019_leftImg8bit.png gtFine/train/bremen/bremen_000203_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000204_000019_leftImg8bit.png gtFine/train/bremen/bremen_000204_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000205_000019_leftImg8bit.png gtFine/train/bremen/bremen_000205_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000206_000019_leftImg8bit.png gtFine/train/bremen/bremen_000206_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000207_000019_leftImg8bit.png gtFine/train/bremen/bremen_000207_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000208_000019_leftImg8bit.png gtFine/train/bremen/bremen_000208_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000209_000019_leftImg8bit.png gtFine/train/bremen/bremen_000209_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000210_000019_leftImg8bit.png gtFine/train/bremen/bremen_000210_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000211_000019_leftImg8bit.png gtFine/train/bremen/bremen_000211_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000212_000019_leftImg8bit.png gtFine/train/bremen/bremen_000212_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000213_000019_leftImg8bit.png gtFine/train/bremen/bremen_000213_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000214_000019_leftImg8bit.png gtFine/train/bremen/bremen_000214_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000215_000019_leftImg8bit.png gtFine/train/bremen/bremen_000215_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000216_000019_leftImg8bit.png gtFine/train/bremen/bremen_000216_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000217_000019_leftImg8bit.png gtFine/train/bremen/bremen_000217_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000218_000019_leftImg8bit.png gtFine/train/bremen/bremen_000218_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000219_000019_leftImg8bit.png gtFine/train/bremen/bremen_000219_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000220_000019_leftImg8bit.png gtFine/train/bremen/bremen_000220_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000221_000019_leftImg8bit.png gtFine/train/bremen/bremen_000221_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000222_000019_leftImg8bit.png gtFine/train/bremen/bremen_000222_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000223_000019_leftImg8bit.png gtFine/train/bremen/bremen_000223_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000224_000019_leftImg8bit.png gtFine/train/bremen/bremen_000224_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000225_000019_leftImg8bit.png gtFine/train/bremen/bremen_000225_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000226_000019_leftImg8bit.png gtFine/train/bremen/bremen_000226_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000227_000019_leftImg8bit.png gtFine/train/bremen/bremen_000227_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000228_000019_leftImg8bit.png gtFine/train/bremen/bremen_000228_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000229_000019_leftImg8bit.png gtFine/train/bremen/bremen_000229_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000230_000019_leftImg8bit.png gtFine/train/bremen/bremen_000230_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000231_000019_leftImg8bit.png gtFine/train/bremen/bremen_000231_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000232_000019_leftImg8bit.png gtFine/train/bremen/bremen_000232_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000233_000019_leftImg8bit.png gtFine/train/bremen/bremen_000233_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000234_000019_leftImg8bit.png gtFine/train/bremen/bremen_000234_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000235_000019_leftImg8bit.png gtFine/train/bremen/bremen_000235_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000236_000019_leftImg8bit.png gtFine/train/bremen/bremen_000236_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000237_000019_leftImg8bit.png gtFine/train/bremen/bremen_000237_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000238_000019_leftImg8bit.png gtFine/train/bremen/bremen_000238_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000239_000019_leftImg8bit.png gtFine/train/bremen/bremen_000239_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000240_000019_leftImg8bit.png gtFine/train/bremen/bremen_000240_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000241_000019_leftImg8bit.png gtFine/train/bremen/bremen_000241_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000242_000019_leftImg8bit.png gtFine/train/bremen/bremen_000242_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000243_000019_leftImg8bit.png gtFine/train/bremen/bremen_000243_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000244_000019_leftImg8bit.png gtFine/train/bremen/bremen_000244_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000245_000019_leftImg8bit.png gtFine/train/bremen/bremen_000245_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000246_000019_leftImg8bit.png gtFine/train/bremen/bremen_000246_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000247_000019_leftImg8bit.png gtFine/train/bremen/bremen_000247_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000248_000019_leftImg8bit.png gtFine/train/bremen/bremen_000248_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000249_000019_leftImg8bit.png gtFine/train/bremen/bremen_000249_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000250_000019_leftImg8bit.png gtFine/train/bremen/bremen_000250_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000251_000019_leftImg8bit.png gtFine/train/bremen/bremen_000251_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000252_000019_leftImg8bit.png gtFine/train/bremen/bremen_000252_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000253_000019_leftImg8bit.png gtFine/train/bremen/bremen_000253_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000254_000019_leftImg8bit.png gtFine/train/bremen/bremen_000254_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000255_000019_leftImg8bit.png gtFine/train/bremen/bremen_000255_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000256_000019_leftImg8bit.png gtFine/train/bremen/bremen_000256_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000257_000019_leftImg8bit.png gtFine/train/bremen/bremen_000257_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000258_000019_leftImg8bit.png gtFine/train/bremen/bremen_000258_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000259_000019_leftImg8bit.png gtFine/train/bremen/bremen_000259_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000260_000019_leftImg8bit.png gtFine/train/bremen/bremen_000260_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000261_000019_leftImg8bit.png gtFine/train/bremen/bremen_000261_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000262_000019_leftImg8bit.png gtFine/train/bremen/bremen_000262_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000263_000019_leftImg8bit.png gtFine/train/bremen/bremen_000263_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000264_000019_leftImg8bit.png gtFine/train/bremen/bremen_000264_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000265_000019_leftImg8bit.png gtFine/train/bremen/bremen_000265_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000266_000019_leftImg8bit.png gtFine/train/bremen/bremen_000266_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000267_000019_leftImg8bit.png gtFine/train/bremen/bremen_000267_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000268_000019_leftImg8bit.png gtFine/train/bremen/bremen_000268_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000269_000019_leftImg8bit.png gtFine/train/bremen/bremen_000269_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000270_000019_leftImg8bit.png gtFine/train/bremen/bremen_000270_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000271_000019_leftImg8bit.png gtFine/train/bremen/bremen_000271_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000272_000019_leftImg8bit.png gtFine/train/bremen/bremen_000272_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000273_000019_leftImg8bit.png gtFine/train/bremen/bremen_000273_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000274_000019_leftImg8bit.png gtFine/train/bremen/bremen_000274_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000275_000019_leftImg8bit.png gtFine/train/bremen/bremen_000275_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000276_000019_leftImg8bit.png gtFine/train/bremen/bremen_000276_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000277_000019_leftImg8bit.png gtFine/train/bremen/bremen_000277_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000278_000019_leftImg8bit.png gtFine/train/bremen/bremen_000278_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000279_000019_leftImg8bit.png gtFine/train/bremen/bremen_000279_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000280_000019_leftImg8bit.png gtFine/train/bremen/bremen_000280_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000281_000019_leftImg8bit.png gtFine/train/bremen/bremen_000281_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000282_000019_leftImg8bit.png gtFine/train/bremen/bremen_000282_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000283_000019_leftImg8bit.png gtFine/train/bremen/bremen_000283_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000284_000019_leftImg8bit.png gtFine/train/bremen/bremen_000284_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000285_000019_leftImg8bit.png gtFine/train/bremen/bremen_000285_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000286_000019_leftImg8bit.png gtFine/train/bremen/bremen_000286_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000287_000019_leftImg8bit.png gtFine/train/bremen/bremen_000287_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000288_000019_leftImg8bit.png gtFine/train/bremen/bremen_000288_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000289_000019_leftImg8bit.png gtFine/train/bremen/bremen_000289_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000290_000019_leftImg8bit.png gtFine/train/bremen/bremen_000290_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000291_000019_leftImg8bit.png gtFine/train/bremen/bremen_000291_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000292_000019_leftImg8bit.png gtFine/train/bremen/bremen_000292_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000293_000019_leftImg8bit.png gtFine/train/bremen/bremen_000293_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000294_000019_leftImg8bit.png gtFine/train/bremen/bremen_000294_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000295_000019_leftImg8bit.png gtFine/train/bremen/bremen_000295_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000296_000019_leftImg8bit.png gtFine/train/bremen/bremen_000296_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000297_000019_leftImg8bit.png gtFine/train/bremen/bremen_000297_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000298_000019_leftImg8bit.png gtFine/train/bremen/bremen_000298_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000299_000019_leftImg8bit.png gtFine/train/bremen/bremen_000299_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000300_000019_leftImg8bit.png gtFine/train/bremen/bremen_000300_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000301_000019_leftImg8bit.png gtFine/train/bremen/bremen_000301_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000302_000019_leftImg8bit.png gtFine/train/bremen/bremen_000302_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000303_000019_leftImg8bit.png gtFine/train/bremen/bremen_000303_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000304_000019_leftImg8bit.png gtFine/train/bremen/bremen_000304_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000305_000019_leftImg8bit.png gtFine/train/bremen/bremen_000305_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000306_000019_leftImg8bit.png gtFine/train/bremen/bremen_000306_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000307_000019_leftImg8bit.png gtFine/train/bremen/bremen_000307_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000308_000019_leftImg8bit.png gtFine/train/bremen/bremen_000308_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000309_000019_leftImg8bit.png gtFine/train/bremen/bremen_000309_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000310_000019_leftImg8bit.png gtFine/train/bremen/bremen_000310_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000311_000019_leftImg8bit.png gtFine/train/bremen/bremen_000311_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000312_000019_leftImg8bit.png gtFine/train/bremen/bremen_000312_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000313_000019_leftImg8bit.png gtFine/train/bremen/bremen_000313_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000314_000019_leftImg8bit.png gtFine/train/bremen/bremen_000314_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/bremen/bremen_000315_000019_leftImg8bit.png gtFine/train/bremen/bremen_000315_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000000_000019_leftImg8bit.png gtFine/train/cologne/cologne_000000_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000001_000019_leftImg8bit.png gtFine/train/cologne/cologne_000001_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000002_000019_leftImg8bit.png gtFine/train/cologne/cologne_000002_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000003_000019_leftImg8bit.png gtFine/train/cologne/cologne_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000004_000019_leftImg8bit.png gtFine/train/cologne/cologne_000004_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000005_000019_leftImg8bit.png gtFine/train/cologne/cologne_000005_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000006_000019_leftImg8bit.png gtFine/train/cologne/cologne_000006_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000007_000019_leftImg8bit.png gtFine/train/cologne/cologne_000007_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000008_000019_leftImg8bit.png gtFine/train/cologne/cologne_000008_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000009_000019_leftImg8bit.png gtFine/train/cologne/cologne_000009_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000010_000019_leftImg8bit.png gtFine/train/cologne/cologne_000010_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000011_000019_leftImg8bit.png gtFine/train/cologne/cologne_000011_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000012_000019_leftImg8bit.png gtFine/train/cologne/cologne_000012_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000013_000019_leftImg8bit.png gtFine/train/cologne/cologne_000013_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000014_000019_leftImg8bit.png gtFine/train/cologne/cologne_000014_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000015_000019_leftImg8bit.png gtFine/train/cologne/cologne_000015_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000016_000019_leftImg8bit.png gtFine/train/cologne/cologne_000016_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000017_000019_leftImg8bit.png gtFine/train/cologne/cologne_000017_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000018_000019_leftImg8bit.png gtFine/train/cologne/cologne_000018_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000019_000019_leftImg8bit.png gtFine/train/cologne/cologne_000019_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000020_000019_leftImg8bit.png gtFine/train/cologne/cologne_000020_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000021_000019_leftImg8bit.png gtFine/train/cologne/cologne_000021_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000022_000019_leftImg8bit.png gtFine/train/cologne/cologne_000022_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000023_000019_leftImg8bit.png gtFine/train/cologne/cologne_000023_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000024_000019_leftImg8bit.png gtFine/train/cologne/cologne_000024_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000025_000019_leftImg8bit.png gtFine/train/cologne/cologne_000025_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000026_000019_leftImg8bit.png gtFine/train/cologne/cologne_000026_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000027_000019_leftImg8bit.png gtFine/train/cologne/cologne_000027_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000028_000019_leftImg8bit.png gtFine/train/cologne/cologne_000028_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000029_000019_leftImg8bit.png gtFine/train/cologne/cologne_000029_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000030_000019_leftImg8bit.png gtFine/train/cologne/cologne_000030_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000031_000019_leftImg8bit.png gtFine/train/cologne/cologne_000031_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000032_000019_leftImg8bit.png gtFine/train/cologne/cologne_000032_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000033_000019_leftImg8bit.png gtFine/train/cologne/cologne_000033_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000034_000019_leftImg8bit.png gtFine/train/cologne/cologne_000034_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000035_000019_leftImg8bit.png gtFine/train/cologne/cologne_000035_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000036_000019_leftImg8bit.png gtFine/train/cologne/cologne_000036_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000037_000019_leftImg8bit.png gtFine/train/cologne/cologne_000037_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000038_000019_leftImg8bit.png gtFine/train/cologne/cologne_000038_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000039_000019_leftImg8bit.png gtFine/train/cologne/cologne_000039_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000040_000019_leftImg8bit.png gtFine/train/cologne/cologne_000040_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000041_000019_leftImg8bit.png gtFine/train/cologne/cologne_000041_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000042_000019_leftImg8bit.png gtFine/train/cologne/cologne_000042_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000043_000019_leftImg8bit.png gtFine/train/cologne/cologne_000043_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000044_000019_leftImg8bit.png gtFine/train/cologne/cologne_000044_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000045_000019_leftImg8bit.png gtFine/train/cologne/cologne_000045_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000046_000019_leftImg8bit.png gtFine/train/cologne/cologne_000046_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000047_000019_leftImg8bit.png gtFine/train/cologne/cologne_000047_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000048_000019_leftImg8bit.png gtFine/train/cologne/cologne_000048_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000049_000019_leftImg8bit.png gtFine/train/cologne/cologne_000049_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000050_000019_leftImg8bit.png gtFine/train/cologne/cologne_000050_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000051_000019_leftImg8bit.png gtFine/train/cologne/cologne_000051_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000052_000019_leftImg8bit.png gtFine/train/cologne/cologne_000052_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000053_000019_leftImg8bit.png gtFine/train/cologne/cologne_000053_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000054_000019_leftImg8bit.png gtFine/train/cologne/cologne_000054_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000055_000019_leftImg8bit.png gtFine/train/cologne/cologne_000055_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000056_000019_leftImg8bit.png gtFine/train/cologne/cologne_000056_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000057_000019_leftImg8bit.png gtFine/train/cologne/cologne_000057_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000058_000019_leftImg8bit.png gtFine/train/cologne/cologne_000058_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000059_000019_leftImg8bit.png gtFine/train/cologne/cologne_000059_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000060_000019_leftImg8bit.png gtFine/train/cologne/cologne_000060_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000061_000019_leftImg8bit.png gtFine/train/cologne/cologne_000061_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000062_000019_leftImg8bit.png gtFine/train/cologne/cologne_000062_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000063_000019_leftImg8bit.png gtFine/train/cologne/cologne_000063_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000064_000019_leftImg8bit.png gtFine/train/cologne/cologne_000064_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000065_000019_leftImg8bit.png gtFine/train/cologne/cologne_000065_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000066_000019_leftImg8bit.png gtFine/train/cologne/cologne_000066_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000067_000019_leftImg8bit.png gtFine/train/cologne/cologne_000067_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000068_000019_leftImg8bit.png gtFine/train/cologne/cologne_000068_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000069_000019_leftImg8bit.png gtFine/train/cologne/cologne_000069_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000070_000019_leftImg8bit.png gtFine/train/cologne/cologne_000070_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000071_000019_leftImg8bit.png gtFine/train/cologne/cologne_000071_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000072_000019_leftImg8bit.png gtFine/train/cologne/cologne_000072_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000073_000019_leftImg8bit.png gtFine/train/cologne/cologne_000073_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000074_000019_leftImg8bit.png gtFine/train/cologne/cologne_000074_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000075_000019_leftImg8bit.png gtFine/train/cologne/cologne_000075_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000076_000019_leftImg8bit.png gtFine/train/cologne/cologne_000076_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000077_000019_leftImg8bit.png gtFine/train/cologne/cologne_000077_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000078_000019_leftImg8bit.png gtFine/train/cologne/cologne_000078_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000079_000019_leftImg8bit.png gtFine/train/cologne/cologne_000079_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000080_000019_leftImg8bit.png gtFine/train/cologne/cologne_000080_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000081_000019_leftImg8bit.png gtFine/train/cologne/cologne_000081_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000082_000019_leftImg8bit.png gtFine/train/cologne/cologne_000082_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000083_000019_leftImg8bit.png gtFine/train/cologne/cologne_000083_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000084_000019_leftImg8bit.png gtFine/train/cologne/cologne_000084_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000085_000019_leftImg8bit.png gtFine/train/cologne/cologne_000085_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000086_000019_leftImg8bit.png gtFine/train/cologne/cologne_000086_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000087_000019_leftImg8bit.png gtFine/train/cologne/cologne_000087_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000088_000019_leftImg8bit.png gtFine/train/cologne/cologne_000088_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000089_000019_leftImg8bit.png gtFine/train/cologne/cologne_000089_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000090_000019_leftImg8bit.png gtFine/train/cologne/cologne_000090_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000091_000019_leftImg8bit.png gtFine/train/cologne/cologne_000091_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000092_000019_leftImg8bit.png gtFine/train/cologne/cologne_000092_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000093_000019_leftImg8bit.png gtFine/train/cologne/cologne_000093_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000094_000019_leftImg8bit.png gtFine/train/cologne/cologne_000094_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000095_000019_leftImg8bit.png gtFine/train/cologne/cologne_000095_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000096_000019_leftImg8bit.png gtFine/train/cologne/cologne_000096_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000097_000019_leftImg8bit.png gtFine/train/cologne/cologne_000097_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000098_000019_leftImg8bit.png gtFine/train/cologne/cologne_000098_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000099_000019_leftImg8bit.png gtFine/train/cologne/cologne_000099_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000100_000019_leftImg8bit.png gtFine/train/cologne/cologne_000100_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000101_000019_leftImg8bit.png gtFine/train/cologne/cologne_000101_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000102_000019_leftImg8bit.png gtFine/train/cologne/cologne_000102_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000103_000019_leftImg8bit.png gtFine/train/cologne/cologne_000103_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000104_000019_leftImg8bit.png gtFine/train/cologne/cologne_000104_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000105_000019_leftImg8bit.png gtFine/train/cologne/cologne_000105_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000106_000019_leftImg8bit.png gtFine/train/cologne/cologne_000106_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000107_000019_leftImg8bit.png gtFine/train/cologne/cologne_000107_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000108_000019_leftImg8bit.png gtFine/train/cologne/cologne_000108_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000109_000019_leftImg8bit.png gtFine/train/cologne/cologne_000109_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000110_000019_leftImg8bit.png gtFine/train/cologne/cologne_000110_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000111_000019_leftImg8bit.png gtFine/train/cologne/cologne_000111_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000112_000019_leftImg8bit.png gtFine/train/cologne/cologne_000112_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000113_000019_leftImg8bit.png gtFine/train/cologne/cologne_000113_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000114_000019_leftImg8bit.png gtFine/train/cologne/cologne_000114_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000115_000019_leftImg8bit.png gtFine/train/cologne/cologne_000115_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000116_000019_leftImg8bit.png gtFine/train/cologne/cologne_000116_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000117_000019_leftImg8bit.png gtFine/train/cologne/cologne_000117_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000118_000019_leftImg8bit.png gtFine/train/cologne/cologne_000118_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000119_000019_leftImg8bit.png gtFine/train/cologne/cologne_000119_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000120_000019_leftImg8bit.png gtFine/train/cologne/cologne_000120_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000121_000019_leftImg8bit.png gtFine/train/cologne/cologne_000121_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000122_000019_leftImg8bit.png gtFine/train/cologne/cologne_000122_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000123_000019_leftImg8bit.png gtFine/train/cologne/cologne_000123_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000124_000019_leftImg8bit.png gtFine/train/cologne/cologne_000124_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000125_000019_leftImg8bit.png gtFine/train/cologne/cologne_000125_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000126_000019_leftImg8bit.png gtFine/train/cologne/cologne_000126_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000127_000019_leftImg8bit.png gtFine/train/cologne/cologne_000127_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000128_000019_leftImg8bit.png gtFine/train/cologne/cologne_000128_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000129_000019_leftImg8bit.png gtFine/train/cologne/cologne_000129_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000130_000019_leftImg8bit.png gtFine/train/cologne/cologne_000130_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000131_000019_leftImg8bit.png gtFine/train/cologne/cologne_000131_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000132_000019_leftImg8bit.png gtFine/train/cologne/cologne_000132_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000133_000019_leftImg8bit.png gtFine/train/cologne/cologne_000133_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000134_000019_leftImg8bit.png gtFine/train/cologne/cologne_000134_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000135_000019_leftImg8bit.png gtFine/train/cologne/cologne_000135_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000136_000019_leftImg8bit.png gtFine/train/cologne/cologne_000136_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000137_000019_leftImg8bit.png gtFine/train/cologne/cologne_000137_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000138_000019_leftImg8bit.png gtFine/train/cologne/cologne_000138_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000139_000019_leftImg8bit.png gtFine/train/cologne/cologne_000139_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000140_000019_leftImg8bit.png gtFine/train/cologne/cologne_000140_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000141_000019_leftImg8bit.png gtFine/train/cologne/cologne_000141_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000142_000019_leftImg8bit.png gtFine/train/cologne/cologne_000142_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000143_000019_leftImg8bit.png gtFine/train/cologne/cologne_000143_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000144_000019_leftImg8bit.png gtFine/train/cologne/cologne_000144_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000145_000019_leftImg8bit.png gtFine/train/cologne/cologne_000145_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000146_000019_leftImg8bit.png gtFine/train/cologne/cologne_000146_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000147_000019_leftImg8bit.png gtFine/train/cologne/cologne_000147_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000148_000019_leftImg8bit.png gtFine/train/cologne/cologne_000148_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000149_000019_leftImg8bit.png gtFine/train/cologne/cologne_000149_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000150_000019_leftImg8bit.png gtFine/train/cologne/cologne_000150_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000151_000019_leftImg8bit.png gtFine/train/cologne/cologne_000151_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000152_000019_leftImg8bit.png gtFine/train/cologne/cologne_000152_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/cologne/cologne_000153_000019_leftImg8bit.png gtFine/train/cologne/cologne_000153_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000000_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000000_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000001_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000001_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000002_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000002_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000003_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000004_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000004_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000005_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000005_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000006_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000006_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000007_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000007_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000008_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000008_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000009_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000009_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000010_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000010_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000011_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000011_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000012_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000012_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000013_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000013_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000014_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000014_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000015_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000015_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000016_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000016_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000017_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000017_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000018_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000018_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000019_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000019_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000020_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000020_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000021_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000021_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000022_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000022_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000023_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000023_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000024_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000024_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000025_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000025_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000026_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000026_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000027_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000027_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000028_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000028_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000029_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000029_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/darmstadt/darmstadt_000030_000019_leftImg8bit.png 
gtFine/train/darmstadt/darmstadt_000030_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000031_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000031_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000032_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000032_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000033_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000033_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000034_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000034_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000035_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000035_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000036_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000036_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000037_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000037_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000038_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000038_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000039_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000039_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000040_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000040_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000041_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000041_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000042_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000042_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000043_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000043_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000044_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000044_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000045_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000045_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000046_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000046_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000047_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000047_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000048_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000048_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000049_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000049_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000050_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000050_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000051_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000051_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000052_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000052_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000053_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000053_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000054_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000054_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000055_000019_leftImg8bit.png 
gtFine/train/darmstadt/darmstadt_000055_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000056_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000056_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000057_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000057_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000058_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000058_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000059_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000059_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000060_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000060_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000061_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000061_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000062_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000062_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000063_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000063_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000064_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000064_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000065_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000065_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000066_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000066_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000067_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000067_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000068_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000068_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000069_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000069_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000070_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000070_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000071_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000071_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000072_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000072_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000073_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000073_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000074_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000074_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000075_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000075_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000076_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000076_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000077_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000077_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000078_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000078_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000079_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000079_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000080_000019_leftImg8bit.png 
gtFine/train/darmstadt/darmstadt_000080_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000081_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000081_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000082_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000082_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000083_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000083_000019_gtFine_labelTrainIds.png +leftImg8bit/train/darmstadt/darmstadt_000084_000019_leftImg8bit.png gtFine/train/darmstadt/darmstadt_000084_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000000_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000000_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000001_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000001_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000002_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000002_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000003_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000003_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000004_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000004_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000005_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000005_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000006_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000006_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000007_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000007_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000008_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000008_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000009_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000010_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000010_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000011_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000011_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000012_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000012_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000013_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000013_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000014_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000014_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000015_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000015_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000016_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000016_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000017_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000017_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000018_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000018_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000019_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000019_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/dusseldorf/dusseldorf_000020_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000020_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000021_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000021_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000022_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000022_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000023_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000023_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000024_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000024_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000025_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000025_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000026_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000026_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000027_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000027_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000028_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000028_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000029_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000029_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000030_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000030_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000031_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000031_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000032_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000032_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000033_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000033_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000034_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000034_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000035_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000035_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000036_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000036_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000037_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000037_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000038_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000038_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000039_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000039_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000040_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000040_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000041_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000041_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000042_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000042_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000043_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000043_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000044_000019_leftImg8bit.png 
gtFine/train/dusseldorf/dusseldorf_000044_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000045_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000045_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000046_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000046_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000047_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000047_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000048_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000048_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000049_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000049_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000050_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000050_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000051_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000051_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000052_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000052_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000053_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000053_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000054_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000054_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000055_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000055_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000056_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000056_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000057_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000057_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000058_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000058_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000059_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000059_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000060_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000060_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000061_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000061_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000062_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000062_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000063_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000063_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000064_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000064_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000065_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000065_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000066_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000067_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000067_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000068_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000068_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/dusseldorf/dusseldorf_000069_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000069_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000070_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000070_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000071_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000071_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000072_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000072_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000073_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000073_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000074_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000074_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000075_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000075_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000076_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000076_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000077_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000077_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000078_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000078_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000079_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000079_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000080_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000080_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000081_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000081_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000082_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000082_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000083_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000083_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000084_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000084_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000085_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000085_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000086_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000086_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000087_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000087_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000088_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000088_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000089_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000090_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000090_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000091_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000091_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000092_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000092_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000093_000019_leftImg8bit.png 
gtFine/train/dusseldorf/dusseldorf_000093_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000094_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000094_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000095_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000095_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000096_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000096_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000097_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000097_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000098_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000098_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000099_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000099_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000100_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000100_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000101_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000101_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000102_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000102_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000103_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000103_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000104_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000104_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000105_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000105_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000106_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000106_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000107_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000107_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000108_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000108_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000109_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000109_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000110_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000110_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000111_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000111_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000112_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000112_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000113_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000113_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000114_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000114_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000115_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000115_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000116_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000116_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000117_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000117_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/dusseldorf/dusseldorf_000118_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000118_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000119_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000119_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000120_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000120_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000121_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000121_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000122_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000122_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000123_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000123_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000124_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000124_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000125_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000125_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000126_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000126_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000127_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000127_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000128_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000128_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000129_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000129_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000130_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000130_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000131_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000131_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000132_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000132_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000133_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000133_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000134_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000134_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000135_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000135_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000136_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000136_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000137_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000137_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000138_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000138_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000139_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000139_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000140_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000140_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000141_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000141_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000142_000019_leftImg8bit.png 
gtFine/train/dusseldorf/dusseldorf_000142_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000143_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000143_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000144_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000144_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000145_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000145_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000146_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000146_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000147_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000147_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000148_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000148_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000149_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000149_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000150_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000150_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000151_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000151_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000152_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000152_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000153_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000153_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000154_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000155_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000155_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000156_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000156_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000157_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000157_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000158_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000158_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000159_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000159_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000160_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000160_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000161_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000161_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000162_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000162_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000163_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000163_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000164_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000164_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000165_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000165_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000166_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000166_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/dusseldorf/dusseldorf_000167_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000167_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000168_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000168_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000169_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000169_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000170_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000170_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000171_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000171_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000172_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000172_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000173_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000173_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000174_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000174_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000175_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000175_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000176_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000176_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000177_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000177_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000178_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000178_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000179_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000179_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000180_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000180_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000181_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000181_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000182_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000182_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000183_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000183_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000184_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000184_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000185_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000185_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000186_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000186_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000187_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000187_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000188_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000188_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000189_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000189_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000190_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000190_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000191_000019_leftImg8bit.png 
gtFine/train/dusseldorf/dusseldorf_000191_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000192_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000192_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000193_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000193_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000194_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000194_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000195_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000195_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000196_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000196_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000197_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000197_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000198_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000198_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000199_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000199_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000200_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000200_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000201_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000201_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000202_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000202_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000203_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000204_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000204_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000205_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000205_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000206_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000206_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000207_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000207_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000208_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000208_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000209_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000209_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000210_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000210_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000211_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000211_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000212_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000212_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000213_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000213_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000214_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000214_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000215_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000215_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/dusseldorf/dusseldorf_000216_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000216_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000217_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000217_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000218_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000218_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000219_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000219_000019_gtFine_labelTrainIds.png +leftImg8bit/train/dusseldorf/dusseldorf_000220_000019_leftImg8bit.png gtFine/train/dusseldorf/dusseldorf_000220_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000000_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000000_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000001_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000001_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000002_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000002_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000003_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000003_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000004_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000004_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000005_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000005_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000006_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000006_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000007_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000007_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000008_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000008_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000009_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000009_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000010_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000010_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000011_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000011_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000012_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000012_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000013_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000013_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000014_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000014_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000015_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000015_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000016_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000016_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000017_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000017_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000018_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000018_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000019_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000019_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000020_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000020_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000021_000019_leftImg8bit.png 
gtFine/train/erfurt/erfurt_000021_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000022_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000022_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000023_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000023_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000024_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000024_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000025_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000025_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000026_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000026_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000027_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000027_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000028_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000028_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000029_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000029_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000030_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000030_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000031_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000031_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000032_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000032_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000033_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000033_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000034_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000034_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000035_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000035_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000036_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000036_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000037_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000037_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000038_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000038_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000039_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000039_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000040_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000040_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000041_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000041_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000042_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000042_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000043_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000043_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000044_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000044_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000045_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000045_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000046_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000046_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000047_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000047_000019_gtFine_labelTrainIds.png +leftImg8bit/train/erfurt/erfurt_000048_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000048_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/erfurt/erfurt_000049_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000049_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000050_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000050_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000051_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000051_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000052_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000052_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000053_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000053_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000054_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000054_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000055_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000055_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000056_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000056_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000057_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000057_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000058_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000058_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000059_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000059_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000060_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000060_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000061_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000061_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000062_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000062_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000063_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000063_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000064_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000064_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000065_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000065_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000066_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000066_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000067_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000067_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000068_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000068_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000069_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000069_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000070_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000070_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000071_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000071_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000072_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000072_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000073_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000073_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000074_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000074_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000075_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000075_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000076_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000076_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000077_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000077_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000078_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000078_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000079_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000079_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000080_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000080_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000081_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000081_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000082_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000082_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000083_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000083_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000084_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000084_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000085_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000085_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000086_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000086_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000087_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000087_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000088_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000088_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000089_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000089_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000090_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000090_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000091_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000091_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000092_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000092_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000093_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000093_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000094_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000094_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000095_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000095_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000096_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000096_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000097_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000097_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000098_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000098_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000099_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000099_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000100_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000100_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000101_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000101_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000102_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000102_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000103_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000103_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000104_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000104_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000105_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000105_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000106_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000106_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000107_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000107_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/erfurt/erfurt_000108_000019_leftImg8bit.png gtFine/train/erfurt/erfurt_000108_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_000042_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_000042_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_000629_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_000629_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_001106_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_001106_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_001613_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_001613_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_002095_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_002095_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_002338_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_002338_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_003488_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_003488_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_003904_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_003904_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_004985_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_004985_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_005639_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_005639_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_006192_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_006192_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_006322_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_006322_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_007737_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_007737_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_008221_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_008221_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_008494_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_008494_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_011641_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_011641_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_013577_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_013577_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_014030_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_014030_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_014940_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_014940_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_015350_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_015350_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_016447_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_016447_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_016691_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_016691_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_016928_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_016928_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_018592_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_018592_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_018878_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_018878_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_019373_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_019373_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_019760_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_019760_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_019892_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_019892_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_020211_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_020211_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_020563_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_020563_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_021353_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_021353_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_021961_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_021961_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_022524_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_022524_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_023472_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_023472_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_024251_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_024251_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_025802_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_025802_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_025986_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_025986_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_026675_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_026675_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_027304_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_027304_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_027857_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_027857_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_028056_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_028056_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_028439_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_028439_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_028608_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_028608_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_029144_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_029144_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_029378_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_029378_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_029676_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_029676_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_030279_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_030279_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_030953_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_030953_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_031971_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_031971_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_032266_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_032266_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_032460_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_032460_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_032719_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_032719_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_032906_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_032906_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_033506_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_033506_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_034049_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_034049_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_035568_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_035568_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_036003_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_036003_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_036427_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_036427_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_036527_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_036527_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_037036_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_037036_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_037161_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_037161_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_037279_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_037279_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_037741_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_037741_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_038446_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_038446_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_038511_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_038511_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_038729_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_038729_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_038915_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_038915_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_039264_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_039264_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_039420_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_039420_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_039546_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_039546_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_040021_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_040021_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_041667_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_041667_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_042505_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_042505_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_042885_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_042885_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_043944_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_043944_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_044251_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_044251_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_044400_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_044400_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_044747_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_044747_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_044996_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_044996_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_045437_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_045437_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_045704_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_045704_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_045908_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_045908_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_046078_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_046078_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_046510_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_046510_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_046566_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_046566_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_046619_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_046619_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_046872_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_046872_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_047057_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_047057_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_047108_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_047108_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_047157_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_047157_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_047220_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_047220_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_047390_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_047390_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_048138_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_048138_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_048494_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_048494_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_048750_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_048750_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_048960_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_048960_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_049558_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_049558_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_050160_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_050160_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_051855_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_051855_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_052122_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_052122_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_052904_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_052904_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_053086_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_053086_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_053486_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_053486_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_053563_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_053563_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_053776_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_053776_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_053886_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_053886_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_054029_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_054029_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_054220_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_054220_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_054555_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_054555_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_054850_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_054850_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_055039_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_055039_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_055414_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_055414_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_055894_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_055894_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_056229_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_056229_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_056508_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_056508_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_057487_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_057487_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_057678_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_057678_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_057816_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_057816_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_058591_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_058591_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_059339_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_059339_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_059720_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_059720_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_060215_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_060215_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_060586_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_060586_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_060907_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_060907_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_061048_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_061048_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_061468_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_061468_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_061790_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_061790_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_062039_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_062039_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_062371_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_062371_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_062710_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_062710_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_062964_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_062964_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_063403_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_063403_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_063698_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_063698_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_064269_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_064269_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_064825_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_064825_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_065055_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_065055_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_065604_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_065604_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_065843_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_065843_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_065983_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_065983_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_066424_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_066424_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_066706_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_066706_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_066988_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_066988_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_067223_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_067223_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_067338_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_067338_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_067587_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_067587_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_067799_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_067799_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_068693_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_068693_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_068916_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_068916_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_069096_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_069096_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_069177_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_069177_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_069289_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_069289_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_069417_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_069417_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_070334_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_070334_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_070444_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_070444_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_071016_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_071016_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_071150_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_071150_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_071675_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_071675_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_071942_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_071942_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_073314_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_073314_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_073389_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_073389_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_073549_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_073549_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_073672_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_073672_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_073758_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_073758_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_073999_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_073999_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_074139_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_074139_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_074267_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_074267_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_074425_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_074425_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_074545_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_074545_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_074694_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_074694_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_076392_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_076392_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_076966_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_076966_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_077144_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_077144_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_077642_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_077642_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_077756_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_077756_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_077927_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_077927_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_078407_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_078407_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_078579_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_078579_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_078842_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_078842_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_079376_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_079376_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_079657_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_079657_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_080169_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_080169_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_080438_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_080438_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_080674_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_080674_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_080878_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_080878_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_081299_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_081299_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_082187_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_082187_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_082301_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_082301_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_083586_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_083586_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_083696_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_083696_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_084746_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_084746_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_084865_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_084865_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_085073_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_085073_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_085321_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_085321_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_085413_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_085413_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_085645_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_085645_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_085982_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_085982_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_086499_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_086499_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_086636_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_086636_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_087216_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_087216_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_087822_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_087822_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_088054_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_088054_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_088197_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_088197_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_088627_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_088627_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_088783_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_088783_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_088939_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_088939_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_088983_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_088983_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_089491_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_089491_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_089696_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_089696_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_090398_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_090398_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_090742_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_090742_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_091038_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_091038_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_091155_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_091155_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_091900_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_091900_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_092476_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_092476_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_092850_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_092850_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_093325_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_093325_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_093572_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_093572_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_093787_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_093787_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_094185_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_094185_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_094717_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_094717_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_095561_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_095561_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_096063_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_096063_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_096624_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_096624_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_097086_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_097086_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_097447_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_097447_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_098061_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_098061_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_098400_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_098400_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_098616_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_098616_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_098862_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_098862_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_099109_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_099109_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_099368_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_099368_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_099902_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_099902_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_100300_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_100300_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_101724_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_101724_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_102379_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_102379_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_102574_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_102574_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_103075_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_103075_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_103186_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_103186_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_103367_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_103367_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_103541_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_103541_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_103856_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_103856_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_104428_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_104428_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_104857_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_104857_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_105123_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_105123_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_105296_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_105296_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_105464_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_105464_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_105724_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_105724_gtFine_labelTrainIds.png
+leftImg8bit/train/hamburg/hamburg_000000_106102_leftImg8bit.png gtFine/train/hamburg/hamburg_000000_106102_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_000164_leftImg8bit.png gtFine/train/hanover/hanover_000000_000164_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_000381_leftImg8bit.png gtFine/train/hanover/hanover_000000_000381_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_000712_leftImg8bit.png gtFine/train/hanover/hanover_000000_000712_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_001173_leftImg8bit.png gtFine/train/hanover/hanover_000000_001173_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_001620_leftImg8bit.png gtFine/train/hanover/hanover_000000_001620_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_002140_leftImg8bit.png gtFine/train/hanover/hanover_000000_002140_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_002357_leftImg8bit.png gtFine/train/hanover/hanover_000000_002357_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_002458_leftImg8bit.png gtFine/train/hanover/hanover_000000_002458_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_003224_leftImg8bit.png gtFine/train/hanover/hanover_000000_003224_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_003411_leftImg8bit.png gtFine/train/hanover/hanover_000000_003411_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_003853_leftImg8bit.png gtFine/train/hanover/hanover_000000_003853_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_004230_leftImg8bit.png gtFine/train/hanover/hanover_000000_004230_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_004646_leftImg8bit.png gtFine/train/hanover/hanover_000000_004646_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_004752_leftImg8bit.png gtFine/train/hanover/hanover_000000_004752_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_005175_leftImg8bit.png gtFine/train/hanover/hanover_000000_005175_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_005288_leftImg8bit.png gtFine/train/hanover/hanover_000000_005288_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_005599_leftImg8bit.png gtFine/train/hanover/hanover_000000_005599_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_005732_leftImg8bit.png gtFine/train/hanover/hanover_000000_005732_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_005970_leftImg8bit.png gtFine/train/hanover/hanover_000000_005970_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_006355_leftImg8bit.png gtFine/train/hanover/hanover_000000_006355_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_006922_leftImg8bit.png gtFine/train/hanover/hanover_000000_006922_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_007342_leftImg8bit.png gtFine/train/hanover/hanover_000000_007342_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_007780_leftImg8bit.png gtFine/train/hanover/hanover_000000_007780_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_007897_leftImg8bit.png gtFine/train/hanover/hanover_000000_007897_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_008017_leftImg8bit.png gtFine/train/hanover/hanover_000000_008017_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_008200_leftImg8bit.png gtFine/train/hanover/hanover_000000_008200_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_009004_leftImg8bit.png gtFine/train/hanover/hanover_000000_009004_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_009128_leftImg8bit.png gtFine/train/hanover/hanover_000000_009128_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_009420_leftImg8bit.png gtFine/train/hanover/hanover_000000_009420_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_010403_leftImg8bit.png gtFine/train/hanover/hanover_000000_010403_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_010553_leftImg8bit.png gtFine/train/hanover/hanover_000000_010553_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_011170_leftImg8bit.png gtFine/train/hanover/hanover_000000_011170_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_011471_leftImg8bit.png gtFine/train/hanover/hanover_000000_011471_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_011971_leftImg8bit.png gtFine/train/hanover/hanover_000000_011971_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_012347_leftImg8bit.png gtFine/train/hanover/hanover_000000_012347_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_012675_leftImg8bit.png gtFine/train/hanover/hanover_000000_012675_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_013094_leftImg8bit.png gtFine/train/hanover/hanover_000000_013094_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_013205_leftImg8bit.png gtFine/train/hanover/hanover_000000_013205_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_013814_leftImg8bit.png gtFine/train/hanover/hanover_000000_013814_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_014319_leftImg8bit.png gtFine/train/hanover/hanover_000000_014319_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_014537_leftImg8bit.png gtFine/train/hanover/hanover_000000_014537_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_014713_leftImg8bit.png gtFine/train/hanover/hanover_000000_014713_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_014919_leftImg8bit.png gtFine/train/hanover/hanover_000000_014919_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_015587_leftImg8bit.png gtFine/train/hanover/hanover_000000_015587_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_015849_leftImg8bit.png gtFine/train/hanover/hanover_000000_015849_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_016038_leftImg8bit.png gtFine/train/hanover/hanover_000000_016038_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_016558_leftImg8bit.png gtFine/train/hanover/hanover_000000_016558_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_017041_leftImg8bit.png gtFine/train/hanover/hanover_000000_017041_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_018213_leftImg8bit.png gtFine/train/hanover/hanover_000000_018213_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_018546_leftImg8bit.png gtFine/train/hanover/hanover_000000_018546_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_018800_leftImg8bit.png gtFine/train/hanover/hanover_000000_018800_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_019116_leftImg8bit.png gtFine/train/hanover/hanover_000000_019116_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_019282_leftImg8bit.png gtFine/train/hanover/hanover_000000_019282_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_019456_leftImg8bit.png gtFine/train/hanover/hanover_000000_019456_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_019672_leftImg8bit.png gtFine/train/hanover/hanover_000000_019672_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_019938_leftImg8bit.png gtFine/train/hanover/hanover_000000_019938_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_020089_leftImg8bit.png gtFine/train/hanover/hanover_000000_020089_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_020655_leftImg8bit.png gtFine/train/hanover/hanover_000000_020655_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_021337_leftImg8bit.png gtFine/train/hanover/hanover_000000_021337_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_022645_leftImg8bit.png gtFine/train/hanover/hanover_000000_022645_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_023239_leftImg8bit.png gtFine/train/hanover/hanover_000000_023239_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_023276_leftImg8bit.png gtFine/train/hanover/hanover_000000_023276_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_023614_leftImg8bit.png gtFine/train/hanover/hanover_000000_023614_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_023881_leftImg8bit.png gtFine/train/hanover/hanover_000000_023881_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_023975_leftImg8bit.png gtFine/train/hanover/hanover_000000_023975_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_024136_leftImg8bit.png gtFine/train/hanover/hanover_000000_024136_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_024276_leftImg8bit.png gtFine/train/hanover/hanover_000000_024276_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_024441_leftImg8bit.png gtFine/train/hanover/hanover_000000_024441_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_024719_leftImg8bit.png gtFine/train/hanover/hanover_000000_024719_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_024989_leftImg8bit.png gtFine/train/hanover/hanover_000000_024989_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_025335_leftImg8bit.png gtFine/train/hanover/hanover_000000_025335_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_025437_leftImg8bit.png gtFine/train/hanover/hanover_000000_025437_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_026014_leftImg8bit.png gtFine/train/hanover/hanover_000000_026014_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_026183_leftImg8bit.png gtFine/train/hanover/hanover_000000_026183_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_026356_leftImg8bit.png gtFine/train/hanover/hanover_000000_026356_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_026743_leftImg8bit.png gtFine/train/hanover/hanover_000000_026743_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_026804_leftImg8bit.png gtFine/train/hanover/hanover_000000_026804_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_027007_leftImg8bit.png gtFine/train/hanover/hanover_000000_027007_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_027282_leftImg8bit.png gtFine/train/hanover/hanover_000000_027282_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_027390_leftImg8bit.png gtFine/train/hanover/hanover_000000_027390_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_027481_leftImg8bit.png gtFine/train/hanover/hanover_000000_027481_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_027561_leftImg8bit.png gtFine/train/hanover/hanover_000000_027561_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_027650_leftImg8bit.png gtFine/train/hanover/hanover_000000_027650_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_027766_leftImg8bit.png gtFine/train/hanover/hanover_000000_027766_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_027998_leftImg8bit.png gtFine/train/hanover/hanover_000000_027998_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_028202_leftImg8bit.png gtFine/train/hanover/hanover_000000_028202_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_028460_leftImg8bit.png gtFine/train/hanover/hanover_000000_028460_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_029043_leftImg8bit.png gtFine/train/hanover/hanover_000000_029043_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_029325_leftImg8bit.png gtFine/train/hanover/hanover_000000_029325_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_029404_leftImg8bit.png gtFine/train/hanover/hanover_000000_029404_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_029455_leftImg8bit.png gtFine/train/hanover/hanover_000000_029455_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_029769_leftImg8bit.png gtFine/train/hanover/hanover_000000_029769_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_030276_leftImg8bit.png gtFine/train/hanover/hanover_000000_030276_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_030346_leftImg8bit.png gtFine/train/hanover/hanover_000000_030346_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_030546_leftImg8bit.png gtFine/train/hanover/hanover_000000_030546_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_030781_leftImg8bit.png gtFine/train/hanover/hanover_000000_030781_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_030889_leftImg8bit.png gtFine/train/hanover/hanover_000000_030889_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_031144_leftImg8bit.png gtFine/train/hanover/hanover_000000_031144_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_031856_leftImg8bit.png gtFine/train/hanover/hanover_000000_031856_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_032210_leftImg8bit.png gtFine/train/hanover/hanover_000000_032210_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_032351_leftImg8bit.png gtFine/train/hanover/hanover_000000_032351_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_032559_leftImg8bit.png gtFine/train/hanover/hanover_000000_032559_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_032681_leftImg8bit.png gtFine/train/hanover/hanover_000000_032681_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_033457_leftImg8bit.png gtFine/train/hanover/hanover_000000_033457_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_034015_leftImg8bit.png gtFine/train/hanover/hanover_000000_034015_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_034141_leftImg8bit.png gtFine/train/hanover/hanover_000000_034141_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_034347_leftImg8bit.png gtFine/train/hanover/hanover_000000_034347_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_034560_leftImg8bit.png gtFine/train/hanover/hanover_000000_034560_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_034720_leftImg8bit.png gtFine/train/hanover/hanover_000000_034720_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_034935_leftImg8bit.png gtFine/train/hanover/hanover_000000_034935_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_035491_leftImg8bit.png gtFine/train/hanover/hanover_000000_035491_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_035606_leftImg8bit.png gtFine/train/hanover/hanover_000000_035606_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_035768_leftImg8bit.png gtFine/train/hanover/hanover_000000_035768_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_036051_leftImg8bit.png gtFine/train/hanover/hanover_000000_036051_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_036562_leftImg8bit.png gtFine/train/hanover/hanover_000000_036562_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_037039_leftImg8bit.png gtFine/train/hanover/hanover_000000_037039_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_037298_leftImg8bit.png gtFine/train/hanover/hanover_000000_037298_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_037516_leftImg8bit.png gtFine/train/hanover/hanover_000000_037516_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_038773_leftImg8bit.png gtFine/train/hanover/hanover_000000_038773_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_038855_leftImg8bit.png gtFine/train/hanover/hanover_000000_038855_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_038927_leftImg8bit.png gtFine/train/hanover/hanover_000000_038927_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_039021_leftImg8bit.png gtFine/train/hanover/hanover_000000_039021_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_039470_leftImg8bit.png gtFine/train/hanover/hanover_000000_039470_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_040051_leftImg8bit.png gtFine/train/hanover/hanover_000000_040051_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_040133_leftImg8bit.png gtFine/train/hanover/hanover_000000_040133_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_040221_leftImg8bit.png gtFine/train/hanover/hanover_000000_040221_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_040294_leftImg8bit.png gtFine/train/hanover/hanover_000000_040294_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_040456_leftImg8bit.png gtFine/train/hanover/hanover_000000_040456_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_040793_leftImg8bit.png gtFine/train/hanover/hanover_000000_040793_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_041232_leftImg8bit.png gtFine/train/hanover/hanover_000000_041232_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_041493_leftImg8bit.png gtFine/train/hanover/hanover_000000_041493_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_041610_leftImg8bit.png gtFine/train/hanover/hanover_000000_041610_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_042255_leftImg8bit.png gtFine/train/hanover/hanover_000000_042255_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_042382_leftImg8bit.png gtFine/train/hanover/hanover_000000_042382_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_042581_leftImg8bit.png gtFine/train/hanover/hanover_000000_042581_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_042770_leftImg8bit.png gtFine/train/hanover/hanover_000000_042770_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_042992_leftImg8bit.png gtFine/train/hanover/hanover_000000_042992_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_043102_leftImg8bit.png gtFine/train/hanover/hanover_000000_043102_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_043236_leftImg8bit.png gtFine/train/hanover/hanover_000000_043236_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_043550_leftImg8bit.png gtFine/train/hanover/hanover_000000_043550_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_043653_leftImg8bit.png gtFine/train/hanover/hanover_000000_043653_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_043822_leftImg8bit.png gtFine/train/hanover/hanover_000000_043822_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_044085_leftImg8bit.png gtFine/train/hanover/hanover_000000_044085_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_044195_leftImg8bit.png gtFine/train/hanover/hanover_000000_044195_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_044344_leftImg8bit.png gtFine/train/hanover/hanover_000000_044344_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_044622_leftImg8bit.png gtFine/train/hanover/hanover_000000_044622_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_045004_leftImg8bit.png gtFine/train/hanover/hanover_000000_045004_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_045188_leftImg8bit.png gtFine/train/hanover/hanover_000000_045188_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_045446_leftImg8bit.png gtFine/train/hanover/hanover_000000_045446_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_045657_leftImg8bit.png gtFine/train/hanover/hanover_000000_045657_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_045841_leftImg8bit.png gtFine/train/hanover/hanover_000000_045841_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_046200_leftImg8bit.png gtFine/train/hanover/hanover_000000_046200_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_046398_leftImg8bit.png gtFine/train/hanover/hanover_000000_046398_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_046572_leftImg8bit.png gtFine/train/hanover/hanover_000000_046572_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_046646_leftImg8bit.png gtFine/train/hanover/hanover_000000_046646_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_046732_leftImg8bit.png gtFine/train/hanover/hanover_000000_046732_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_046954_leftImg8bit.png gtFine/train/hanover/hanover_000000_046954_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_047499_leftImg8bit.png gtFine/train/hanover/hanover_000000_047499_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_047629_leftImg8bit.png gtFine/train/hanover/hanover_000000_047629_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_047870_leftImg8bit.png gtFine/train/hanover/hanover_000000_047870_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_048274_leftImg8bit.png gtFine/train/hanover/hanover_000000_048274_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_048379_leftImg8bit.png gtFine/train/hanover/hanover_000000_048379_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_048508_leftImg8bit.png gtFine/train/hanover/hanover_000000_048508_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_048765_leftImg8bit.png gtFine/train/hanover/hanover_000000_048765_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_049005_leftImg8bit.png gtFine/train/hanover/hanover_000000_049005_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_049269_leftImg8bit.png gtFine/train/hanover/hanover_000000_049269_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_049465_leftImg8bit.png gtFine/train/hanover/hanover_000000_049465_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_050228_leftImg8bit.png gtFine/train/hanover/hanover_000000_050228_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_050398_leftImg8bit.png gtFine/train/hanover/hanover_000000_050398_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_051059_leftImg8bit.png gtFine/train/hanover/hanover_000000_051059_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_051152_leftImg8bit.png gtFine/train/hanover/hanover_000000_051152_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_051271_leftImg8bit.png gtFine/train/hanover/hanover_000000_051271_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_051536_leftImg8bit.png gtFine/train/hanover/hanover_000000_051536_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_051842_leftImg8bit.png gtFine/train/hanover/hanover_000000_051842_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_052013_leftImg8bit.png gtFine/train/hanover/hanover_000000_052013_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_052512_leftImg8bit.png gtFine/train/hanover/hanover_000000_052512_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_052649_leftImg8bit.png gtFine/train/hanover/hanover_000000_052649_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_052729_leftImg8bit.png gtFine/train/hanover/hanover_000000_052729_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_052887_leftImg8bit.png gtFine/train/hanover/hanover_000000_052887_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_053027_leftImg8bit.png gtFine/train/hanover/hanover_000000_053027_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_053437_leftImg8bit.png gtFine/train/hanover/hanover_000000_053437_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_053604_leftImg8bit.png gtFine/train/hanover/hanover_000000_053604_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_054276_leftImg8bit.png gtFine/train/hanover/hanover_000000_054276_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_054965_leftImg8bit.png gtFine/train/hanover/hanover_000000_054965_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_055124_leftImg8bit.png gtFine/train/hanover/hanover_000000_055124_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_055592_leftImg8bit.png gtFine/train/hanover/hanover_000000_055592_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_055800_leftImg8bit.png gtFine/train/hanover/hanover_000000_055800_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_055937_leftImg8bit.png gtFine/train/hanover/hanover_000000_055937_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_056142_leftImg8bit.png gtFine/train/hanover/hanover_000000_056142_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_056361_leftImg8bit.png gtFine/train/hanover/hanover_000000_056361_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_056457_leftImg8bit.png gtFine/train/hanover/hanover_000000_056457_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_056601_leftImg8bit.png gtFine/train/hanover/hanover_000000_056601_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_056800_leftImg8bit.png gtFine/train/hanover/hanover_000000_056800_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_057532_leftImg8bit.png gtFine/train/hanover/hanover_000000_057532_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_057710_leftImg8bit.png gtFine/train/hanover/hanover_000000_057710_gtFine_labelTrainIds.png
+leftImg8bit/train/hanover/hanover_000000_058189_leftImg8bit.png gtFine/train/hanover/hanover_000000_058189_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000000_000019_leftImg8bit.png gtFine/train/jena/jena_000000_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000001_000019_leftImg8bit.png gtFine/train/jena/jena_000001_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000002_000019_leftImg8bit.png gtFine/train/jena/jena_000002_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000003_000019_leftImg8bit.png gtFine/train/jena/jena_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000004_000019_leftImg8bit.png gtFine/train/jena/jena_000004_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000005_000019_leftImg8bit.png gtFine/train/jena/jena_000005_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000006_000019_leftImg8bit.png gtFine/train/jena/jena_000006_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000007_000019_leftImg8bit.png gtFine/train/jena/jena_000007_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000008_000019_leftImg8bit.png gtFine/train/jena/jena_000008_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000009_000019_leftImg8bit.png gtFine/train/jena/jena_000009_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000010_000019_leftImg8bit.png gtFine/train/jena/jena_000010_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000011_000019_leftImg8bit.png gtFine/train/jena/jena_000011_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000012_000019_leftImg8bit.png gtFine/train/jena/jena_000012_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000013_000019_leftImg8bit.png gtFine/train/jena/jena_000013_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000014_000019_leftImg8bit.png gtFine/train/jena/jena_000014_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000015_000019_leftImg8bit.png gtFine/train/jena/jena_000015_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000016_000019_leftImg8bit.png gtFine/train/jena/jena_000016_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000017_000019_leftImg8bit.png gtFine/train/jena/jena_000017_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000018_000019_leftImg8bit.png gtFine/train/jena/jena_000018_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000019_000019_leftImg8bit.png gtFine/train/jena/jena_000019_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000020_000019_leftImg8bit.png gtFine/train/jena/jena_000020_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000021_000019_leftImg8bit.png gtFine/train/jena/jena_000021_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000022_000019_leftImg8bit.png gtFine/train/jena/jena_000022_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000023_000019_leftImg8bit.png gtFine/train/jena/jena_000023_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000024_000019_leftImg8bit.png gtFine/train/jena/jena_000024_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000025_000019_leftImg8bit.png gtFine/train/jena/jena_000025_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000026_000019_leftImg8bit.png gtFine/train/jena/jena_000026_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000027_000019_leftImg8bit.png gtFine/train/jena/jena_000027_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000028_000019_leftImg8bit.png gtFine/train/jena/jena_000028_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000029_000019_leftImg8bit.png gtFine/train/jena/jena_000029_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000030_000019_leftImg8bit.png gtFine/train/jena/jena_000030_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000031_000019_leftImg8bit.png gtFine/train/jena/jena_000031_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000032_000019_leftImg8bit.png gtFine/train/jena/jena_000032_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000033_000019_leftImg8bit.png gtFine/train/jena/jena_000033_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000034_000019_leftImg8bit.png gtFine/train/jena/jena_000034_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000035_000019_leftImg8bit.png gtFine/train/jena/jena_000035_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000036_000019_leftImg8bit.png gtFine/train/jena/jena_000036_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000037_000019_leftImg8bit.png gtFine/train/jena/jena_000037_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000038_000019_leftImg8bit.png gtFine/train/jena/jena_000038_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000039_000019_leftImg8bit.png gtFine/train/jena/jena_000039_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000040_000019_leftImg8bit.png gtFine/train/jena/jena_000040_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000041_000019_leftImg8bit.png gtFine/train/jena/jena_000041_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000042_000019_leftImg8bit.png gtFine/train/jena/jena_000042_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000043_000019_leftImg8bit.png gtFine/train/jena/jena_000043_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000044_000019_leftImg8bit.png gtFine/train/jena/jena_000044_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000045_000019_leftImg8bit.png gtFine/train/jena/jena_000045_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000046_000019_leftImg8bit.png gtFine/train/jena/jena_000046_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000047_000019_leftImg8bit.png gtFine/train/jena/jena_000047_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000048_000019_leftImg8bit.png gtFine/train/jena/jena_000048_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000049_000019_leftImg8bit.png gtFine/train/jena/jena_000049_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000050_000019_leftImg8bit.png gtFine/train/jena/jena_000050_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000051_000019_leftImg8bit.png gtFine/train/jena/jena_000051_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000052_000019_leftImg8bit.png gtFine/train/jena/jena_000052_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000053_000019_leftImg8bit.png gtFine/train/jena/jena_000053_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000054_000019_leftImg8bit.png gtFine/train/jena/jena_000054_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000055_000019_leftImg8bit.png gtFine/train/jena/jena_000055_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000056_000019_leftImg8bit.png gtFine/train/jena/jena_000056_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000057_000019_leftImg8bit.png gtFine/train/jena/jena_000057_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000058_000019_leftImg8bit.png gtFine/train/jena/jena_000058_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/jena/jena_000059_000019_leftImg8bit.png
gtFine/train/jena/jena_000059_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000060_000019_leftImg8bit.png gtFine/train/jena/jena_000060_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000061_000019_leftImg8bit.png gtFine/train/jena/jena_000061_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000062_000019_leftImg8bit.png gtFine/train/jena/jena_000062_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000063_000019_leftImg8bit.png gtFine/train/jena/jena_000063_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000064_000019_leftImg8bit.png gtFine/train/jena/jena_000064_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000065_000019_leftImg8bit.png gtFine/train/jena/jena_000065_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000066_000019_leftImg8bit.png gtFine/train/jena/jena_000066_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000067_000019_leftImg8bit.png gtFine/train/jena/jena_000067_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000068_000019_leftImg8bit.png gtFine/train/jena/jena_000068_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000069_000019_leftImg8bit.png gtFine/train/jena/jena_000069_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000070_000019_leftImg8bit.png gtFine/train/jena/jena_000070_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000071_000019_leftImg8bit.png gtFine/train/jena/jena_000071_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000072_000019_leftImg8bit.png gtFine/train/jena/jena_000072_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000073_000019_leftImg8bit.png gtFine/train/jena/jena_000073_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000074_000019_leftImg8bit.png gtFine/train/jena/jena_000074_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000075_000019_leftImg8bit.png gtFine/train/jena/jena_000075_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000076_000019_leftImg8bit.png gtFine/train/jena/jena_000076_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000077_000019_leftImg8bit.png gtFine/train/jena/jena_000077_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000078_000019_leftImg8bit.png gtFine/train/jena/jena_000078_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000079_000019_leftImg8bit.png gtFine/train/jena/jena_000079_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000080_000019_leftImg8bit.png gtFine/train/jena/jena_000080_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000081_000019_leftImg8bit.png gtFine/train/jena/jena_000081_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000082_000019_leftImg8bit.png gtFine/train/jena/jena_000082_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000083_000019_leftImg8bit.png gtFine/train/jena/jena_000083_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000084_000019_leftImg8bit.png gtFine/train/jena/jena_000084_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000085_000019_leftImg8bit.png gtFine/train/jena/jena_000085_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000086_000019_leftImg8bit.png gtFine/train/jena/jena_000086_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000087_000019_leftImg8bit.png gtFine/train/jena/jena_000087_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000088_000019_leftImg8bit.png 
gtFine/train/jena/jena_000088_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000089_000019_leftImg8bit.png gtFine/train/jena/jena_000089_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000090_000019_leftImg8bit.png gtFine/train/jena/jena_000090_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000091_000019_leftImg8bit.png gtFine/train/jena/jena_000091_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000092_000019_leftImg8bit.png gtFine/train/jena/jena_000092_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000093_000019_leftImg8bit.png gtFine/train/jena/jena_000093_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000094_000019_leftImg8bit.png gtFine/train/jena/jena_000094_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000095_000019_leftImg8bit.png gtFine/train/jena/jena_000095_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000096_000019_leftImg8bit.png gtFine/train/jena/jena_000096_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000097_000019_leftImg8bit.png gtFine/train/jena/jena_000097_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000098_000019_leftImg8bit.png gtFine/train/jena/jena_000098_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000099_000019_leftImg8bit.png gtFine/train/jena/jena_000099_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000100_000019_leftImg8bit.png gtFine/train/jena/jena_000100_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000101_000019_leftImg8bit.png gtFine/train/jena/jena_000101_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000102_000019_leftImg8bit.png gtFine/train/jena/jena_000102_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000103_000019_leftImg8bit.png gtFine/train/jena/jena_000103_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000104_000019_leftImg8bit.png gtFine/train/jena/jena_000104_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000105_000019_leftImg8bit.png gtFine/train/jena/jena_000105_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000106_000019_leftImg8bit.png gtFine/train/jena/jena_000106_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000107_000019_leftImg8bit.png gtFine/train/jena/jena_000107_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000108_000019_leftImg8bit.png gtFine/train/jena/jena_000108_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000109_000019_leftImg8bit.png gtFine/train/jena/jena_000109_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000110_000019_leftImg8bit.png gtFine/train/jena/jena_000110_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000111_000019_leftImg8bit.png gtFine/train/jena/jena_000111_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000112_000019_leftImg8bit.png gtFine/train/jena/jena_000112_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000113_000019_leftImg8bit.png gtFine/train/jena/jena_000113_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000114_000019_leftImg8bit.png gtFine/train/jena/jena_000114_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000115_000019_leftImg8bit.png gtFine/train/jena/jena_000115_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000116_000019_leftImg8bit.png gtFine/train/jena/jena_000116_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000117_000019_leftImg8bit.png 
gtFine/train/jena/jena_000117_000019_gtFine_labelTrainIds.png +leftImg8bit/train/jena/jena_000118_000019_leftImg8bit.png gtFine/train/jena/jena_000118_000019_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_000108_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_000108_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_000316_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_000316_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_000442_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_000442_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_000926_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_000926_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_001566_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_001566_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_001908_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_001908_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_002083_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_002083_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_003096_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_003096_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_003707_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_003707_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_003937_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_003937_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_004447_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_004447_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_004608_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_004608_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_005252_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_005252_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_005503_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_005503_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_006274_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_006274_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_006686_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_006686_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_007325_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_007325_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_008239_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_008239_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_008305_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_008305_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_008584_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_008584_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_009404_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_009404_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_009574_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_009574_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_009926_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_009926_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_010160_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_010160_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_010329_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_010329_gtFine_labelTrainIds.png 
+leftImg8bit/train/krefeld/krefeld_000000_010653_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_010653_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_011483_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_011483_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_011655_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_011655_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_012353_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_012353_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_012505_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_012505_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_013139_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_013139_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_013257_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_013257_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_013766_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_013766_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_014146_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_014146_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_014673_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_014673_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_014886_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_014886_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_015116_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_015116_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_015494_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_015494_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_015687_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_015687_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_015868_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_015868_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_016342_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_016342_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_016863_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_016863_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_017042_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_017042_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_017342_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_017342_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_017489_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_017489_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_018004_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_018004_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_018514_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_018514_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_018747_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_018747_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_018866_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_018866_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_019125_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_019125_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_019697_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_019697_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_019791_leftImg8bit.png 
gtFine/train/krefeld/krefeld_000000_019791_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_020033_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_020033_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_020334_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_020334_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_020624_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_020624_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_020873_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_020873_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_020933_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_020933_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_021000_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_021000_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_021222_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_021222_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_021553_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_021553_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_021814_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_021814_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_022162_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_022162_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_023143_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_023143_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_023338_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_023338_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_023510_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_023510_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_023698_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_023698_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_024276_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_024276_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_024362_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_024362_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_024604_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_024604_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_024921_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_024921_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_025434_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_025434_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_025812_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_025812_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_026269_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_026269_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_026580_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_026580_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_026919_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_026919_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_027075_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_027075_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_027596_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_027596_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_027954_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_027954_gtFine_labelTrainIds.png 
+leftImg8bit/train/krefeld/krefeld_000000_028378_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_028378_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_028638_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_028638_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_029050_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_029050_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_029704_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_029704_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_030111_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_030111_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_030221_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_030221_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_030400_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_030400_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_030560_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_030560_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_030701_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_030701_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_031257_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_031257_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_032390_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_032390_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_032614_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_032614_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_032845_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_032845_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_033478_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_033478_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_034156_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_034156_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_034231_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_034231_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_034389_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_034389_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_034686_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_034686_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_035124_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_035124_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_035398_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_035398_gtFine_labelTrainIds.png +leftImg8bit/train/krefeld/krefeld_000000_036299_leftImg8bit.png gtFine/train/krefeld/krefeld_000000_036299_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_000076_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_000076_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_000383_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_000383_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_001068_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_001068_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_001294_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_001294_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_002255_leftImg8bit.png 
gtFine/train/monchengladbach/monchengladbach_000000_002255_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_002478_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_002478_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_002972_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_002972_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_003442_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_003442_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_004580_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_004580_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_005138_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_005138_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_005686_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_005686_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_005876_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_005876_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_006169_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_006169_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_006518_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_006518_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_007098_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_007098_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_007695_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_007695_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_007851_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_007851_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_009191_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_009191_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_009615_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_009615_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_009690_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_009690_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_009930_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_009930_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_010280_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_010280_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_010505_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_010505_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_010733_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_010733_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_010860_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_010860_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_011383_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_011383_gtFine_labelTrainIds.png 
+leftImg8bit/train/monchengladbach/monchengladbach_000000_012376_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_012376_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_012672_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_012672_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_013228_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_013228_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_013352_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_013352_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_014685_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_014685_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_015126_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_015126_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_015561_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_015561_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_015685_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_015685_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_015928_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_015928_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_017950_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_017950_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_018114_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_018114_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_018294_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_018294_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_018445_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_018445_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_018575_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_018575_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_018720_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_018720_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_019142_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_019142_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_019500_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_019500_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_019682_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_019682_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_019901_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_019901_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_020303_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_020303_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_020596_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_020596_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_020856_leftImg8bit.png 
gtFine/train/monchengladbach/monchengladbach_000000_020856_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_021104_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_021104_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_021663_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_021663_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_022361_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_022361_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_022748_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_022748_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_023052_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_023052_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_023375_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_023375_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_023489_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_023489_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_023856_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_023856_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_024243_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_024243_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_024637_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_024637_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_024964_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_024964_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_025215_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_025215_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_026006_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_026006_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_026305_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_026305_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_026602_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_026602_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_026908_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_026908_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_027628_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_027628_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_028216_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_028216_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_028563_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_028563_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_028883_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_028883_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_029240_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_029240_gtFine_labelTrainIds.png 
+leftImg8bit/train/monchengladbach/monchengladbach_000000_029526_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_029526_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_030010_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_030010_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_030662_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_030662_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_031005_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_031005_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_031360_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_031360_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_031623_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_031623_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_032540_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_032540_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_033454_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_033454_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_033683_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_033683_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_034302_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_034302_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_034621_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_034621_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_034930_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_034930_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_035083_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_035083_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_035364_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_035364_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_035650_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_035650_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_035718_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_035718_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000000_036139_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000000_036139_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000001_000054_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000001_000054_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000001_000168_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000001_000168_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000001_000537_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000001_000537_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000001_000876_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000001_000876_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000001_001531_leftImg8bit.png 
gtFine/train/monchengladbach/monchengladbach_000001_001531_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000001_001936_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000001_001936_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000001_002229_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000001_002229_gtFine_labelTrainIds.png +leftImg8bit/train/monchengladbach/monchengladbach_000001_002353_leftImg8bit.png gtFine/train/monchengladbach/monchengladbach_000001_002353_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_000065_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_000065_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_000295_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_000295_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_000751_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_000751_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_001278_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_001278_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_002183_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_002183_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_002553_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_002553_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_003632_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_003632_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_003846_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_003846_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_004112_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_004112_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_004248_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_004248_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_004383_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_004383_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_004660_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_004660_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_004951_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_004951_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_005249_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_005249_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_005912_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_005912_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_005995_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_005995_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_006106_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_006106_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_006264_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_006264_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_006483_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_006483_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_006621_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_006621_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_006995_leftImg8bit.png 
gtFine/train/strasbourg/strasbourg_000000_006995_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_007441_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_007441_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_007727_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_007727_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_007813_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_007813_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_008603_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_008603_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_008677_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_008677_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_008784_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_008784_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_009110_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_009110_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_009619_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_009619_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_010049_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_010049_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_010372_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_010372_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_010816_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_010816_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_011225_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_011225_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_011880_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_011880_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_012070_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_012070_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_012934_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_012934_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_013223_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_013223_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_013322_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_013322_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_013574_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_013574_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_013654_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_013654_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_013863_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_013863_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_013944_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_013944_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_014066_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_014066_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_014101_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_014101_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_014235_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_014235_gtFine_labelTrainIds.png 
+leftImg8bit/train/strasbourg/strasbourg_000000_014416_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_014416_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_014503_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_014503_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_014584_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_014584_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_014743_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_014743_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_014931_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_014931_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_015131_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_015131_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_015506_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_015506_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_015602_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_015602_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_015764_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_015764_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_016024_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_016024_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_016247_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_016247_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_016311_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_016311_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_016436_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_016436_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_017044_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_017044_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_017081_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_017081_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_017159_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_017159_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_017283_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_017283_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_017450_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_017450_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_017593_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_017593_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_017761_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_017761_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_018153_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_018153_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_018358_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_018358_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_018616_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_018616_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_018874_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_018874_gtFine_labelTrainIds.png +leftImg8bit/train/strasbourg/strasbourg_000000_019050_leftImg8bit.png 
gtFine/train/strasbourg/strasbourg_000000_019050_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_019229_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_019229_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_019355_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_019355_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_019617_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_019617_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_019891_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_019891_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_020432_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_020432_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_020653_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_020653_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_021231_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_021231_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_021651_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_021651_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_022067_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_022067_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_022489_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_022489_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_023064_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_023064_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_023694_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_023694_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_023854_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_023854_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_024179_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_024179_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_024945_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_024945_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_025089_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_025089_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_025268_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_025268_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_025351_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_025351_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_025491_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_025491_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_025772_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_025772_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_025907_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_025907_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_026316_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_026316_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_026575_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_026575_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_026611_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_026611_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_026741_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_026741_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_026882_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_026882_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_026998_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_026998_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_027156_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_027156_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_027233_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_027233_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_027771_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_027771_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_028240_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_028240_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_028556_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_028556_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_028628_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_028628_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_028822_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_028822_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_028912_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_028912_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_029020_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_029020_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_029051_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_029051_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_029179_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_029179_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_029281_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_029281_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_029339_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_029339_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_029400_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_029400_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_029481_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_029481_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_029577_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_029577_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_029729_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_029729_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_029839_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_029839_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_029915_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_029915_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_030017_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_030017_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_030122_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_030122_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_030324_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_030324_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_030435_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_030435_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_030706_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_030706_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_030941_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_030941_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_031067_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_031067_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_031223_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_031223_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_031323_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_031323_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_031602_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_031602_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_032186_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_032186_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_032346_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_032346_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_032962_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_032962_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_033062_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_033062_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_033129_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_033129_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_033425_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_033425_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_033747_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_033747_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_033838_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_033838_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_034040_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_034040_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_034097_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_034097_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_034387_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_034387_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_034652_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_034652_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_035008_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_035008_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_035255_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_035255_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_035571_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_035571_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_035713_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_035713_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_035942_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_035942_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000000_036016_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000000_036016_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_000113_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_000113_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_000508_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_000508_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_000710_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_000710_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_000778_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_000778_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_001072_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_001072_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_001449_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_001449_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_001722_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_001722_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_001901_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_001901_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_002081_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_002081_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_002216_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_002216_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_002354_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_002354_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_002519_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_002519_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_002644_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_002644_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_002949_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_002949_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_003159_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_003159_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_003489_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_003489_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_003676_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_003676_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_003991_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_003991_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_004106_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_004106_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_004260_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_004260_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_004745_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_004745_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_004983_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_004983_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_005219_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_005219_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_005289_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_005289_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_005666_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_005666_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_005876_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_005876_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_006153_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_006153_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_006386_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_006386_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_006562_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_006562_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_006916_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_006916_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_007148_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_007148_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_007524_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_007524_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_007657_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_007657_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_007864_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_007864_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_008310_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_008310_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_008576_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_008576_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_008771_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_008771_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_009097_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_009097_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_009246_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_009246_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_009333_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_009333_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_009471_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_009471_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_009618_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_009618_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_009795_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_009795_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_010162_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_010162_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_010445_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_010445_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_010640_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_010640_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_010755_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_010755_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_011617_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_011617_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_011775_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_011775_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_011990_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_011990_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_012956_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_012956_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_013266_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_013266_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_013767_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_013767_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_013914_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_013914_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_014033_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_014033_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_014258_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_014258_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_014629_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_014629_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_015220_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_015220_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_015605_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_015605_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_015974_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_015974_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_016253_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_016253_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_016376_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_016376_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_016481_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_016481_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_016681_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_016681_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_017469_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_017469_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_017540_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_017540_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_017675_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_017675_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_017844_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_017844_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_018155_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_018155_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_018432_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_018432_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_018742_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_018742_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_018872_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_018872_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_019247_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_019247_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_019698_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_019698_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_020904_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_020904_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_020956_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_020956_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_021951_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_021951_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_022151_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_022151_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_022363_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_022363_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_022560_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_022560_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_022836_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_022836_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_023271_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_023271_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_023515_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_023515_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_024152_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_024152_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_024379_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_024379_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_024701_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_024701_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_025426_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_025426_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_025833_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_025833_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_026106_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_026106_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_026355_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_026355_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_026606_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_026606_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_026856_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_026856_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_027097_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_027097_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_028379_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_028379_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_028852_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_028852_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_029178_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_029178_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_029696_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_029696_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_029980_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_029980_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_030120_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_030120_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_030269_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_030269_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_030539_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_030539_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_030725_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_030725_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_030839_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_030839_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_030997_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_030997_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_031116_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_031116_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_031272_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_031272_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_031427_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_031427_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_031582_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_031582_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_031683_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_031683_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_031976_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_031976_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_032315_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_032315_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_032660_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_032660_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_033027_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_033027_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_033448_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_033448_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_033925_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_033925_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_034375_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_034375_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_034494_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_034494_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_034633_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_034633_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_034923_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_034923_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_035276_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_035276_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_035562_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_035562_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_035689_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_035689_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_036232_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_036232_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_036480_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_036480_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_036697_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_036697_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_036937_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_036937_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_037090_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_037090_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_037645_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_037645_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_037776_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_037776_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_037906_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_037906_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_038281_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_038281_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_039114_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_039114_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_039231_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_039231_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_039374_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_039374_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_039446_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_039446_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_039558_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_039558_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_039703_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_039703_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_040564_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_040564_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_040620_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_040620_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_040761_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_040761_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_040981_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_040981_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_041215_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_041215_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_042235_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_042235_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_042309_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_042309_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_042434_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_042434_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_042558_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_042558_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_042869_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_042869_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_043080_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_043080_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_043748_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_043748_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_043886_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_043886_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_044219_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_044219_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_045135_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_045135_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_045481_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_045481_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_045880_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_045880_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_046324_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_046324_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_047336_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_047336_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_047619_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_047619_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_047702_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_047702_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_047755_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_047755_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_047955_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_047955_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_048121_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_048121_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_048605_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_048605_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_049143_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_049143_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_049399_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_049399_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_049776_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_049776_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_049977_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_049977_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_050098_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_050098_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_051134_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_051134_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_051317_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_051317_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_051448_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_051448_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_051574_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_051574_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_051661_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_051661_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_051877_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_051877_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_051934_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_051934_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_052050_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_052050_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_052198_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_052198_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_052297_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_052297_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_052430_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_052430_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_052497_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_052497_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_052544_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_052544_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_052840_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_052840_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_052979_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_052979_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_053222_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_053222_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_053579_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_053579_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_053976_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_053976_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_054275_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_054275_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_054639_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_054639_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_055273_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_055273_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_055698_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_055698_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_055860_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_055860_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_055934_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_055934_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_056142_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_056142_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_056330_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_056330_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_056857_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_056857_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_057129_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_057129_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_057191_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_057191_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_057517_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_057517_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_057811_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_057811_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_057930_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_057930_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_058105_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_058105_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_058373_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_058373_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_058954_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_058954_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_059433_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_059433_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_059675_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_059675_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_059914_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_059914_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_060061_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_060061_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_060173_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_060173_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_060821_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_060821_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_061285_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_061285_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_061384_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_061384_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_061472_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_061472_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_061685_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_061685_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_062362_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_062362_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_062542_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_062542_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_062691_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_062691_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_063385_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_063385_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_063808_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_063808_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_064224_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_064224_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_064393_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_064393_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_065214_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_065214_gtFine_labelTrainIds.png
+leftImg8bit/train/strasbourg/strasbourg_000001_065572_leftImg8bit.png gtFine/train/strasbourg/strasbourg_000001_065572_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000000_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000000_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000001_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000001_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000002_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000002_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000003_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000004_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000004_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000005_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000005_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000006_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000006_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000007_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000007_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000008_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000008_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000009_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000009_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000010_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000010_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000011_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000011_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000012_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000012_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000013_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000013_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000014_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000014_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000015_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000015_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000016_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000016_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000017_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000017_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000018_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000018_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000019_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000019_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000020_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000020_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000021_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000021_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000022_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000022_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000023_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000023_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000024_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000024_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000025_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000025_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000026_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000026_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000027_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000027_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000028_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000028_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000029_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000029_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000030_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000030_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000031_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000031_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000032_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000032_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000033_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000033_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000034_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000034_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000035_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000035_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000036_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000036_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000037_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000037_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000038_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000038_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000039_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000039_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000040_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000040_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000041_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000041_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000042_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000042_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000043_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000043_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000044_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000044_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000045_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000045_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000046_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000046_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000047_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000047_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000048_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000048_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000049_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000049_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000050_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000050_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000051_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000051_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000052_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000052_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000053_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000053_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000054_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000054_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000055_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000055_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000056_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000056_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000057_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000057_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000058_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000058_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000059_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000059_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000060_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000060_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000061_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000061_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000062_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000062_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000063_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000063_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000064_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000064_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000065_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000065_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000066_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000066_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000067_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000067_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000068_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000068_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000069_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000069_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000070_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000070_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000071_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000071_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000072_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000072_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000073_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000073_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000074_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000074_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000075_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000075_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000076_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000076_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000077_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000077_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000078_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000078_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000079_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000079_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000080_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000080_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000081_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000081_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000082_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000082_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000083_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000083_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000084_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000084_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000085_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000085_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000086_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000086_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000087_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000087_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000088_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000088_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000089_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000089_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000090_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000090_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000091_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000091_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000092_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000092_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000093_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000093_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000094_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000094_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000095_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000095_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000096_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000096_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000097_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000097_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000098_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000098_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000099_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000099_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000100_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000100_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000101_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000101_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000102_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000102_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000103_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000103_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000104_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000104_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000105_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000105_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000106_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000106_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000107_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000107_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000108_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000108_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000109_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000109_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000110_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000110_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000111_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000111_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000112_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000112_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000113_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000113_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000114_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000114_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000115_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000115_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000116_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000116_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000117_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000117_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000118_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000118_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000119_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000119_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000120_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000120_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000121_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000121_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000122_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000122_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000123_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000123_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000124_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000124_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000125_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000125_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000126_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000126_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000127_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000127_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000128_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000128_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000129_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000129_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000130_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000130_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000131_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000131_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000132_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000132_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000133_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000133_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000134_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000134_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000135_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000135_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000136_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000136_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000137_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000137_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000138_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000138_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000139_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000139_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000140_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000140_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000141_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000141_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000142_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000142_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000143_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000143_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000144_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000144_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000145_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000145_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000146_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000146_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000147_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000147_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000148_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000148_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000149_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000149_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000150_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000150_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000151_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000151_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000152_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000152_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000153_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000153_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000154_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000154_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000155_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000155_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000156_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000156_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000157_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000157_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000158_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000158_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000159_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000159_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000160_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000160_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000161_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000161_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000162_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000162_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000163_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000163_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000164_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000164_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000165_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000165_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000166_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000166_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000167_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000167_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000168_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000168_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000169_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000169_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000170_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000170_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000171_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000171_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000172_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000172_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000173_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000173_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000174_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000174_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000175_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000175_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000176_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000176_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000177_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000177_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000178_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000178_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000179_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000179_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000180_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000180_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000181_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000181_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000182_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000182_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000183_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000183_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000184_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000184_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000185_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000185_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000186_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000186_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000187_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000187_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000188_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000188_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000189_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000189_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000190_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000190_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000191_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000191_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000192_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000192_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000193_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000193_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000194_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000194_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/stuttgart/stuttgart_000195_000019_leftImg8bit.png gtFine/train/stuttgart/stuttgart_000195_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000000_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000000_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000001_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000001_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000002_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000002_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000003_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000004_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000004_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000005_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000005_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000006_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000006_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000007_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000007_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000008_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000008_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000009_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000009_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000010_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000010_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000011_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000011_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000012_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000012_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000013_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000013_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000014_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000014_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000015_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000015_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000016_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000016_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000017_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000017_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000018_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000018_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000019_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000019_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000020_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000020_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000021_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000021_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000022_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000022_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000023_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000023_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000024_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000024_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000025_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000025_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000026_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000026_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000027_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000027_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/tubingen/tubingen_000028_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000028_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000029_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000029_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000030_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000030_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000031_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000031_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000032_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000032_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000033_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000033_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000034_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000034_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000035_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000035_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000036_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000036_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000037_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000037_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000038_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000038_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000039_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000039_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000040_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000040_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000041_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000041_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000042_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000042_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000043_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000043_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000044_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000044_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000045_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000045_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000046_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000046_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000047_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000047_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000048_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000048_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000049_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000049_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000050_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000050_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000051_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000051_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000052_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000052_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000053_000019_leftImg8bit.png 
gtFine/train/tubingen/tubingen_000053_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000054_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000054_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000055_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000055_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000056_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000056_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000057_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000057_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000058_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000058_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000059_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000059_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000060_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000060_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000061_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000061_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000062_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000062_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000063_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000063_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000064_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000064_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000065_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000065_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000066_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000066_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000067_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000067_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000068_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000068_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000069_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000069_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000070_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000070_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000071_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000071_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000072_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000072_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000073_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000073_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000074_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000074_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000075_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000075_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000076_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000076_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000077_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000077_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000078_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000078_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/tubingen/tubingen_000079_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000079_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000080_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000080_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000081_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000081_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000082_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000082_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000083_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000083_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000084_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000084_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000085_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000085_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000086_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000086_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000087_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000087_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000088_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000088_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000089_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000089_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000090_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000090_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000091_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000091_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000092_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000092_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000093_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000093_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000094_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000094_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000095_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000095_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000096_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000096_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000097_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000097_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000098_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000098_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000099_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000099_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000100_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000100_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000101_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000101_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000102_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000102_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000103_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000103_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000104_000019_leftImg8bit.png 
gtFine/train/tubingen/tubingen_000104_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000105_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000105_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000106_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000106_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000107_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000107_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000108_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000108_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000109_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000109_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000110_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000110_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000111_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000111_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000112_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000112_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000113_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000113_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000114_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000114_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000115_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000115_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000116_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000116_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000117_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000117_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000118_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000118_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000119_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000119_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000120_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000120_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000121_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000121_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000122_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000122_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000123_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000123_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000124_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000124_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000125_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000125_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000126_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000126_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000127_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000127_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000128_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000128_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000129_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000129_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/tubingen/tubingen_000130_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000130_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000131_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000131_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000132_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000132_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000133_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000133_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000134_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000134_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000135_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000135_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000136_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000136_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000137_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000137_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000138_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000138_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000139_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000139_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000140_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000140_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000141_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000141_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000142_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000142_000019_gtFine_labelTrainIds.png +leftImg8bit/train/tubingen/tubingen_000143_000019_leftImg8bit.png gtFine/train/tubingen/tubingen_000143_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000000_000019_leftImg8bit.png gtFine/train/ulm/ulm_000000_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000001_000019_leftImg8bit.png gtFine/train/ulm/ulm_000001_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000002_000019_leftImg8bit.png gtFine/train/ulm/ulm_000002_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000003_000019_leftImg8bit.png gtFine/train/ulm/ulm_000003_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000004_000019_leftImg8bit.png gtFine/train/ulm/ulm_000004_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000005_000019_leftImg8bit.png gtFine/train/ulm/ulm_000005_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000006_000019_leftImg8bit.png gtFine/train/ulm/ulm_000006_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000007_000019_leftImg8bit.png gtFine/train/ulm/ulm_000007_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000008_000019_leftImg8bit.png gtFine/train/ulm/ulm_000008_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000009_000019_leftImg8bit.png gtFine/train/ulm/ulm_000009_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000010_000019_leftImg8bit.png gtFine/train/ulm/ulm_000010_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000011_000019_leftImg8bit.png gtFine/train/ulm/ulm_000011_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000012_000019_leftImg8bit.png gtFine/train/ulm/ulm_000012_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000013_000019_leftImg8bit.png 
gtFine/train/ulm/ulm_000013_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000014_000019_leftImg8bit.png gtFine/train/ulm/ulm_000014_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000015_000019_leftImg8bit.png gtFine/train/ulm/ulm_000015_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000016_000019_leftImg8bit.png gtFine/train/ulm/ulm_000016_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000017_000019_leftImg8bit.png gtFine/train/ulm/ulm_000017_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000018_000019_leftImg8bit.png gtFine/train/ulm/ulm_000018_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000019_000019_leftImg8bit.png gtFine/train/ulm/ulm_000019_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000020_000019_leftImg8bit.png gtFine/train/ulm/ulm_000020_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000021_000019_leftImg8bit.png gtFine/train/ulm/ulm_000021_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000022_000019_leftImg8bit.png gtFine/train/ulm/ulm_000022_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000023_000019_leftImg8bit.png gtFine/train/ulm/ulm_000023_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000024_000019_leftImg8bit.png gtFine/train/ulm/ulm_000024_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000025_000019_leftImg8bit.png gtFine/train/ulm/ulm_000025_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000026_000019_leftImg8bit.png gtFine/train/ulm/ulm_000026_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000027_000019_leftImg8bit.png gtFine/train/ulm/ulm_000027_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000028_000019_leftImg8bit.png gtFine/train/ulm/ulm_000028_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000029_000019_leftImg8bit.png gtFine/train/ulm/ulm_000029_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000030_000019_leftImg8bit.png gtFine/train/ulm/ulm_000030_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000031_000019_leftImg8bit.png gtFine/train/ulm/ulm_000031_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000032_000019_leftImg8bit.png gtFine/train/ulm/ulm_000032_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000033_000019_leftImg8bit.png gtFine/train/ulm/ulm_000033_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000034_000019_leftImg8bit.png gtFine/train/ulm/ulm_000034_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000035_000019_leftImg8bit.png gtFine/train/ulm/ulm_000035_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000036_000019_leftImg8bit.png gtFine/train/ulm/ulm_000036_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000037_000019_leftImg8bit.png gtFine/train/ulm/ulm_000037_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000038_000019_leftImg8bit.png gtFine/train/ulm/ulm_000038_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000039_000019_leftImg8bit.png gtFine/train/ulm/ulm_000039_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000040_000019_leftImg8bit.png gtFine/train/ulm/ulm_000040_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000041_000019_leftImg8bit.png gtFine/train/ulm/ulm_000041_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000042_000019_leftImg8bit.png gtFine/train/ulm/ulm_000042_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000043_000019_leftImg8bit.png 
gtFine/train/ulm/ulm_000043_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000044_000019_leftImg8bit.png gtFine/train/ulm/ulm_000044_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000045_000019_leftImg8bit.png gtFine/train/ulm/ulm_000045_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000046_000019_leftImg8bit.png gtFine/train/ulm/ulm_000046_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000047_000019_leftImg8bit.png gtFine/train/ulm/ulm_000047_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000048_000019_leftImg8bit.png gtFine/train/ulm/ulm_000048_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000049_000019_leftImg8bit.png gtFine/train/ulm/ulm_000049_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000050_000019_leftImg8bit.png gtFine/train/ulm/ulm_000050_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000051_000019_leftImg8bit.png gtFine/train/ulm/ulm_000051_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000052_000019_leftImg8bit.png gtFine/train/ulm/ulm_000052_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000053_000019_leftImg8bit.png gtFine/train/ulm/ulm_000053_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000054_000019_leftImg8bit.png gtFine/train/ulm/ulm_000054_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000055_000019_leftImg8bit.png gtFine/train/ulm/ulm_000055_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000056_000019_leftImg8bit.png gtFine/train/ulm/ulm_000056_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000057_000019_leftImg8bit.png gtFine/train/ulm/ulm_000057_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000058_000019_leftImg8bit.png gtFine/train/ulm/ulm_000058_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000059_000019_leftImg8bit.png gtFine/train/ulm/ulm_000059_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000060_000019_leftImg8bit.png gtFine/train/ulm/ulm_000060_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000061_000019_leftImg8bit.png gtFine/train/ulm/ulm_000061_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000062_000019_leftImg8bit.png gtFine/train/ulm/ulm_000062_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000063_000019_leftImg8bit.png gtFine/train/ulm/ulm_000063_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000064_000019_leftImg8bit.png gtFine/train/ulm/ulm_000064_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000065_000019_leftImg8bit.png gtFine/train/ulm/ulm_000065_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000066_000019_leftImg8bit.png gtFine/train/ulm/ulm_000066_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000067_000019_leftImg8bit.png gtFine/train/ulm/ulm_000067_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000068_000019_leftImg8bit.png gtFine/train/ulm/ulm_000068_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000069_000019_leftImg8bit.png gtFine/train/ulm/ulm_000069_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000070_000019_leftImg8bit.png gtFine/train/ulm/ulm_000070_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000071_000019_leftImg8bit.png gtFine/train/ulm/ulm_000071_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000072_000019_leftImg8bit.png gtFine/train/ulm/ulm_000072_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000073_000019_leftImg8bit.png 
gtFine/train/ulm/ulm_000073_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000074_000019_leftImg8bit.png gtFine/train/ulm/ulm_000074_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000075_000019_leftImg8bit.png gtFine/train/ulm/ulm_000075_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000076_000019_leftImg8bit.png gtFine/train/ulm/ulm_000076_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000077_000019_leftImg8bit.png gtFine/train/ulm/ulm_000077_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000078_000019_leftImg8bit.png gtFine/train/ulm/ulm_000078_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000079_000019_leftImg8bit.png gtFine/train/ulm/ulm_000079_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000080_000019_leftImg8bit.png gtFine/train/ulm/ulm_000080_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000081_000019_leftImg8bit.png gtFine/train/ulm/ulm_000081_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000082_000019_leftImg8bit.png gtFine/train/ulm/ulm_000082_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000083_000019_leftImg8bit.png gtFine/train/ulm/ulm_000083_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000084_000019_leftImg8bit.png gtFine/train/ulm/ulm_000084_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000085_000019_leftImg8bit.png gtFine/train/ulm/ulm_000085_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000086_000019_leftImg8bit.png gtFine/train/ulm/ulm_000086_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000087_000019_leftImg8bit.png gtFine/train/ulm/ulm_000087_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000088_000019_leftImg8bit.png gtFine/train/ulm/ulm_000088_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000089_000019_leftImg8bit.png gtFine/train/ulm/ulm_000089_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000090_000019_leftImg8bit.png gtFine/train/ulm/ulm_000090_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000091_000019_leftImg8bit.png gtFine/train/ulm/ulm_000091_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000092_000019_leftImg8bit.png gtFine/train/ulm/ulm_000092_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000093_000019_leftImg8bit.png gtFine/train/ulm/ulm_000093_000019_gtFine_labelTrainIds.png +leftImg8bit/train/ulm/ulm_000094_000019_leftImg8bit.png gtFine/train/ulm/ulm_000094_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000000_000019_leftImg8bit.png gtFine/train/weimar/weimar_000000_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000001_000019_leftImg8bit.png gtFine/train/weimar/weimar_000001_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000002_000019_leftImg8bit.png gtFine/train/weimar/weimar_000002_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000003_000019_leftImg8bit.png gtFine/train/weimar/weimar_000003_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000004_000019_leftImg8bit.png gtFine/train/weimar/weimar_000004_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000005_000019_leftImg8bit.png gtFine/train/weimar/weimar_000005_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000006_000019_leftImg8bit.png gtFine/train/weimar/weimar_000006_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000007_000019_leftImg8bit.png gtFine/train/weimar/weimar_000007_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/weimar/weimar_000008_000019_leftImg8bit.png gtFine/train/weimar/weimar_000008_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000009_000019_leftImg8bit.png gtFine/train/weimar/weimar_000009_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000010_000019_leftImg8bit.png gtFine/train/weimar/weimar_000010_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000011_000019_leftImg8bit.png gtFine/train/weimar/weimar_000011_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000012_000019_leftImg8bit.png gtFine/train/weimar/weimar_000012_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000013_000019_leftImg8bit.png gtFine/train/weimar/weimar_000013_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000014_000019_leftImg8bit.png gtFine/train/weimar/weimar_000014_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000015_000019_leftImg8bit.png gtFine/train/weimar/weimar_000015_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000016_000019_leftImg8bit.png gtFine/train/weimar/weimar_000016_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000017_000019_leftImg8bit.png gtFine/train/weimar/weimar_000017_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000018_000019_leftImg8bit.png gtFine/train/weimar/weimar_000018_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000019_000019_leftImg8bit.png gtFine/train/weimar/weimar_000019_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000020_000019_leftImg8bit.png gtFine/train/weimar/weimar_000020_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000021_000019_leftImg8bit.png gtFine/train/weimar/weimar_000021_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000022_000019_leftImg8bit.png gtFine/train/weimar/weimar_000022_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000023_000019_leftImg8bit.png gtFine/train/weimar/weimar_000023_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000024_000019_leftImg8bit.png gtFine/train/weimar/weimar_000024_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000025_000019_leftImg8bit.png gtFine/train/weimar/weimar_000025_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000026_000019_leftImg8bit.png gtFine/train/weimar/weimar_000026_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000027_000019_leftImg8bit.png gtFine/train/weimar/weimar_000027_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000028_000019_leftImg8bit.png gtFine/train/weimar/weimar_000028_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000029_000019_leftImg8bit.png gtFine/train/weimar/weimar_000029_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000030_000019_leftImg8bit.png gtFine/train/weimar/weimar_000030_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000031_000019_leftImg8bit.png gtFine/train/weimar/weimar_000031_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000032_000019_leftImg8bit.png gtFine/train/weimar/weimar_000032_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000033_000019_leftImg8bit.png gtFine/train/weimar/weimar_000033_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000034_000019_leftImg8bit.png gtFine/train/weimar/weimar_000034_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000035_000019_leftImg8bit.png 
gtFine/train/weimar/weimar_000035_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000036_000019_leftImg8bit.png gtFine/train/weimar/weimar_000036_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000037_000019_leftImg8bit.png gtFine/train/weimar/weimar_000037_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000038_000019_leftImg8bit.png gtFine/train/weimar/weimar_000038_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000039_000019_leftImg8bit.png gtFine/train/weimar/weimar_000039_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000040_000019_leftImg8bit.png gtFine/train/weimar/weimar_000040_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000041_000019_leftImg8bit.png gtFine/train/weimar/weimar_000041_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000042_000019_leftImg8bit.png gtFine/train/weimar/weimar_000042_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000043_000019_leftImg8bit.png gtFine/train/weimar/weimar_000043_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000044_000019_leftImg8bit.png gtFine/train/weimar/weimar_000044_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000045_000019_leftImg8bit.png gtFine/train/weimar/weimar_000045_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000046_000019_leftImg8bit.png gtFine/train/weimar/weimar_000046_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000047_000019_leftImg8bit.png gtFine/train/weimar/weimar_000047_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000048_000019_leftImg8bit.png gtFine/train/weimar/weimar_000048_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000049_000019_leftImg8bit.png gtFine/train/weimar/weimar_000049_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000050_000019_leftImg8bit.png gtFine/train/weimar/weimar_000050_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000051_000019_leftImg8bit.png gtFine/train/weimar/weimar_000051_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000052_000019_leftImg8bit.png gtFine/train/weimar/weimar_000052_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000053_000019_leftImg8bit.png gtFine/train/weimar/weimar_000053_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000054_000019_leftImg8bit.png gtFine/train/weimar/weimar_000054_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000055_000019_leftImg8bit.png gtFine/train/weimar/weimar_000055_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000056_000019_leftImg8bit.png gtFine/train/weimar/weimar_000056_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000057_000019_leftImg8bit.png gtFine/train/weimar/weimar_000057_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000058_000019_leftImg8bit.png gtFine/train/weimar/weimar_000058_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000059_000019_leftImg8bit.png gtFine/train/weimar/weimar_000059_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000060_000019_leftImg8bit.png gtFine/train/weimar/weimar_000060_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000061_000019_leftImg8bit.png gtFine/train/weimar/weimar_000061_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000062_000019_leftImg8bit.png gtFine/train/weimar/weimar_000062_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/weimar/weimar_000063_000019_leftImg8bit.png gtFine/train/weimar/weimar_000063_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000064_000019_leftImg8bit.png gtFine/train/weimar/weimar_000064_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000065_000019_leftImg8bit.png gtFine/train/weimar/weimar_000065_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000066_000019_leftImg8bit.png gtFine/train/weimar/weimar_000066_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000067_000019_leftImg8bit.png gtFine/train/weimar/weimar_000067_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000068_000019_leftImg8bit.png gtFine/train/weimar/weimar_000068_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000069_000019_leftImg8bit.png gtFine/train/weimar/weimar_000069_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000070_000019_leftImg8bit.png gtFine/train/weimar/weimar_000070_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000071_000019_leftImg8bit.png gtFine/train/weimar/weimar_000071_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000072_000019_leftImg8bit.png gtFine/train/weimar/weimar_000072_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000073_000019_leftImg8bit.png gtFine/train/weimar/weimar_000073_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000074_000019_leftImg8bit.png gtFine/train/weimar/weimar_000074_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000075_000019_leftImg8bit.png gtFine/train/weimar/weimar_000075_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000076_000019_leftImg8bit.png gtFine/train/weimar/weimar_000076_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000077_000019_leftImg8bit.png gtFine/train/weimar/weimar_000077_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000078_000019_leftImg8bit.png gtFine/train/weimar/weimar_000078_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000079_000019_leftImg8bit.png gtFine/train/weimar/weimar_000079_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000080_000019_leftImg8bit.png gtFine/train/weimar/weimar_000080_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000081_000019_leftImg8bit.png gtFine/train/weimar/weimar_000081_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000082_000019_leftImg8bit.png gtFine/train/weimar/weimar_000082_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000083_000019_leftImg8bit.png gtFine/train/weimar/weimar_000083_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000084_000019_leftImg8bit.png gtFine/train/weimar/weimar_000084_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000085_000019_leftImg8bit.png gtFine/train/weimar/weimar_000085_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000086_000019_leftImg8bit.png gtFine/train/weimar/weimar_000086_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000087_000019_leftImg8bit.png gtFine/train/weimar/weimar_000087_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000088_000019_leftImg8bit.png gtFine/train/weimar/weimar_000088_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000089_000019_leftImg8bit.png gtFine/train/weimar/weimar_000089_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000090_000019_leftImg8bit.png 
gtFine/train/weimar/weimar_000090_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000091_000019_leftImg8bit.png gtFine/train/weimar/weimar_000091_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000092_000019_leftImg8bit.png gtFine/train/weimar/weimar_000092_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000093_000019_leftImg8bit.png gtFine/train/weimar/weimar_000093_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000094_000019_leftImg8bit.png gtFine/train/weimar/weimar_000094_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000095_000019_leftImg8bit.png gtFine/train/weimar/weimar_000095_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000096_000019_leftImg8bit.png gtFine/train/weimar/weimar_000096_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000097_000019_leftImg8bit.png gtFine/train/weimar/weimar_000097_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000098_000019_leftImg8bit.png gtFine/train/weimar/weimar_000098_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000099_000019_leftImg8bit.png gtFine/train/weimar/weimar_000099_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000100_000019_leftImg8bit.png gtFine/train/weimar/weimar_000100_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000101_000019_leftImg8bit.png gtFine/train/weimar/weimar_000101_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000102_000019_leftImg8bit.png gtFine/train/weimar/weimar_000102_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000103_000019_leftImg8bit.png gtFine/train/weimar/weimar_000103_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000104_000019_leftImg8bit.png gtFine/train/weimar/weimar_000104_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000105_000019_leftImg8bit.png gtFine/train/weimar/weimar_000105_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000106_000019_leftImg8bit.png gtFine/train/weimar/weimar_000106_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000107_000019_leftImg8bit.png gtFine/train/weimar/weimar_000107_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000108_000019_leftImg8bit.png gtFine/train/weimar/weimar_000108_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000109_000019_leftImg8bit.png gtFine/train/weimar/weimar_000109_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000110_000019_leftImg8bit.png gtFine/train/weimar/weimar_000110_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000111_000019_leftImg8bit.png gtFine/train/weimar/weimar_000111_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000112_000019_leftImg8bit.png gtFine/train/weimar/weimar_000112_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000113_000019_leftImg8bit.png gtFine/train/weimar/weimar_000113_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000114_000019_leftImg8bit.png gtFine/train/weimar/weimar_000114_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000115_000019_leftImg8bit.png gtFine/train/weimar/weimar_000115_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000116_000019_leftImg8bit.png gtFine/train/weimar/weimar_000116_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000117_000019_leftImg8bit.png gtFine/train/weimar/weimar_000117_000019_gtFine_labelTrainIds.png 
+leftImg8bit/train/weimar/weimar_000118_000019_leftImg8bit.png gtFine/train/weimar/weimar_000118_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000119_000019_leftImg8bit.png gtFine/train/weimar/weimar_000119_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000120_000019_leftImg8bit.png gtFine/train/weimar/weimar_000120_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000121_000019_leftImg8bit.png gtFine/train/weimar/weimar_000121_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000122_000019_leftImg8bit.png gtFine/train/weimar/weimar_000122_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000123_000019_leftImg8bit.png gtFine/train/weimar/weimar_000123_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000124_000019_leftImg8bit.png gtFine/train/weimar/weimar_000124_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000125_000019_leftImg8bit.png gtFine/train/weimar/weimar_000125_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000126_000019_leftImg8bit.png gtFine/train/weimar/weimar_000126_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000127_000019_leftImg8bit.png gtFine/train/weimar/weimar_000127_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000128_000019_leftImg8bit.png gtFine/train/weimar/weimar_000128_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000129_000019_leftImg8bit.png gtFine/train/weimar/weimar_000129_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000130_000019_leftImg8bit.png gtFine/train/weimar/weimar_000130_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000131_000019_leftImg8bit.png gtFine/train/weimar/weimar_000131_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000132_000019_leftImg8bit.png gtFine/train/weimar/weimar_000132_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000133_000019_leftImg8bit.png gtFine/train/weimar/weimar_000133_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000134_000019_leftImg8bit.png gtFine/train/weimar/weimar_000134_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000135_000019_leftImg8bit.png gtFine/train/weimar/weimar_000135_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000136_000019_leftImg8bit.png gtFine/train/weimar/weimar_000136_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000137_000019_leftImg8bit.png gtFine/train/weimar/weimar_000137_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000138_000019_leftImg8bit.png gtFine/train/weimar/weimar_000138_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000139_000019_leftImg8bit.png gtFine/train/weimar/weimar_000139_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000140_000019_leftImg8bit.png gtFine/train/weimar/weimar_000140_000019_gtFine_labelTrainIds.png +leftImg8bit/train/weimar/weimar_000141_000019_leftImg8bit.png gtFine/train/weimar/weimar_000141_000019_gtFine_labelTrainIds.png +leftImg8bit/train/zurich/zurich_000000_000019_leftImg8bit.png gtFine/train/zurich/zurich_000000_000019_gtFine_labelTrainIds.png +leftImg8bit/train/zurich/zurich_000001_000019_leftImg8bit.png gtFine/train/zurich/zurich_000001_000019_gtFine_labelTrainIds.png +leftImg8bit/train/zurich/zurich_000002_000019_leftImg8bit.png gtFine/train/zurich/zurich_000002_000019_gtFine_labelTrainIds.png +leftImg8bit/train/zurich/zurich_000003_000019_leftImg8bit.png 
gtFine/train/zurich/zurich_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000004_000019_leftImg8bit.png gtFine/train/zurich/zurich_000004_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000005_000019_leftImg8bit.png gtFine/train/zurich/zurich_000005_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000006_000019_leftImg8bit.png gtFine/train/zurich/zurich_000006_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000007_000019_leftImg8bit.png gtFine/train/zurich/zurich_000007_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000008_000019_leftImg8bit.png gtFine/train/zurich/zurich_000008_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000009_000019_leftImg8bit.png gtFine/train/zurich/zurich_000009_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000010_000019_leftImg8bit.png gtFine/train/zurich/zurich_000010_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000011_000019_leftImg8bit.png gtFine/train/zurich/zurich_000011_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000012_000019_leftImg8bit.png gtFine/train/zurich/zurich_000012_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000013_000019_leftImg8bit.png gtFine/train/zurich/zurich_000013_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000014_000019_leftImg8bit.png gtFine/train/zurich/zurich_000014_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000015_000019_leftImg8bit.png gtFine/train/zurich/zurich_000015_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000016_000019_leftImg8bit.png gtFine/train/zurich/zurich_000016_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000017_000019_leftImg8bit.png gtFine/train/zurich/zurich_000017_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000018_000019_leftImg8bit.png gtFine/train/zurich/zurich_000018_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000019_000019_leftImg8bit.png gtFine/train/zurich/zurich_000019_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000020_000019_leftImg8bit.png gtFine/train/zurich/zurich_000020_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000021_000019_leftImg8bit.png gtFine/train/zurich/zurich_000021_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000022_000019_leftImg8bit.png gtFine/train/zurich/zurich_000022_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000023_000019_leftImg8bit.png gtFine/train/zurich/zurich_000023_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000024_000019_leftImg8bit.png gtFine/train/zurich/zurich_000024_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000025_000019_leftImg8bit.png gtFine/train/zurich/zurich_000025_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000026_000019_leftImg8bit.png gtFine/train/zurich/zurich_000026_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000027_000019_leftImg8bit.png gtFine/train/zurich/zurich_000027_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000028_000019_leftImg8bit.png gtFine/train/zurich/zurich_000028_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000029_000019_leftImg8bit.png gtFine/train/zurich/zurich_000029_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000030_000019_leftImg8bit.png gtFine/train/zurich/zurich_000030_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000031_000019_leftImg8bit.png gtFine/train/zurich/zurich_000031_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000032_000019_leftImg8bit.png gtFine/train/zurich/zurich_000032_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000033_000019_leftImg8bit.png gtFine/train/zurich/zurich_000033_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000034_000019_leftImg8bit.png gtFine/train/zurich/zurich_000034_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000035_000019_leftImg8bit.png gtFine/train/zurich/zurich_000035_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000036_000019_leftImg8bit.png gtFine/train/zurich/zurich_000036_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000037_000019_leftImg8bit.png gtFine/train/zurich/zurich_000037_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000038_000019_leftImg8bit.png gtFine/train/zurich/zurich_000038_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000039_000019_leftImg8bit.png gtFine/train/zurich/zurich_000039_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000040_000019_leftImg8bit.png gtFine/train/zurich/zurich_000040_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000041_000019_leftImg8bit.png gtFine/train/zurich/zurich_000041_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000042_000019_leftImg8bit.png gtFine/train/zurich/zurich_000042_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000043_000019_leftImg8bit.png gtFine/train/zurich/zurich_000043_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000044_000019_leftImg8bit.png gtFine/train/zurich/zurich_000044_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000045_000019_leftImg8bit.png gtFine/train/zurich/zurich_000045_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000046_000019_leftImg8bit.png gtFine/train/zurich/zurich_000046_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000047_000019_leftImg8bit.png gtFine/train/zurich/zurich_000047_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000048_000019_leftImg8bit.png gtFine/train/zurich/zurich_000048_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000049_000019_leftImg8bit.png gtFine/train/zurich/zurich_000049_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000050_000019_leftImg8bit.png gtFine/train/zurich/zurich_000050_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000051_000019_leftImg8bit.png gtFine/train/zurich/zurich_000051_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000052_000019_leftImg8bit.png gtFine/train/zurich/zurich_000052_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000053_000019_leftImg8bit.png gtFine/train/zurich/zurich_000053_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000054_000019_leftImg8bit.png gtFine/train/zurich/zurich_000054_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000055_000019_leftImg8bit.png gtFine/train/zurich/zurich_000055_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000056_000019_leftImg8bit.png gtFine/train/zurich/zurich_000056_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000057_000019_leftImg8bit.png gtFine/train/zurich/zurich_000057_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000058_000019_leftImg8bit.png gtFine/train/zurich/zurich_000058_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000059_000019_leftImg8bit.png gtFine/train/zurich/zurich_000059_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000060_000019_leftImg8bit.png gtFine/train/zurich/zurich_000060_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000061_000019_leftImg8bit.png gtFine/train/zurich/zurich_000061_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000062_000019_leftImg8bit.png gtFine/train/zurich/zurich_000062_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000063_000019_leftImg8bit.png gtFine/train/zurich/zurich_000063_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000064_000019_leftImg8bit.png gtFine/train/zurich/zurich_000064_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000065_000019_leftImg8bit.png gtFine/train/zurich/zurich_000065_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000066_000019_leftImg8bit.png gtFine/train/zurich/zurich_000066_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000067_000019_leftImg8bit.png gtFine/train/zurich/zurich_000067_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000068_000019_leftImg8bit.png gtFine/train/zurich/zurich_000068_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000069_000019_leftImg8bit.png gtFine/train/zurich/zurich_000069_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000070_000019_leftImg8bit.png gtFine/train/zurich/zurich_000070_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000071_000019_leftImg8bit.png gtFine/train/zurich/zurich_000071_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000072_000019_leftImg8bit.png gtFine/train/zurich/zurich_000072_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000073_000019_leftImg8bit.png gtFine/train/zurich/zurich_000073_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000074_000019_leftImg8bit.png gtFine/train/zurich/zurich_000074_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000075_000019_leftImg8bit.png gtFine/train/zurich/zurich_000075_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000076_000019_leftImg8bit.png gtFine/train/zurich/zurich_000076_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000077_000019_leftImg8bit.png gtFine/train/zurich/zurich_000077_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000078_000019_leftImg8bit.png gtFine/train/zurich/zurich_000078_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000079_000019_leftImg8bit.png gtFine/train/zurich/zurich_000079_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000080_000019_leftImg8bit.png gtFine/train/zurich/zurich_000080_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000081_000019_leftImg8bit.png gtFine/train/zurich/zurich_000081_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000082_000019_leftImg8bit.png gtFine/train/zurich/zurich_000082_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000083_000019_leftImg8bit.png gtFine/train/zurich/zurich_000083_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000084_000019_leftImg8bit.png gtFine/train/zurich/zurich_000084_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000085_000019_leftImg8bit.png gtFine/train/zurich/zurich_000085_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000086_000019_leftImg8bit.png gtFine/train/zurich/zurich_000086_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000087_000019_leftImg8bit.png gtFine/train/zurich/zurich_000087_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000088_000019_leftImg8bit.png gtFine/train/zurich/zurich_000088_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000089_000019_leftImg8bit.png gtFine/train/zurich/zurich_000089_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000090_000019_leftImg8bit.png gtFine/train/zurich/zurich_000090_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000091_000019_leftImg8bit.png gtFine/train/zurich/zurich_000091_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000092_000019_leftImg8bit.png gtFine/train/zurich/zurich_000092_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000093_000019_leftImg8bit.png gtFine/train/zurich/zurich_000093_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000094_000019_leftImg8bit.png gtFine/train/zurich/zurich_000094_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000095_000019_leftImg8bit.png gtFine/train/zurich/zurich_000095_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000096_000019_leftImg8bit.png gtFine/train/zurich/zurich_000096_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000097_000019_leftImg8bit.png gtFine/train/zurich/zurich_000097_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000098_000019_leftImg8bit.png gtFine/train/zurich/zurich_000098_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000099_000019_leftImg8bit.png gtFine/train/zurich/zurich_000099_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000100_000019_leftImg8bit.png gtFine/train/zurich/zurich_000100_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000101_000019_leftImg8bit.png gtFine/train/zurich/zurich_000101_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000102_000019_leftImg8bit.png gtFine/train/zurich/zurich_000102_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000103_000019_leftImg8bit.png gtFine/train/zurich/zurich_000103_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000104_000019_leftImg8bit.png gtFine/train/zurich/zurich_000104_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000105_000019_leftImg8bit.png gtFine/train/zurich/zurich_000105_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000106_000019_leftImg8bit.png gtFine/train/zurich/zurich_000106_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000107_000019_leftImg8bit.png gtFine/train/zurich/zurich_000107_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000108_000019_leftImg8bit.png gtFine/train/zurich/zurich_000108_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000109_000019_leftImg8bit.png gtFine/train/zurich/zurich_000109_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000110_000019_leftImg8bit.png gtFine/train/zurich/zurich_000110_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000111_000019_leftImg8bit.png gtFine/train/zurich/zurich_000111_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000112_000019_leftImg8bit.png gtFine/train/zurich/zurich_000112_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000113_000019_leftImg8bit.png gtFine/train/zurich/zurich_000113_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000114_000019_leftImg8bit.png gtFine/train/zurich/zurich_000114_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000115_000019_leftImg8bit.png gtFine/train/zurich/zurich_000115_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000116_000019_leftImg8bit.png gtFine/train/zurich/zurich_000116_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000117_000019_leftImg8bit.png gtFine/train/zurich/zurich_000117_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000118_000019_leftImg8bit.png gtFine/train/zurich/zurich_000118_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000119_000019_leftImg8bit.png gtFine/train/zurich/zurich_000119_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000120_000019_leftImg8bit.png gtFine/train/zurich/zurich_000120_000019_gtFine_labelTrainIds.png
+leftImg8bit/train/zurich/zurich_000121_000019_leftImg8bit.png gtFine/train/zurich/zurich_000121_000019_gtFine_labelTrainIds.png
diff --git a/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_val_fine.txt b/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_val_fine.txt
new file mode 100644
index 0000000..70f192b
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/cityscapes/cityscapes_val_fine.txt
@@ -0,0 +1,500 @@
+leftImg8bit/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_000576_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_000576_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_001016_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_001016_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_001236_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_001236_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_001751_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_001751_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_002196_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_002196_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_002963_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_002963_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_003025_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_003025_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_003357_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_003357_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_003920_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_003920_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_004617_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_004617_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_005543_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_005543_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_005898_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_005898_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_006589_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_006589_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_007365_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_007365_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_008206_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_008206_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_008451_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_008451_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_009291_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009291_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_009561_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009561_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_009688_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009688_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_009969_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009969_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_010351_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_010351_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_010763_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_010763_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_011007_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011007_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_011074_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011074_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_011461_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011461_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_011810_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011810_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_012009_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_012009_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_012121_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_012121_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_012868_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_012868_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_013067_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_013067_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_013240_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_013240_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_013382_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_013382_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_013942_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_013942_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_014480_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_014480_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_015389_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_015389_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_015676_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_015676_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_016005_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_016005_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_016286_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_016286_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_017228_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_017228_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_017476_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_017476_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_018797_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_018797_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_019607_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_019607_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_020215_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_020215_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_020321_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_020321_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_020880_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_020880_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_021667_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_021667_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_021879_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_021879_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_022254_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_022254_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000000_022797_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_022797_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_000538_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_000538_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_001464_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_001464_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_002512_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_002512_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_002646_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_002646_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_002759_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_002759_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_003056_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_003056_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_003588_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_003588_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_004327_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_004327_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_004736_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_004736_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_004859_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_004859_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_005184_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_005184_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_005410_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_005410_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_005703_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_005703_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_005898_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_005898_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_007285_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007285_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_007407_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007407_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_007622_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007622_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_007857_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007857_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_007973_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007973_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_008200_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_008200_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_008688_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_008688_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_009058_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_009058_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_009504_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_009504_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_009854_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_009854_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_010156_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010156_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_010444_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010444_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_010600_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010600_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_010830_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010830_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_011162_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_011162_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_011715_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_011715_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_011835_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_011835_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_012038_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012038_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_012519_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012519_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_012699_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012699_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_012738_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012738_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_012870_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012870_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_013016_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_013016_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_013496_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_013496_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_013710_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_013710_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_014221_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_014221_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_014406_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_014406_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_014565_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_014565_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_014741_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_014741_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_015091_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_015091_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_015328_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_015328_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_015768_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_015768_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_016029_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_016029_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_016273_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_016273_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_016462_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_016462_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_017101_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_017101_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_017459_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_017459_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_017842_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_017842_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_018113_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_018113_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_019698_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_019698_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_019854_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_019854_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_019969_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_019969_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_020046_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_020046_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_020287_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_020287_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_020693_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_020693_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_021406_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_021406_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_021825_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_021825_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_023235_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_023235_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_023369_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_023369_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_023769_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_023769_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_024927_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_024927_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_025512_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_025512_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_025713_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_025713_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_025921_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_025921_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_027325_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_027325_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_028232_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028232_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_028335_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028335_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_028590_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028590_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_028854_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028854_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_029086_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_029086_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_029236_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_029236_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_029600_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_029600_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_030067_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_030067_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_030310_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_030310_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_030669_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_030669_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_031266_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_031266_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_031416_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_031416_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_032018_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032018_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_032556_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032556_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_032711_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032711_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_032942_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032942_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_033655_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_033655_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_034047_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_034047_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_034816_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_034816_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_035144_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_035144_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_035864_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_035864_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_037705_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_037705_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_038245_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038245_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_038418_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038418_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_038645_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038645_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_038844_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038844_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_039895_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_039895_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_040575_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_040575_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_040732_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_040732_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_041074_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041074_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_041354_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041354_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_041517_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041517_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_041664_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041664_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_042098_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_042098_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_042384_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_042384_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_042733_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_042733_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_043395_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_043395_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_043564_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_043564_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_044227_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044227_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_044413_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044413_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_044525_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044525_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_044658_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044658_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_044787_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044787_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_046126_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046126_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_046272_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046272_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_046504_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046504_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_046779_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046779_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_047178_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_047178_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_047552_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_047552_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_048196_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_048196_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_048355_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_048355_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_048654_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_048654_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_049078_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049078_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_049209_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049209_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_049298_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049298_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_049698_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049698_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_049770_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049770_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_050149_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_050149_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_050686_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_050686_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_051516_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_051516_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_051737_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_051737_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_051807_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_051807_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_052120_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_052120_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_052594_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_052594_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_053102_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_053102_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_054077_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054077_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_054219_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054219_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_054415_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054415_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_054640_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054640_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_054884_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054884_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_055062_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055062_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_055172_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055172_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_055306_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055306_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_055387_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055387_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_055538_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055538_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_055603_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055603_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_055709_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055709_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_056580_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_056580_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_057181_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_057181_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_057478_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_057478_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_057954_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_057954_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_058057_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058057_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_058176_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058176_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_058504_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058504_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_058914_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058914_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_059119_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_059119_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_059642_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_059642_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_059789_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_059789_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_060135_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060135_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_060422_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060422_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_060545_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060545_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_060906_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060906_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_061682_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_061682_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_061763_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_061763_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_062016_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062016_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_062250_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062250_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_062396_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062396_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_062509_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062509_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_062653_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062653_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_062793_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062793_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_063045_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_063045_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_064130_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064130_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_064305_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064305_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_064651_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064651_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_064798_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064798_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_064925_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064925_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_065160_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_065160_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_065617_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_065617_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_065850_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_065850_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_066092_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066092_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_066438_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066438_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_066574_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066574_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_066832_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066832_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_067092_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067092_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_067178_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067178_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_067295_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067295_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_067474_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067474_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_067735_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067735_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_068063_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068063_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_068208_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068208_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_068682_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068682_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_068772_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068772_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_069633_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_069633_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_070099_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_070099_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_071288_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_071288_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_071781_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_071781_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_072155_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_072155_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_072295_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_072295_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_073088_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073088_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_073243_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073243_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_073464_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073464_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_073911_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073911_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_075296_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_075296_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_075984_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_075984_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_076502_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_076502_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_077092_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_077092_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_077233_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_077233_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_077434_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_077434_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_078803_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_078803_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_079206_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_079206_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_080091_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_080091_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_080391_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_080391_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_080830_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_080830_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_082087_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_082087_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_082466_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_082466_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_083029_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_083029_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_083199_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_083199_gtFine_labelTrainIds.png
+leftImg8bit/val/frankfurt/frankfurt_000001_083852_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_083852_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000000_000019_leftImg8bit.png gtFine/val/lindau/lindau_000000_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000001_000019_leftImg8bit.png gtFine/val/lindau/lindau_000001_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000002_000019_leftImg8bit.png gtFine/val/lindau/lindau_000002_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000003_000019_leftImg8bit.png gtFine/val/lindau/lindau_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000004_000019_leftImg8bit.png gtFine/val/lindau/lindau_000004_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000005_000019_leftImg8bit.png gtFine/val/lindau/lindau_000005_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000006_000019_leftImg8bit.png gtFine/val/lindau/lindau_000006_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000007_000019_leftImg8bit.png gtFine/val/lindau/lindau_000007_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000008_000019_leftImg8bit.png gtFine/val/lindau/lindau_000008_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000009_000019_leftImg8bit.png gtFine/val/lindau/lindau_000009_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000010_000019_leftImg8bit.png gtFine/val/lindau/lindau_000010_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000011_000019_leftImg8bit.png gtFine/val/lindau/lindau_000011_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000012_000019_leftImg8bit.png gtFine/val/lindau/lindau_000012_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000013_000019_leftImg8bit.png gtFine/val/lindau/lindau_000013_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000014_000019_leftImg8bit.png gtFine/val/lindau/lindau_000014_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000015_000019_leftImg8bit.png gtFine/val/lindau/lindau_000015_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000016_000019_leftImg8bit.png gtFine/val/lindau/lindau_000016_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000017_000019_leftImg8bit.png gtFine/val/lindau/lindau_000017_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000018_000019_leftImg8bit.png gtFine/val/lindau/lindau_000018_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000019_000019_leftImg8bit.png gtFine/val/lindau/lindau_000019_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000020_000019_leftImg8bit.png gtFine/val/lindau/lindau_000020_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000021_000019_leftImg8bit.png gtFine/val/lindau/lindau_000021_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000022_000019_leftImg8bit.png gtFine/val/lindau/lindau_000022_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000023_000019_leftImg8bit.png gtFine/val/lindau/lindau_000023_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000024_000019_leftImg8bit.png gtFine/val/lindau/lindau_000024_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000025_000019_leftImg8bit.png gtFine/val/lindau/lindau_000025_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000026_000019_leftImg8bit.png gtFine/val/lindau/lindau_000026_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000027_000019_leftImg8bit.png gtFine/val/lindau/lindau_000027_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000028_000019_leftImg8bit.png gtFine/val/lindau/lindau_000028_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000029_000019_leftImg8bit.png gtFine/val/lindau/lindau_000029_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000030_000019_leftImg8bit.png gtFine/val/lindau/lindau_000030_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000031_000019_leftImg8bit.png gtFine/val/lindau/lindau_000031_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000032_000019_leftImg8bit.png gtFine/val/lindau/lindau_000032_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000033_000019_leftImg8bit.png gtFine/val/lindau/lindau_000033_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000034_000019_leftImg8bit.png gtFine/val/lindau/lindau_000034_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000035_000019_leftImg8bit.png gtFine/val/lindau/lindau_000035_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000036_000019_leftImg8bit.png gtFine/val/lindau/lindau_000036_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000037_000019_leftImg8bit.png gtFine/val/lindau/lindau_000037_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000038_000019_leftImg8bit.png gtFine/val/lindau/lindau_000038_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000039_000019_leftImg8bit.png gtFine/val/lindau/lindau_000039_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000040_000019_leftImg8bit.png gtFine/val/lindau/lindau_000040_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000041_000019_leftImg8bit.png gtFine/val/lindau/lindau_000041_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000042_000019_leftImg8bit.png gtFine/val/lindau/lindau_000042_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000043_000019_leftImg8bit.png gtFine/val/lindau/lindau_000043_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000044_000019_leftImg8bit.png gtFine/val/lindau/lindau_000044_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000045_000019_leftImg8bit.png gtFine/val/lindau/lindau_000045_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000046_000019_leftImg8bit.png gtFine/val/lindau/lindau_000046_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000047_000019_leftImg8bit.png gtFine/val/lindau/lindau_000047_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000048_000019_leftImg8bit.png gtFine/val/lindau/lindau_000048_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000049_000019_leftImg8bit.png gtFine/val/lindau/lindau_000049_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000050_000019_leftImg8bit.png gtFine/val/lindau/lindau_000050_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000051_000019_leftImg8bit.png gtFine/val/lindau/lindau_000051_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000052_000019_leftImg8bit.png gtFine/val/lindau/lindau_000052_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000053_000019_leftImg8bit.png gtFine/val/lindau/lindau_000053_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000054_000019_leftImg8bit.png gtFine/val/lindau/lindau_000054_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000055_000019_leftImg8bit.png gtFine/val/lindau/lindau_000055_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000056_000019_leftImg8bit.png gtFine/val/lindau/lindau_000056_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000057_000019_leftImg8bit.png gtFine/val/lindau/lindau_000057_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/lindau/lindau_000058_000019_leftImg8bit.png gtFine/val/lindau/lindau_000058_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000000_000019_leftImg8bit.png gtFine/val/munster/munster_000000_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000001_000019_leftImg8bit.png gtFine/val/munster/munster_000001_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000002_000019_leftImg8bit.png gtFine/val/munster/munster_000002_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000003_000019_leftImg8bit.png gtFine/val/munster/munster_000003_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000004_000019_leftImg8bit.png gtFine/val/munster/munster_000004_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000005_000019_leftImg8bit.png gtFine/val/munster/munster_000005_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000006_000019_leftImg8bit.png gtFine/val/munster/munster_000006_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000007_000019_leftImg8bit.png gtFine/val/munster/munster_000007_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000008_000019_leftImg8bit.png gtFine/val/munster/munster_000008_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000009_000019_leftImg8bit.png gtFine/val/munster/munster_000009_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000010_000019_leftImg8bit.png gtFine/val/munster/munster_000010_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000011_000019_leftImg8bit.png gtFine/val/munster/munster_000011_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000012_000019_leftImg8bit.png gtFine/val/munster/munster_000012_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000013_000019_leftImg8bit.png gtFine/val/munster/munster_000013_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000014_000019_leftImg8bit.png gtFine/val/munster/munster_000014_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000015_000019_leftImg8bit.png gtFine/val/munster/munster_000015_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000016_000019_leftImg8bit.png gtFine/val/munster/munster_000016_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000017_000019_leftImg8bit.png gtFine/val/munster/munster_000017_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000018_000019_leftImg8bit.png gtFine/val/munster/munster_000018_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000019_000019_leftImg8bit.png gtFine/val/munster/munster_000019_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000020_000019_leftImg8bit.png gtFine/val/munster/munster_000020_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000021_000019_leftImg8bit.png gtFine/val/munster/munster_000021_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000022_000019_leftImg8bit.png gtFine/val/munster/munster_000022_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000023_000019_leftImg8bit.png gtFine/val/munster/munster_000023_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000024_000019_leftImg8bit.png gtFine/val/munster/munster_000024_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000025_000019_leftImg8bit.png gtFine/val/munster/munster_000025_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000026_000019_leftImg8bit.png gtFine/val/munster/munster_000026_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000027_000019_leftImg8bit.png gtFine/val/munster/munster_000027_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000028_000019_leftImg8bit.png gtFine/val/munster/munster_000028_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000029_000019_leftImg8bit.png gtFine/val/munster/munster_000029_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000030_000019_leftImg8bit.png gtFine/val/munster/munster_000030_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000031_000019_leftImg8bit.png gtFine/val/munster/munster_000031_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000032_000019_leftImg8bit.png gtFine/val/munster/munster_000032_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000033_000019_leftImg8bit.png gtFine/val/munster/munster_000033_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000034_000019_leftImg8bit.png gtFine/val/munster/munster_000034_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000035_000019_leftImg8bit.png gtFine/val/munster/munster_000035_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000036_000019_leftImg8bit.png gtFine/val/munster/munster_000036_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000037_000019_leftImg8bit.png gtFine/val/munster/munster_000037_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000038_000019_leftImg8bit.png gtFine/val/munster/munster_000038_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000039_000019_leftImg8bit.png gtFine/val/munster/munster_000039_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000040_000019_leftImg8bit.png gtFine/val/munster/munster_000040_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000041_000019_leftImg8bit.png gtFine/val/munster/munster_000041_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000042_000019_leftImg8bit.png gtFine/val/munster/munster_000042_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000043_000019_leftImg8bit.png gtFine/val/munster/munster_000043_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000044_000019_leftImg8bit.png gtFine/val/munster/munster_000044_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000045_000019_leftImg8bit.png gtFine/val/munster/munster_000045_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000046_000019_leftImg8bit.png gtFine/val/munster/munster_000046_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000047_000019_leftImg8bit.png gtFine/val/munster/munster_000047_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000048_000019_leftImg8bit.png gtFine/val/munster/munster_000048_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000049_000019_leftImg8bit.png gtFine/val/munster/munster_000049_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000050_000019_leftImg8bit.png gtFine/val/munster/munster_000050_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000051_000019_leftImg8bit.png gtFine/val/munster/munster_000051_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000052_000019_leftImg8bit.png gtFine/val/munster/munster_000052_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000053_000019_leftImg8bit.png gtFine/val/munster/munster_000053_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000054_000019_leftImg8bit.png gtFine/val/munster/munster_000054_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000055_000019_leftImg8bit.png gtFine/val/munster/munster_000055_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000056_000019_leftImg8bit.png gtFine/val/munster/munster_000056_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000057_000019_leftImg8bit.png gtFine/val/munster/munster_000057_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000058_000019_leftImg8bit.png gtFine/val/munster/munster_000058_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000059_000019_leftImg8bit.png gtFine/val/munster/munster_000059_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000060_000019_leftImg8bit.png gtFine/val/munster/munster_000060_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000061_000019_leftImg8bit.png gtFine/val/munster/munster_000061_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000062_000019_leftImg8bit.png gtFine/val/munster/munster_000062_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000063_000019_leftImg8bit.png gtFine/val/munster/munster_000063_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000064_000019_leftImg8bit.png gtFine/val/munster/munster_000064_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000065_000019_leftImg8bit.png gtFine/val/munster/munster_000065_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000066_000019_leftImg8bit.png gtFine/val/munster/munster_000066_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000067_000019_leftImg8bit.png gtFine/val/munster/munster_000067_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000068_000019_leftImg8bit.png gtFine/val/munster/munster_000068_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000069_000019_leftImg8bit.png gtFine/val/munster/munster_000069_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000070_000019_leftImg8bit.png gtFine/val/munster/munster_000070_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000071_000019_leftImg8bit.png gtFine/val/munster/munster_000071_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000072_000019_leftImg8bit.png gtFine/val/munster/munster_000072_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000073_000019_leftImg8bit.png gtFine/val/munster/munster_000073_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000074_000019_leftImg8bit.png gtFine/val/munster/munster_000074_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000075_000019_leftImg8bit.png gtFine/val/munster/munster_000075_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000076_000019_leftImg8bit.png gtFine/val/munster/munster_000076_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000077_000019_leftImg8bit.png gtFine/val/munster/munster_000077_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000078_000019_leftImg8bit.png gtFine/val/munster/munster_000078_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000079_000019_leftImg8bit.png gtFine/val/munster/munster_000079_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000080_000019_leftImg8bit.png gtFine/val/munster/munster_000080_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000081_000019_leftImg8bit.png gtFine/val/munster/munster_000081_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000082_000019_leftImg8bit.png gtFine/val/munster/munster_000082_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000083_000019_leftImg8bit.png gtFine/val/munster/munster_000083_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000084_000019_leftImg8bit.png gtFine/val/munster/munster_000084_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000085_000019_leftImg8bit.png gtFine/val/munster/munster_000085_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000086_000019_leftImg8bit.png gtFine/val/munster/munster_000086_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000087_000019_leftImg8bit.png gtFine/val/munster/munster_000087_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000088_000019_leftImg8bit.png gtFine/val/munster/munster_000088_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000089_000019_leftImg8bit.png gtFine/val/munster/munster_000089_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000090_000019_leftImg8bit.png gtFine/val/munster/munster_000090_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000091_000019_leftImg8bit.png gtFine/val/munster/munster_000091_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000092_000019_leftImg8bit.png gtFine/val/munster/munster_000092_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000093_000019_leftImg8bit.png gtFine/val/munster/munster_000093_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000094_000019_leftImg8bit.png gtFine/val/munster/munster_000094_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000095_000019_leftImg8bit.png gtFine/val/munster/munster_000095_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000096_000019_leftImg8bit.png gtFine/val/munster/munster_000096_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000097_000019_leftImg8bit.png gtFine/val/munster/munster_000097_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000098_000019_leftImg8bit.png gtFine/val/munster/munster_000098_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000099_000019_leftImg8bit.png gtFine/val/munster/munster_000099_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000100_000019_leftImg8bit.png gtFine/val/munster/munster_000100_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000101_000019_leftImg8bit.png gtFine/val/munster/munster_000101_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000102_000019_leftImg8bit.png gtFine/val/munster/munster_000102_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000103_000019_leftImg8bit.png gtFine/val/munster/munster_000103_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000104_000019_leftImg8bit.png gtFine/val/munster/munster_000104_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000105_000019_leftImg8bit.png gtFine/val/munster/munster_000105_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000106_000019_leftImg8bit.png gtFine/val/munster/munster_000106_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000107_000019_leftImg8bit.png gtFine/val/munster/munster_000107_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000108_000019_leftImg8bit.png gtFine/val/munster/munster_000108_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000109_000019_leftImg8bit.png gtFine/val/munster/munster_000109_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000110_000019_leftImg8bit.png gtFine/val/munster/munster_000110_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000111_000019_leftImg8bit.png gtFine/val/munster/munster_000111_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000112_000019_leftImg8bit.png gtFine/val/munster/munster_000112_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000113_000019_leftImg8bit.png gtFine/val/munster/munster_000113_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000114_000019_leftImg8bit.png gtFine/val/munster/munster_000114_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000115_000019_leftImg8bit.png gtFine/val/munster/munster_000115_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000116_000019_leftImg8bit.png gtFine/val/munster/munster_000116_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000117_000019_leftImg8bit.png gtFine/val/munster/munster_000117_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000118_000019_leftImg8bit.png gtFine/val/munster/munster_000118_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000119_000019_leftImg8bit.png gtFine/val/munster/munster_000119_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000120_000019_leftImg8bit.png gtFine/val/munster/munster_000120_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000121_000019_leftImg8bit.png gtFine/val/munster/munster_000121_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000122_000019_leftImg8bit.png gtFine/val/munster/munster_000122_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000123_000019_leftImg8bit.png gtFine/val/munster/munster_000123_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000124_000019_leftImg8bit.png gtFine/val/munster/munster_000124_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000125_000019_leftImg8bit.png gtFine/val/munster/munster_000125_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000126_000019_leftImg8bit.png gtFine/val/munster/munster_000126_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000127_000019_leftImg8bit.png gtFine/val/munster/munster_000127_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000128_000019_leftImg8bit.png gtFine/val/munster/munster_000128_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000129_000019_leftImg8bit.png gtFine/val/munster/munster_000129_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000130_000019_leftImg8bit.png gtFine/val/munster/munster_000130_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000131_000019_leftImg8bit.png gtFine/val/munster/munster_000131_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000132_000019_leftImg8bit.png gtFine/val/munster/munster_000132_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000133_000019_leftImg8bit.png gtFine/val/munster/munster_000133_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000134_000019_leftImg8bit.png gtFine/val/munster/munster_000134_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000135_000019_leftImg8bit.png gtFine/val/munster/munster_000135_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000136_000019_leftImg8bit.png gtFine/val/munster/munster_000136_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000137_000019_leftImg8bit.png gtFine/val/munster/munster_000137_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000138_000019_leftImg8bit.png 
gtFine/val/munster/munster_000138_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000139_000019_leftImg8bit.png gtFine/val/munster/munster_000139_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000140_000019_leftImg8bit.png gtFine/val/munster/munster_000140_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000141_000019_leftImg8bit.png gtFine/val/munster/munster_000141_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000142_000019_leftImg8bit.png gtFine/val/munster/munster_000142_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000143_000019_leftImg8bit.png gtFine/val/munster/munster_000143_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000144_000019_leftImg8bit.png gtFine/val/munster/munster_000144_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000145_000019_leftImg8bit.png gtFine/val/munster/munster_000145_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000146_000019_leftImg8bit.png gtFine/val/munster/munster_000146_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000147_000019_leftImg8bit.png gtFine/val/munster/munster_000147_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000148_000019_leftImg8bit.png gtFine/val/munster/munster_000148_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000149_000019_leftImg8bit.png gtFine/val/munster/munster_000149_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000150_000019_leftImg8bit.png gtFine/val/munster/munster_000150_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000151_000019_leftImg8bit.png gtFine/val/munster/munster_000151_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000152_000019_leftImg8bit.png gtFine/val/munster/munster_000152_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000153_000019_leftImg8bit.png gtFine/val/munster/munster_000153_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000154_000019_leftImg8bit.png gtFine/val/munster/munster_000154_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000155_000019_leftImg8bit.png gtFine/val/munster/munster_000155_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000156_000019_leftImg8bit.png gtFine/val/munster/munster_000156_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000157_000019_leftImg8bit.png gtFine/val/munster/munster_000157_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000158_000019_leftImg8bit.png gtFine/val/munster/munster_000158_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000159_000019_leftImg8bit.png gtFine/val/munster/munster_000159_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000160_000019_leftImg8bit.png gtFine/val/munster/munster_000160_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000161_000019_leftImg8bit.png gtFine/val/munster/munster_000161_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000162_000019_leftImg8bit.png gtFine/val/munster/munster_000162_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000163_000019_leftImg8bit.png gtFine/val/munster/munster_000163_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000164_000019_leftImg8bit.png gtFine/val/munster/munster_000164_000019_gtFine_labelTrainIds.png +leftImg8bit/val/munster/munster_000165_000019_leftImg8bit.png gtFine/val/munster/munster_000165_000019_gtFine_labelTrainIds.png 
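Each line of this split file pairs an RGB frame with its labelTrainIds mask, separated by whitespace (the remaining munster entries continue below). A minimal sketch of how such a pair list is typically consumed; read_pair_list and the paths are illustrative, not part of this patch:

import os

def read_pair_list(list_path, root):
    """Parse 'image_path label_path' lines into absolute path pairs."""
    pairs = []
    with open(list_path) as f:
        for line in f:
            parts = line.split()
            if len(parts) != 2:
                continue  # skip blank or malformed lines
            img, gt = parts
            pairs.append((os.path.join(root, img), os.path.join(root, gt)))
    return pairs

# pairs = read_pair_list('val.txt', '/data/cityscapes')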
+leftImg8bit/val/munster/munster_000166_000019_leftImg8bit.png gtFine/val/munster/munster_000166_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000167_000019_leftImg8bit.png gtFine/val/munster/munster_000167_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000168_000019_leftImg8bit.png gtFine/val/munster/munster_000168_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000169_000019_leftImg8bit.png gtFine/val/munster/munster_000169_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000170_000019_leftImg8bit.png gtFine/val/munster/munster_000170_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000171_000019_leftImg8bit.png gtFine/val/munster/munster_000171_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000172_000019_leftImg8bit.png gtFine/val/munster/munster_000172_000019_gtFine_labelTrainIds.png
+leftImg8bit/val/munster/munster_000173_000019_leftImg8bit.png gtFine/val/munster/munster_000173_000019_gtFine_labelTrainIds.png
diff --git a/CDARTS_segmentation/tools/datasets/coco/__init__.py b/CDARTS_segmentation/tools/datasets/coco/__init__.py
new file mode 100644
index 0000000..15671ae
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/coco/__init__.py
@@ -0,0 +1,3 @@
+from .coco import COCO
+
+__all__ = ['COCO']
diff --git a/CDARTS_segmentation/tools/datasets/coco/coco.py b/CDARTS_segmentation/tools/datasets/coco/coco.py
new file mode 100644
index 0000000..914cc7e
--- /dev/null
+++ b/CDARTS_segmentation/tools/datasets/coco/coco.py
@@ -0,0 +1,160 @@
+# import numpy as np
+
+from datasets.BaseDataset import BaseDataset
+COCO_CATEGORIES = [
+    {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
+    {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
+    {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
+    {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
+    {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
+    {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
+    {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
+    {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
+    {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
+    {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
+    {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
+    {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
+    {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
+    {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
+    {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
+    {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
+    {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
+    {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
+    {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
+    {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
+    {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
+    {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
+    {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
+    {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
+    {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
+    {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
+    {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
+
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, + {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, + {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, + {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, + {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, + {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, + {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, + {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, + {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, + {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, + {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, + {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, + {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, + {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, + {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, + {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, + {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, + {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, + {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, + {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"}, + {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, + {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, + {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, + {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, + {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, + {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, + {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, + {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, + {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, + {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, + {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, + {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, + {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, + {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, + {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, + {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, + {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, + {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, + {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, + {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, + {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, + {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, + {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, + {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, + {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, + {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, + {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, + {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, + {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, + {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": 
"scissors"}, + {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, + {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, + {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, + {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"}, + {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"}, + {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, + {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, + {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, + {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, + {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, + {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"}, + {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, + {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, + {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, + {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, + {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, + {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, + {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, + {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, + {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"}, + {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"}, + {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, + {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, + {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, + {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, + {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, + {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, + {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"}, + {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, + {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, + {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, + {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, + {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, + {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, + {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, + {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, + {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, + {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, + {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, + {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, + {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, + {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, + {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, + {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, + {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, + {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, + {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, + {"color": [64, 170, 64], "isthing": 0, "id": 
192, "name": "mountain-merged"}, + {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, + {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"}, + {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, + {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"}, + {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, + {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, + {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, + {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}, +] + +COCO_COLORS = [] +COCO_NAMES = [] + +for item in COCO_CATEGORIES: + COCO_COLORS.append(item["color"]) + COCO_NAMES.append(item["name"]) + +class COCO(BaseDataset): + # trans_labels = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33] + + @classmethod + def get_class_colors(*args): + return COCO_COLORS + + @classmethod + def get_class_names(*args): + # class counting(gtFine) + # 2953 2811 2934 970 1296 2949 1658 2808 2891 1654 2686 2343 1023 2832 + # 359 274 142 513 1646 + return COCO_NAMES diff --git a/CDARTS_segmentation/tools/engine/__init__.py b/CDARTS_segmentation/tools/engine/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/CDARTS_segmentation/tools/engine/evaluator.py b/CDARTS_segmentation/tools/engine/evaluator.py new file mode 100644 index 0000000..ae72827 --- /dev/null +++ b/CDARTS_segmentation/tools/engine/evaluator.py @@ -0,0 +1,339 @@ +import os +import cv2 +import numpy as np +import time +from tqdm import tqdm + +import torch +import torch.multiprocessing as mp + +from engine.logger import get_logger +from utils.pyt_utils import load_model, link_file, ensure_dir +from utils.img_utils import pad_image_to_shape, normalize + +logger = get_logger() + + +class Evaluator(object): + def __init__(self, dataset, class_num, image_mean, image_std, network, + multi_scales, is_flip, devices=0, out_idx=0, threds=5, config=None, logger=None, + verbose=False, save_path=None, show_image=False, show_prediction=False): + self.dataset = dataset + self.ndata = self.dataset.get_length() + self.class_num = class_num + self.image_mean = image_mean + self.image_std = image_std + self.multi_scales = multi_scales + self.is_flip = is_flip + self.network = network + self.devices = devices + if type(self.devices) == int: self.devices = [self.devices] + self.out_idx = out_idx + self.threds = threds + self.config = config + self.logger = logger + + self.context = mp.get_context('spawn') + self.val_func = None + self.results_queue = self.context.Queue(self.ndata) + + self.verbose = verbose + self.save_path = save_path + if save_path is not None: + ensure_dir(save_path) + self.show_image = show_image + self.show_prediction = show_prediction + + def run(self, model_path, model_indice, log_file, log_file_link): + """There are four evaluation modes: + 1.only eval a .pth model: -e *.pth + 2.only eval a certain epoch: -e epoch + 3.eval all epochs in a given section: -e start_epoch-end_epoch + 4.eval all epochs from a certain started epoch: -e start_epoch- + """ + if '.pth' in model_indice: + models = [model_indice, ] + elif "-" in model_indice: + start_epoch = int(model_indice.split("-")[0]) + end_epoch = model_indice.split("-")[1] + + models = os.listdir(model_path) + models.remove("epoch-last.pth") + sorted_models = [None] * len(models) + model_idx = [0] * len(models) + + for idx, m in 
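COCO_CATEGORIES above keeps the original, non-contiguous COCO ids (12, 26, 29, 30 and others are absent), so training code usually remaps them to contiguous train ids and derives the palette from the color field. A sketch of that remapping; the import path is an assumption based on where coco.py is added above:

import numpy as np
from datasets.coco.coco import COCO_CATEGORIES

# Original COCO id -> contiguous train id, in list order.
id_to_train_id = {c["id"]: i for i, c in enumerate(COCO_CATEGORIES)}
# Train id -> RGB color, usable as a lookup table.
train_id_to_color = np.array([c["color"] for c in COCO_CATEGORIES], dtype=np.uint8)

def colorize(train_id_mask):
    """Map an HxW array of train ids to an HxWx3 color image."""
    return train_id_to_color[train_id_mask]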
enumerate(models): + num = m.split(".")[0].split("-")[1] + model_idx[idx] = num + sorted_models[idx] = m + model_idx = np.array([int(i) for i in model_idx]) + + down_bound = model_idx >= start_epoch + up_bound = [True] * len(sorted_models) + if end_epoch: + end_epoch = int(end_epoch) + assert start_epoch < end_epoch + up_bound = model_idx <= end_epoch + bound = up_bound * down_bound + model_slice = np.array(sorted_models)[bound] + models = [os.path.join(model_path, model) for model in + model_slice] + else: + models = [os.path.join(model_path, + 'epoch-%s.pth' % model_indice), ] + + results = open(log_file, 'a') + link_file(log_file, log_file_link) + + for model in models: + logger.info("Load Model: %s" % model) + self.val_func = load_model(self.network, model) + result_line, mIoU = self.multi_process_evaluation() + + results.write('Model: ' + model + '\n') + results.write(result_line) + results.write('\n') + results.flush() + + results.close() + + def run_online(self): + """ + eval during training + """ + self.val_func = self.network + result_line, mIoU = self.single_process_evaluation() + return result_line, mIoU + + def single_process_evaluation(self): + all_results = [] + from pdb import set_trace as bp + with torch.no_grad(): + for idx in tqdm(range(self.ndata)): + dd = self.dataset[idx] + results_dict = self.func_per_iteration(dd, self.devices[0], iter=idx) + all_results.append(results_dict) + _, _mIoU = self.compute_metric([results_dict]) + result_line, mIoU = self.compute_metric(all_results) + return result_line, mIoU + + def run_online_multiprocess(self): + """ + eval during training + """ + self.val_func = self.network + result_line, mIoU = self.multi_process_single_gpu_evaluation() + return result_line, mIoU + + def multi_process_single_gpu_evaluation(self): + # start_eval_time = time.perf_counter() + stride = int(np.ceil(self.ndata / self.threds)) + + # start multi-process on single-gpu + procs = [] + for d in range(self.threds): + e_record = min((d + 1) * stride, self.ndata) + shred_list = list(range(d * stride, e_record)) + device = self.devices[0] + logger.info('Thread %d handle %d data.' % (d, len(shred_list))) + p = self.context.Process(target=self.worker, args=(shred_list, device)) + procs.append(p) + + for p in procs: + p.start() + + all_results = [] + for _ in tqdm(range(self.ndata)): + t = self.results_queue.get() + all_results.append(t) + if self.verbose: + self.compute_metric(all_results) + + for p in procs: + p.join() + + result_line, mIoU = self.compute_metric(all_results) + # logger.info('Evaluation Elapsed Time: %.2fs' % (time.perf_counter() - start_eval_time)) + return result_line, mIoU + + def multi_process_evaluation(self): + start_eval_time = time.perf_counter() + nr_devices = len(self.devices) + stride = int(np.ceil(self.ndata / nr_devices)) + + # start multi-process on multi-gpu + procs = [] + for d in range(nr_devices): + e_record = min((d + 1) * stride, self.ndata) + shred_list = list(range(d * stride, e_record)) + device = self.devices[d] + logger.info('GPU %s handle %d data.' 
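run() turns the -e argument into a checkpoint list according to the four modes in its docstring. Under the epoch-<n>.pth naming used above, the range forms reduce to the following sketch (select_epochs is illustrative, not part of this patch):

# '-e 10-20' -> epochs 10..20 inclusive; '-e 10-' -> every epoch >= 10;
# '-e 15'    -> epoch-15.pth only;       '-e x.pth' -> that file directly.
def select_epochs(epochs, model_indice):
    start, _, end = model_indice.partition('-')
    lo = int(start)
    hi = int(end) if end else max(epochs)
    return [e for e in sorted(epochs) if lo <= e <= hi]

assert select_epochs([5, 10, 15, 20, 25], '10-20') == [10, 15, 20]
assert select_epochs([5, 10, 15, 20, 25], '10-') == [10, 15, 20, 25]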
% (device, len(shred_list))) + p = self.context.Process(target=self.worker, args=(shred_list, device)) + procs.append(p) + + for p in procs: + p.start() + + all_results = [] + for _ in tqdm(range(self.ndata)): + t = self.results_queue.get() + all_results.append(t) + if self.verbose: + self.compute_metric(all_results) + + for p in procs: + p.join() + + result_line, mIoU = self.compute_metric(all_results) + logger.info('Evaluation Elapsed Time: %.2fs' % (time.perf_counter() - start_eval_time)) + return result_line, mIoU + + def worker(self, shred_list, device): + # start_load_time = time.time() + # logger.info('Load Model on Device %d: %.2fs' % (device, time.time() - start_load_time)) + for idx in shred_list: + dd = self.dataset[idx] + results_dict = self.func_per_iteration(dd, device, iter=idx) + self.results_queue.put(results_dict) + + def func_per_iteration(self, data, device, iter=None): + raise NotImplementedError + + def compute_metric(self, results): + raise NotImplementedError + + # evaluate the whole image at once + def whole_eval(self, img, output_size, input_size=None, device=None): + if input_size is not None: + img, margin = self.process_image(img, input_size) + else: + img = self.process_image(img, input_size) + + pred = self.val_func_process(img, device) + if input_size is not None: + pred = pred[:, margin[0]:(pred.shape[1] - margin[1]), + margin[2]:(pred.shape[2] - margin[3])] + pred = pred.permute(1, 2, 0) + pred = pred.cpu().numpy() + if output_size is not None: + pred = cv2.resize(pred, + (output_size[1], output_size[0]), + interpolation=cv2.INTER_LINEAR) + + pred = pred.argmax(2) + + return pred + + # slide the window to evaluate the image + def sliding_eval(self, img, crop_size, stride_rate, device=None): + ori_rows, ori_cols, c = img.shape + processed_pred = np.zeros((ori_rows, ori_cols, self.class_num)) + + for s in self.multi_scales: + img_scale = cv2.resize(img, None, fx=s, fy=s, + interpolation=cv2.INTER_LINEAR) + new_rows, new_cols, _ = img_scale.shape + processed_pred += self.scale_process(img_scale, + (ori_rows, ori_cols), + crop_size, stride_rate, device) + + pred = processed_pred.argmax(2) + + return pred + + def scale_process(self, img, ori_shape, crop_size, stride_rate, + device=None): + new_rows, new_cols, c = img.shape + long_size = new_cols if new_cols > new_rows else new_rows + + if long_size <= crop_size: + input_data, margin = self.process_image(img, crop_size) + score = self.val_func_process(input_data, device) + score = score[:, margin[0]:(score.shape[1] - margin[1]), + margin[2]:(score.shape[2] - margin[3])] + else: + stride = int(np.ceil(crop_size * stride_rate)) + img_pad, margin = pad_image_to_shape(img, crop_size, + cv2.BORDER_CONSTANT, value=0) + + pad_rows = img_pad.shape[0] + pad_cols = img_pad.shape[1] + r_grid = int(np.ceil((pad_rows - crop_size) / stride)) + 1 + c_grid = int(np.ceil((pad_cols - crop_size) / stride)) + 1 + data_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda( + device) + count_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda( + device) + + for grid_yidx in range(r_grid): + for grid_xidx in range(c_grid): + s_x = grid_xidx * stride + s_y = grid_yidx * stride + e_x = min(s_x + crop_size, pad_cols) + e_y = min(s_y + crop_size, pad_rows) + s_x = e_x - crop_size + s_y = e_y - crop_size + img_sub = img_pad[s_y:e_y, s_x: e_x, :] + count_scale[:, s_y: e_y, s_x: e_x] += 1 + + input_data, tmargin = self.process_image(img_sub, crop_size) + temp_score = self.val_func_process(input_data, device) + temp_score = 
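scale_process tiles the padded image with crop_size windows stepped by stride = ceil(crop_size * stride_rate), clamping the last window back inside the border (the s_x = e_x - crop_size line after the min). A worked check of the grid arithmetic with illustrative sizes:

import numpy as np

crop_size, stride_rate = 769, 2 / 3
stride = int(np.ceil(crop_size * stride_rate))              # 513
pad_rows, pad_cols = 1025, 2049                             # example padded shape
r_grid = int(np.ceil((pad_rows - crop_size) / stride)) + 1  # 2 window rows
c_grid = int(np.ceil((pad_cols - crop_size) / stride)) + 1  # 4 window columns

# Last column: s_x = 3 * 513 = 1539, e_x = min(1539 + 769, 2049) = 2049,
# then s_x is pulled back to 2049 - 769 = 1280 so the crop stays in bounds.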
temp_score[:, + tmargin[0]:(temp_score.shape[1] - tmargin[1]), + tmargin[2]:(temp_score.shape[2] - tmargin[3])] + data_scale[:, s_y: e_y, s_x: e_x] += temp_score + # score = data_scale / count_scale + score = data_scale + score = score[:, margin[0]:(score.shape[1] - margin[1]), + margin[2]:(score.shape[2] - margin[3])] + + score = score.permute(1, 2, 0) + data_output = cv2.resize(score.cpu().numpy(), + (ori_shape[1], ori_shape[0]), + interpolation=cv2.INTER_LINEAR) + + return data_output + + def val_func_process(self, input_data, device=None): + input_data = np.ascontiguousarray(input_data[None, :, :, :], dtype=np.float32) + input_data = torch.FloatTensor(input_data).cuda(device) + + with torch.cuda.device(input_data.get_device()): + self.val_func.eval() + self.val_func.to(input_data.get_device()) + with torch.no_grad(): + score = self.val_func(input_data) + if (isinstance(score, tuple) or isinstance(score, list)) and len(score) > 1: + score = score[self.out_idx] + score = score[0] # a single image pass, ignore batch dim + + if self.is_flip: + input_data = input_data.flip(-1) + score_flip = self.val_func(input_data) + score_flip = score_flip[0] + score += score_flip.flip(-1) + score = torch.exp(score) + # score = score.data + + return score + + def process_image(self, img, crop_size=None): + p_img = img + + if img.shape[2] < 3: + im_b = p_img + im_g = p_img + im_r = p_img + p_img = np.concatenate((im_b, im_g, im_r), axis=2) + + p_img = normalize(p_img, self.image_mean, self.image_std) + + if crop_size is not None: + p_img, margin = pad_image_to_shape(p_img, crop_size, cv2.BORDER_CONSTANT, value=0) + p_img = p_img.transpose(2, 0, 1) + + return p_img, margin + + p_img = p_img.transpose(2, 0, 1) + + return p_img diff --git a/CDARTS_segmentation/tools/engine/logger.py b/CDARTS_segmentation/tools/engine/logger.py new file mode 100644 index 0000000..10d8f2e --- /dev/null +++ b/CDARTS_segmentation/tools/engine/logger.py @@ -0,0 +1,90 @@ +import os +import sys +import logging + +_default_level_name = os.getenv('ENGINE_LOGGING_LEVEL', 'INFO') +_default_level = logging.getLevelName(_default_level_name.upper()) + + +class LogFormatter(logging.Formatter): + log_fout = None + date_full = '[%(asctime)s %(lineno)d@%(filename)s:%(name)s] ' + date = '%(asctime)s ' + msg = '%(message)s' + + def format(self, record): + if record.levelno == logging.DEBUG: + mcl, mtxt = self._color_dbg, 'DBG' + elif record.levelno == logging.WARNING: + mcl, mtxt = self._color_warn, 'WRN' + elif record.levelno == logging.ERROR: + mcl, mtxt = self._color_err, 'ERR' + else: + mcl, mtxt = self._color_normal, '' + + if mtxt: + mtxt += ' ' + + if self.log_fout: + self.__set_fmt(self.date_full + mtxt + self.msg) + formatted = super(LogFormatter, self).format(record) + # self.log_fout.write(formatted) + # self.log_fout.write('\n') + # self.log_fout.flush() + return formatted + + self.__set_fmt(self._color_date(self.date) + mcl(mtxt + self.msg)) + formatted = super(LogFormatter, self).format(record) + + return formatted + + if sys.version_info.major < 3: + def __set_fmt(self, fmt): + self._fmt = fmt + else: + def __set_fmt(self, fmt): + self._style._fmt = fmt + + @staticmethod + def _color_dbg(msg): + return '\x1b[36m{}\x1b[0m'.format(msg) + + @staticmethod + def _color_warn(msg): + return '\x1b[1;31m{}\x1b[0m'.format(msg) + + @staticmethod + def _color_err(msg): + return '\x1b[1;4;31m{}\x1b[0m'.format(msg) + + @staticmethod + def _color_omitted(msg): + return '\x1b[35m{}\x1b[0m'.format(msg) + + @staticmethod + def _color_normal(msg): 
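val_func_process treats the network output as log-probabilities: it optionally adds the score of the horizontally flipped input (flipped back with flip(-1)) before applying torch.exp. The same pattern in isolation; model stands for any module returning per-class log-score maps and is an assumption here:

import torch

@torch.no_grad()
def flip_tta(model, x):
    # Sum log-scores of x and its horizontal flip; the missing factor of
    # 1/2 does not change the argmax, and exp maps back to plain scores.
    score = model(x)[0]                            # drop the batch dimension
    score = score + model(x.flip(-1))[0].flip(-1)  # flip prediction back
    return torch.exp(score)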
+ return msg + + @staticmethod + def _color_date(msg): + return '\x1b[32m{}\x1b[0m'.format(msg) + + +def get_logger(log_dir=None, log_file=None, formatter=LogFormatter): + logger = logging.getLogger() + logger.setLevel(_default_level) + del logger.handlers[:] + + if log_dir and log_file: + if not os.path.isdir(log_dir): os.makedirs(log_dir) + LogFormatter.log_fout = True + file_handler = logging.FileHandler(log_file, mode='a') + file_handler.setLevel(logging.INFO) + file_handler.setFormatter(formatter) + logger.addHandler(file_handler) + + stream_handler = logging.StreamHandler() + stream_handler.setFormatter(formatter(datefmt='%d %H:%M:%S')) + stream_handler.setLevel(0) + logger.addHandler(stream_handler) + return logger diff --git a/CDARTS_segmentation/tools/engine/tester.py b/CDARTS_segmentation/tools/engine/tester.py new file mode 100644 index 0000000..5b80011 --- /dev/null +++ b/CDARTS_segmentation/tools/engine/tester.py @@ -0,0 +1,312 @@ +import os +import os.path as osp +import cv2 +import numpy as np +import time +from tqdm import tqdm + +import torch +import torch.nn.functional as F +import torch.multiprocessing as mp + +from engine.logger import get_logger +from utils.pyt_utils import load_model, link_file, ensure_dir +from utils.img_utils import pad_image_to_shape, normalize + +logger = get_logger() + + +class Tester(object): + def __init__(self, dataset, class_num, image_mean, image_std, network, + multi_scales, is_flip, devices=0, out_idx=0, threds=3, config=None, logger=None, + verbose=False, save_path=None, show_prediction=False): + self.dataset = dataset + self.ndata = self.dataset.get_length() + self.class_num = class_num + self.image_mean = image_mean + self.image_std = image_std + self.multi_scales = multi_scales + self.is_flip = is_flip + self.network = network + self.devices = devices + if type(self.devices) == int: self.devices = [self.devices] + self.out_idx = out_idx + self.threds = threds + self.config = config + self.logger = logger + + self.context = mp.get_context('spawn') + self.val_func = None + self.results_queue = self.context.Queue(self.ndata) + + self.verbose = verbose + self.save_path = save_path + if save_path is not None: + ensure_dir(save_path) + self.show_prediction = show_prediction + + def run(self, model_path, model_indice, log_file, log_file_link): + """There are four evaluation modes: + 1.only eval a .pth model: -e *.pth + 2.only eval a certain epoch: -e epoch + 3.eval all epochs in a given section: -e start_epoch-end_epoch + 4.eval all epochs from a certain started epoch: -e start_epoch- + """ + if '.pth' in model_indice: + models = [model_indice, ] + elif "-" in model_indice: + start_epoch = int(model_indice.split("-")[0]) + end_epoch = model_indice.split("-")[1] + + models = os.listdir(model_path) + models.remove("epoch-last.pth") + sorted_models = [None] * len(models) + model_idx = [0] * len(models) + + for idx, m in enumerate(models): + num = m.split(".")[0].split("-")[1] + model_idx[idx] = num + sorted_models[idx] = m + model_idx = np.array([int(i) for i in model_idx]) + + down_bound = model_idx >= start_epoch + up_bound = [True] * len(sorted_models) + if end_epoch: + end_epoch = int(end_epoch) + assert start_epoch < end_epoch + up_bound = model_idx <= end_epoch + bound = up_bound * down_bound + model_slice = np.array(sorted_models)[bound] + models = [os.path.join(model_path, model) for model in + model_slice] + else: + models = [os.path.join(model_path, + 'epoch-%s.pth' % model_indice), ] + + results = open(log_file, 'a') + 
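get_logger resets the root logger and attaches a colored stream handler, plus a file handler when both log_dir and log_file are given. A usage sketch under those definitions (paths illustrative):

from engine.logger import get_logger

logger = get_logger()            # console only, level from ENGINE_LOGGING_LEVEL
logger.info('search started')
logger.warning('shown in red on the console')

# get_logger(log_dir='log', log_file='log/train.log') also writes to disk;
# note that file_handler.setFormatter(formatter) above receives the class
# itself, so passing an instance (formatter()) may be needed in practice.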
link_file(log_file, log_file_link) + + for model in models: + logger.info("Load Model: %s" % model) + self.val_func = load_model(self.network, model) + result_line, mIoU = self.multi_process_evaluation() + + results.write('Model: ' + model + '\n') + results.write(result_line) + results.write('\n') + results.flush() + + results.close() + + def run_online(self): + """ + eval during training + """ + self.val_func = self.network + self.single_process_evaluation() + + def single_process_evaluation(self): + with torch.no_grad(): + for idx in tqdm(range(self.ndata)): + dd = self.dataset[idx] + self.func_per_iteration(dd, self.devices[0], iter=idx) + + def run_online_multiprocess(self): + """ + eval during training + """ + self.val_func = self.network + self.multi_process_single_gpu_evaluation() + + def multi_process_single_gpu_evaluation(self): + # start_eval_time = time.perf_counter() + stride = int(np.ceil(self.ndata / self.threds)) + + # start multi-process on single-gpu + procs = [] + for d in range(self.threds): + e_record = min((d + 1) * stride, self.ndata) + shred_list = list(range(d * stride, e_record)) + device = self.devices[0] + logger.info('Thread %d handle %d data.' % (d, len(shred_list))) + p = self.context.Process(target=self.worker, args=(shred_list, device)) + procs.append(p) + + for p in procs: + p.start() + + for p in procs: + p.join() + + + def multi_process_evaluation(self): + start_eval_time = time.perf_counter() + nr_devices = len(self.devices) + stride = int(np.ceil(self.ndata / nr_devices)) + + # start multi-process on multi-gpu + procs = [] + for d in range(nr_devices): + e_record = min((d + 1) * stride, self.ndata) + shred_list = list(range(d * stride, e_record)) + device = self.devices[d] + logger.info('GPU %s handle %d data.' % (device, len(shred_list))) + p = self.context.Process(target=self.worker, args=(shred_list, device)) + procs.append(p) + + for p in procs: + p.start() + + for p in procs: + p.join() + + + def worker(self, shred_list, device): + start_load_time = time.time() + # logger.info('Load Model on Device %d: %.2fs' % (device, time.time() - start_load_time)) + for idx in shred_list: + dd = self.dataset[idx] + results_dict = self.func_per_iteration(dd, device, iter=idx) + self.results_queue.put(results_dict) + + def func_per_iteration(self, data, device, iter=None): + raise NotImplementedError + + def compute_metric(self, results): + raise NotImplementedError + + # evaluate the whole image at once + def whole_eval(self, img, output_size, input_size=None, device=None): + if input_size is not None: + img, margin = self.process_image(img, input_size) + else: + img = self.process_image(img, input_size) + + pred = self.val_func_process(img, device) + if input_size is not None: + pred = pred[:, margin[0]:(pred.shape[1] - margin[1]), + margin[2]:(pred.shape[2] - margin[3])] + pred = pred.permute(1, 2, 0) + pred = pred.cpu().numpy() + if output_size is not None: + pred = cv2.resize(pred, + (output_size[1], output_size[0]), + interpolation=cv2.INTER_LINEAR) + + pred = pred.argmax(2) + + return pred + + # slide the window to evaluate the image + def sliding_eval(self, img, crop_size, stride_rate, device=None): + ori_rows, ori_cols, c = img.shape + processed_pred = np.zeros((ori_rows, ori_cols, self.class_num)) + + for s in self.multi_scales: + img_scale = cv2.resize(img, None, fx=s, fy=s, + interpolation=cv2.INTER_LINEAR) + new_rows, new_cols, _ = img_scale.shape + processed_pred += self.scale_process(img_scale, + (ori_rows, ori_cols), + crop_size, stride_rate, 
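Like Evaluator, Tester leaves func_per_iteration and compute_metric abstract, so a concrete runner must supply both. A hypothetical minimal subclass showing the expected shape of the hooks; the 'data' and 'label' keys follow the usual BaseDataset layout and are an assumption here:

from engine.tester import Tester

class SegTesterSketch(Tester):
    def func_per_iteration(self, data, device, iter=None):
        img, label = data['data'], data['label']
        pred = self.whole_eval(img, output_size=label.shape, device=device)
        return {'pred': pred, 'label': label}

    def compute_metric(self, results):
        # Trivial pixel accuracy over all collected images.
        correct = sum((r['pred'] == r['label']).sum() for r in results)
        total = sum(r['label'].size for r in results)
        acc = float(correct) / total
        return 'acc: %.4f' % acc, acc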
device) + + pred = processed_pred.argmax(2) + + return pred + + def scale_process(self, img, ori_shape, crop_size, stride_rate, + device=None): + new_rows, new_cols, c = img.shape + long_size = new_cols if new_cols > new_rows else new_rows + + if long_size <= crop_size: + input_data, margin = self.process_image(img, crop_size) + score = self.val_func_process(input_data, device) + score = score[:, margin[0]:(score.shape[1] - margin[1]), + margin[2]:(score.shape[2] - margin[3])] + else: + stride = int(np.ceil(crop_size * stride_rate)) + img_pad, margin = pad_image_to_shape(img, crop_size, + cv2.BORDER_CONSTANT, value=0) + + pad_rows = img_pad.shape[0] + pad_cols = img_pad.shape[1] + r_grid = int(np.ceil((pad_rows - crop_size) / stride)) + 1 + c_grid = int(np.ceil((pad_cols - crop_size) / stride)) + 1 + data_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda( + device) + count_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda( + device) + + for grid_yidx in range(r_grid): + for grid_xidx in range(c_grid): + s_x = grid_xidx * stride + s_y = grid_yidx * stride + e_x = min(s_x + crop_size, pad_cols) + e_y = min(s_y + crop_size, pad_rows) + s_x = e_x - crop_size + s_y = e_y - crop_size + img_sub = img_pad[s_y:e_y, s_x: e_x, :] + count_scale[:, s_y: e_y, s_x: e_x] += 1 + + input_data, tmargin = self.process_image(img_sub, crop_size) + temp_score = self.val_func_process(input_data, device) + temp_score = temp_score[:, + tmargin[0]:(temp_score.shape[1] - tmargin[1]), + tmargin[2]:(temp_score.shape[2] - tmargin[3])] + data_scale[:, s_y: e_y, s_x: e_x] += temp_score + # score = data_scale / count_scale + score = data_scale + score = score[:, margin[0]:(score.shape[1] - margin[1]), + margin[2]:(score.shape[2] - margin[3])] + + score = score.permute(1, 2, 0) + data_output = cv2.resize(score.cpu().numpy(), + (ori_shape[1], ori_shape[0]), + interpolation=cv2.INTER_LINEAR) + + return data_output + + def val_func_process(self, input_data, device=None): + input_data = np.ascontiguousarray(input_data[None, :, :, :], dtype=np.float32) + input_data = torch.FloatTensor(input_data).cuda(device) + + with torch.cuda.device(input_data.get_device()): + self.val_func.eval() + self.val_func.to(input_data.get_device()) + with torch.no_grad(): + score = self.val_func(input_data) + if (isinstance(score, tuple) or isinstance(score, list)) and len(score) > 1: + score = score[self.out_idx] + score = score[0] # a single image pass, ignore batch dim + + if self.is_flip: + input_data = input_data.flip(-1) + score_flip = self.val_func(input_data) + score_flip = score_flip[0] + score += score_flip.flip(-1) + score = torch.exp(score) + # score = score.data + + return score + + def process_image(self, img, crop_size=None): + p_img = img + + if img.shape[2] < 3: + im_b = p_img + im_g = p_img + im_r = p_img + p_img = np.concatenate((im_b, im_g, im_r), axis=2) + + p_img = normalize(p_img, self.image_mean, self.image_std) + + if crop_size is not None: + p_img, margin = pad_image_to_shape(p_img, crop_size, cv2.BORDER_CONSTANT, value=0) + p_img = p_img.transpose(2, 0, 1) + + return p_img, margin + + p_img = p_img.transpose(2, 0, 1) + + return p_img diff --git a/CDARTS_segmentation/tools/seg_opr/__init__.py b/CDARTS_segmentation/tools/seg_opr/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/CDARTS_segmentation/tools/seg_opr/loss_opr.py b/CDARTS_segmentation/tools/seg_opr/loss_opr.py new file mode 100644 index 0000000..455b758 --- /dev/null +++ b/CDARTS_segmentation/tools/seg_opr/loss_opr.py @@ -0,0 
+1,199 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from engine.logger import get_logger + +logger = get_logger() + +L1Loss = nn.L1Loss +MSELoss = nn.MSELoss +CrossEntropyLoss = nn.CrossEntropyLoss + +class SigmoidFocalLoss(nn.Module): + def __init__(self, ignore_label, gamma=2.0, alpha=0.25, + reduction='mean'): + super(SigmoidFocalLoss, self).__init__() + self.ignore_label = ignore_label + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + + def forward(self, pred, target): + b, h, w = target.size() + pred = pred.view(b, -1, 1) + pred_sigmoid = pred.sigmoid() + target = target.view(b, -1).float() + mask = (target.ne(self.ignore_label)).float() + target = mask * target + onehot = target.view(b, -1, 1) + + max_val = (-pred_sigmoid).clamp(min=0) + + pos_part = (1 - pred_sigmoid) ** self.gamma * ( + pred_sigmoid - pred_sigmoid * onehot) + neg_part = pred_sigmoid ** self.gamma * (max_val + ( + (-max_val).exp() + (-pred_sigmoid - max_val).exp()).log()) + + loss = -(self.alpha * pos_part + (1 - self.alpha) * neg_part).sum( + dim=-1) * mask + if self.reduction == 'mean': + loss = loss.mean() + + return loss + + +class ProbOhemCrossEntropy2d(nn.Module): + def __init__(self, ignore_label, reduction='mean', thresh=0.6, min_kept=256, + down_ratio=1, use_weight=False): + super(ProbOhemCrossEntropy2d, self).__init__() + self.ignore_label = ignore_label + self.thresh = float(thresh) + self.min_kept = int(min_kept) + self.down_ratio = down_ratio + if use_weight: + weight = torch.FloatTensor( + [0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489, + 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, + 1.0865, 1.1529, 1.0507]).cuda() + self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction, + weight=weight, + ignore_index=ignore_label) + else: + self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction, + ignore_index=ignore_label) + + def forward(self, pred, target): + b, c, h, w = pred.size() + target = target.view(-1) + valid_mask = target.ne(self.ignore_label) + target = target * valid_mask.long() + num_valid = valid_mask.sum() + + prob = F.softmax(pred, dim=1) + prob = (prob.transpose(0, 1)).reshape(c, -1) + + if self.min_kept > num_valid: + logger.info('Labels: {}'.format(num_valid)) + elif num_valid > 0: + prob = prob.masked_fill_(~valid_mask, 1) + mask_prob = prob[ + target, torch.arange(len(target), dtype=torch.long)] + threshold = self.thresh + if self.min_kept > 0: + index = mask_prob.argsort() + threshold_index = index[min(len(index), self.min_kept) - 1] + if mask_prob[threshold_index] > self.thresh: + threshold = mask_prob[threshold_index] + kept_mask = mask_prob.le(threshold) + target = target * kept_mask.long() + valid_mask = valid_mask * kept_mask + # logger.info('Valid Mask: {}'.format(valid_mask.sum())) + + target = target.masked_fill_(~valid_mask, self.ignore_label) + target = target.view(b, h, w) + + return self.criterion(pred, target) + +class RegularCE(nn.Module): + """ + Regular cross entropy loss for semantic segmentation, support pixel-wise loss weight. + Arguments: + ignore_label: Integer, label to ignore. + weight: Tensor, a manual rescaling weight given to each class. 
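ProbOhemCrossEntropy2d keeps a pixel only when the softmax score of its ground-truth class is at or below a threshold, and raises that threshold whenever fewer than min_kept pixels would otherwise survive. The selection rule on illustrative numbers:

import torch

gt_prob = torch.tensor([0.9, 0.2, 0.7, 0.4, 0.95])  # softmax score of the true class
thresh, min_kept = 0.6, 3

index = gt_prob.argsort()                            # hardest (lowest-score) pixels first
threshold_index = index[min(len(index), min_kept) - 1]
threshold = max(float(gt_prob[threshold_index]), thresh)  # 0.7 here: 0.6 alone keeps only 2
kept = gt_prob.le(threshold)                         # keeps the 0.2, 0.4 and 0.7 pixels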
+ """ + def __init__(self, ignore_label=-1, weight=None): + super(RegularCE, self).__init__() + self.ignore_label = ignore_label + self.criterion = nn.CrossEntropyLoss(weight=weight, + ignore_index=ignore_label, + reduction='none') + + def forward(self, logits, labels, **kwargs): + if 'semantic_weights' in kwargs: + pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights'] + pixel_losses = pixel_losses.contiguous().view(-1) + else: + pixel_losses = self.criterion(logits, labels).contiguous().view(-1) + mask = labels.contiguous().view(-1) != self.ignore_label + + pixel_losses = pixel_losses[mask] + return pixel_losses.mean() + + +class OhemCE(nn.Module): + """ + Online hard example mining with cross entropy loss, for semantic segmentation. + This is widely used in PyTorch semantic segmentation frameworks. + Reference: https://github.com/HRNet/HRNet-Semantic-Segmentation/blob/1b3ae72f6025bde4ea404305d502abea3c2f5266/lib/core/criterion.py#L29 + Arguments: + ignore_label: Integer, label to ignore. + threshold: Float, threshold for softmax score (of gt class), only predictions with softmax score + below this threshold will be kept. + min_kept: Integer, minimum number of pixels to be kept, it is used to adjust the + threshold value to avoid number of examples being too small. + weight: Tensor, a manual rescaling weight given to each class. + """ + def __init__(self, ignore_label=-1, threshold=0.7, + min_kept=100000, weight=None): + super(OhemCE, self).__init__() + self.threshold = threshold + self.min_kept = max(1, min_kept) + self.ignore_label = ignore_label + self.criterion = nn.CrossEntropyLoss(weight=weight, + ignore_index=ignore_label, + reduction='none') + + def forward(self, logits, labels, **kwargs): + predictions = F.softmax(logits, dim=1) + if 'semantic_weights' in kwargs: + pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights'] + pixel_losses = pixel_losses.contiguous().view(-1) + else: + pixel_losses = self.criterion(logits, labels).contiguous().view(-1) + mask = labels.contiguous().view(-1) != self.ignore_label + + tmp_labels = labels.clone() + tmp_labels[tmp_labels == self.ignore_label] = 0 + # Get the score for gt class at each pixel location. + predictions = predictions.gather(1, tmp_labels.unsqueeze(1)) + predictions, indices = predictions.contiguous().view(-1, )[mask].contiguous().sort() + min_value = predictions[min(self.min_kept, predictions.numel() - 1)] + threshold = max(min_value, self.threshold) + + pixel_losses = pixel_losses[mask][indices] + pixel_losses = pixel_losses[predictions < threshold] + return pixel_losses.mean() + + +class DeepLabCE(nn.Module): + """ + Hard pixel mining mining with cross entropy loss, for semantic segmentation. + This is used in TensorFlow DeepLab frameworks. + Reference: https://github.com/tensorflow/models/blob/bd488858d610e44df69da6f89277e9de8a03722c/research/deeplab/utils/train_utils.py#L33 + Arguments: + ignore_label: Integer, label to ignore. + top_k_percent_pixels: Float, the value lies in [0.0, 1.0]. When its value < 1.0, only compute the loss for + the top k percent pixels (e.g., the top 20% pixels). This is useful for hard pixel mining. + weight: Tensor, a manual rescaling weight given to each class. 
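OhemCE and the DeepLabCE variant whose definition completes just below differ only in how hard pixels are chosen: a score threshold with a min_kept floor versus a fixed top-k percentage of the per-pixel losses. The top-k step reduces to:

import torch

pixel_losses = torch.rand(512 * 512)    # per-pixel CE, ignore-label already masked out
top_k_percent_pixels = 0.2              # keep the hardest 20%

top_k = int(top_k_percent_pixels * pixel_losses.numel())
hard_losses, _ = torch.topk(pixel_losses, top_k)
loss = hard_losses.mean()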
+ """ + def __init__(self, ignore_label=-1, top_k_percent_pixels=1.0, weight=None): + super(DeepLabCE, self).__init__() + self.top_k_percent_pixels = top_k_percent_pixels + self.ignore_label = ignore_label + self.criterion = nn.CrossEntropyLoss(weight=weight, + ignore_index=ignore_label, + reduction='none') + + def forward(self, logits, labels, **kwargs): + if 'semantic_weights' in kwargs: + pixel_losses = self.criterion(logits, labels) * kwargs['semantic_weights'] + pixel_losses = pixel_losses.contiguous().view(-1) + else: + pixel_losses = self.criterion(logits, labels).contiguous().view(-1) + if self.top_k_percent_pixels == 1.0: + return pixel_losses.mean() + + top_k_pixels = int(self.top_k_percent_pixels * pixel_losses.numel()) + pixel_losses, _ = torch.topk(pixel_losses, top_k_pixels) + return pixel_losses.mean() \ No newline at end of file diff --git a/CDARTS_segmentation/tools/seg_opr/metric.py b/CDARTS_segmentation/tools/seg_opr/metric.py new file mode 100644 index 0000000..503c3cf --- /dev/null +++ b/CDARTS_segmentation/tools/seg_opr/metric.py @@ -0,0 +1,87 @@ +import numpy as np + +np.seterr(divide='ignore', invalid='ignore') + + +# voc cityscapes metric +def hist_info(n_cl, pred, gt): + assert (pred.shape == gt.shape) + k = (gt >= 0) & (gt < n_cl) + labeled = np.sum(k) + correct = np.sum((pred[k] == gt[k])) + + return np.bincount(n_cl * gt[k].astype(int) + pred[k].astype(int), + minlength=n_cl ** 2).reshape(n_cl, + n_cl), labeled, correct + + +def compute_score(hist, correct, labeled): + iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) + mean_IU = np.nanmean(iu) + mean_IU_no_back = np.nanmean(iu[1:]) + # freq = hist.sum(1) / hist.sum() + # freq_IU = (iu[freq > 0] * freq[freq > 0]).sum() + mean_pixel_acc = correct / labeled + + return iu, mean_IU, mean_IU_no_back, mean_pixel_acc + + +# ade metric +def meanIoU(area_intersection, area_union): + iou = 1.0 * np.sum(area_intersection, axis=1) / np.sum(area_union, axis=1) + meaniou = np.nanmean(iou) + meaniou_no_back = np.nanmean(iou[1:]) + + return iou, meaniou, meaniou_no_back + + +def intersectionAndUnion(imPred, imLab, numClass): + # Remove classes from unlabeled pixels in gt image. + # We should not penalize detections in unlabeled portions of the image. + imPred = np.asarray(imPred).copy() + imLab = np.asarray(imLab).copy() + + imPred += 1 + imLab += 1 + # Remove classes from unlabeled pixels in gt image. + # We should not penalize detections in unlabeled portions of the image. + imPred = imPred * (imLab > 0) + + # imPred = imPred * (imLab >= 0) + + # Compute area intersection: + intersection = imPred * (imPred == imLab) + (area_intersection, _) = np.histogram(intersection, bins=numClass, + range=(1, numClass)) + + # Compute area union: + (area_pred, _) = np.histogram(imPred, bins=numClass, range=(1, numClass)) + (area_lab, _) = np.histogram(imLab, bins=numClass, range=(1, numClass)) + area_union = area_pred + area_lab - area_intersection + + return area_intersection, area_union + + +def mean_pixel_accuracy(pixel_correct, pixel_labeled): + mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / ( + np.spacing(1) + np.sum(pixel_labeled)) + + return mean_pixel_accuracy + + +def pixelAccuracy(imPred, imLab): + # Remove classes from unlabeled pixels in gt image. + # We should not penalize detections in unlabeled portions of the image. 
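hist_info builds the n_cl x n_cl confusion matrix with a single bincount over n_cl * gt + pred, and compute_score turns it into per-class IoU = diag / (row_sum + col_sum - diag). A small end-to-end check:

import numpy as np

n_cl = 3
gt   = np.array([0, 0, 1, 1, 2, 2])
pred = np.array([0, 1, 1, 1, 2, 0])

hist = np.bincount(n_cl * gt + pred, minlength=n_cl ** 2).reshape(n_cl, n_cl)
iou = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
# class 0: 1 / (2 + 2 - 1) = 1/3, class 1: 2 / (2 + 3 - 2) = 2/3, class 2: 1/2
print(iou, np.nanmean(iou))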
+ pixel_labeled = np.sum(imLab >= 0) + pixel_correct = np.sum((imPred == imLab) * (imLab >= 0)) + pixel_accuracy = 1.0 * pixel_correct / pixel_labeled + + return pixel_accuracy, pixel_correct, pixel_labeled + + +def accuracy(preds, label): + valid = (label >= 0) + acc_sum = (valid * (preds == label)).sum() + valid_sum = valid.sum() + acc = float(acc_sum) / (valid_sum + 1e-10) + return acc, valid_sum diff --git a/CDARTS_segmentation/tools/utils/__init__.py b/CDARTS_segmentation/tools/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/CDARTS_segmentation/tools/utils/cal_model.py b/CDARTS_segmentation/tools/utils/cal_model.py new file mode 100644 index 0000000..27ec362 --- /dev/null +++ b/CDARTS_segmentation/tools/utils/cal_model.py @@ -0,0 +1,216 @@ +from __future__ import division +import os +import sys +import time +import glob +import json +import logging +import argparse +from tqdm import tqdm + +import torch +import torch.nn as nn +import torch.utils +import torch.nn.functional as F +import torch.optim as optim +import torch.distributed as dist +from tensorboardX import SummaryWriter + +import numpy as np +from thop import profile +from ptflops import get_model_complexity_info + +from config_train import config +# if config.is_eval: +# config.save = '../OUTPUT/eval-{}-{}'.format(config.save, time.strftime("%Y%m%d-%H%M%S")) +# else: +# config.save = '../OUTPUT/train-{}-{}'.format(config.save, time.strftime("%Y%m%d-%H%M%S")) +from dataloader import get_train_loader, CyclicIterator +from datasets import Cityscapes + +import dataloaders +from utils.init_func import init_weight +from utils.lr_scheduler import Iter_LR_Scheduler +from seg_opr.loss_opr import ProbOhemCrossEntropy2d +from eval import SegEvaluator +from test import SegTester + +from utils.darts_utils import create_exp_dir, save, plot_op, plot_path_width, objective_acc_lat +from utils.dist_utils import reduce_tensor, ModelEma +from model_seg import Network_Multi_Path_Infer_SPOS as Network +import seg_metrics + +import yaml +import timm +from timm.optim import create_optimizer +from utils.pyt_utils import AverageMeter, to_cuda, get_loss_info_str, compute_hist, compute_hist_np, load_pretrain + +def adjust_learning_rate(base_lr, power, optimizer, epoch, total_epoch): + for param_group in optimizer.param_groups: + param_group['lr'] = param_group['lr'] * power + + +# The first arg parser parses out only the --config argument, this argument is used to +# load a yaml file containing key-values that override the defaults for the main parser below +config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False) +parser.add_argument('-c', '--config', default='../configs/auto2/sz512drop0.2.yaml', type=str, metavar='FILE', + help='YAML config file specifying default arguments') + +parser = argparse.ArgumentParser(description='PyTorch Training') +parser.add_argument('--det2_cfg', type=str, default='configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml', help='') +parser.add_argument('--save', type=str, default='../OUTPUT/train', help='') +parser.add_argument("--local_rank", default=0, type=int) +parser.add_argument("--world_size", default=1, type=int) +parser.add_argument("--eval_height", default=1025, type=int, help='train height') +parser.add_argument("--eval_width", default=2049, type=int, help='train width') +parser.add_argument("--test_epoch", default=250, type=int, help='Epochs for test') +parser.add_argument("--batch_size", 
default=12, type=int, help='batch size')
+parser.add_argument("--Fch", default=12, type=int, help='Fch')
+parser.add_argument('--stem_head_width', type=float, default=1.0, help='stem and head width multiplier')
+parser.add_argument('--resume', type=str, default='../OUTPUT/train/', help='resume')
+
+## new retrain ###
+parser.add_argument('--sched', default='step', type=str, metavar='SCHEDULER',
+                    help='LR scheduler (default: "step")')
+parser.add_argument('--epochs', type=int, default=4000, help='num of training epochs')
+parser.add_argument('--dataset', type=str, default='cityscapes', help='pascal or cityscapes')
+parser.add_argument('--base_lr', type=float, default=0.05, help='base learning rate')
+parser.add_argument('--warmup_start_lr', type=float, default=5e-6, help='warm up learning rate')
+parser.add_argument('--lr-step', type=float, default=None)
+parser.add_argument('--warmup-iters', type=int, default=1000)
+parser.add_argument('--min-lr', type=float, default=None)
+parser.add_argument('--crop_size', type=int, default=769, help='image crop size')
+parser.add_argument('--resize', type=int, default=769, help='image resize size')
+parser.add_argument("--image_height", default=513, type=int, help='train height')
+parser.add_argument("--image_width", default=1025, type=int, help='train width')
+parser.add_argument('--workers', type=int, default=4, help='number of data loading workers')
+parser.add_argument('--dist', type=bool, default=True)
+parser.add_argument('--autodeeplab', type=str, default='train_seg')
+parser.add_argument('--max-iteration', default=1000000, type=int)
+parser.add_argument('--mode', default='poly', type=str, help='how the learning rate declines')
+parser.add_argument('--train_mode', type=str, default='iter', choices=['iter', 'epoch'])
+
+parser.add_argument("--data_path", default='/home/t-hongyuanyu/data/cityscapes', type=str, help='If specified, replace config.dataset_path')
+parser.add_argument("--load_path", default='', type=str, help='If specified, replace config.load_path')
+parser.add_argument("--json_file", default='jsons/0.json', type=str, help='path to the model architecture json')
+parser.add_argument("--seed", default=12345, type=int, help="random seed")
+parser.add_argument('--sync_bn', action='store_false',
+                    help='NVIDIA Apex or Torch synchronized BatchNorm is on by default; pass this flag to disable it.')
+parser.add_argument('--random_sample', action='store_true',
+                    help='Random sample path.')
+parser.add_argument('--drop_path_prob', type=float, default=0.0, help='drop path prob')
+
+# Optimizer parameters
+parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
+                    help='Optimizer (default: "sgd")')
+parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
+                    help='Optimizer Epsilon (default: 1e-8)')
+parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
+                    help='SGD momentum (default: 0.9)')
+parser.add_argument('--weight-decay', type=float, default=0.0001,
+                    help='weight decay (default: 0.0001)')
+
+# Model Exponential Moving Average
+parser.add_argument('--model-ema', action='store_true', default=False,
+                    help='Enable tracking moving average of model weights')
+parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
+                    help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
+parser.add_argument('--model-ema-decay', type=float, default=0.9998,
+                    help='decay factor for model weights moving average (default: 0.9998)')
+
+# train val
+parser.add_argument('--ignore', type=int, default=255, help='semantic ignore label')
+parser.add_argument('--eval_flip', action='store_true', default=False,
+                    help='enable horizontal flipping during semantic evaluation')
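+
+# Example invocation (illustrative; the flag values below are placeholders):
+#   python cal_model.py -c ../configs/auto2/sz512drop0.2.yaml --batch_size 16 --Fch 12
+# Values from the YAML config override the argparse defaults above, and flags
+# given on the command line override both (see _parse_args below).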
+
+
+class NpEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.integer):
+            return int(obj)
+        elif isinstance(obj, np.floating):
+            return float(obj)
+        elif isinstance(obj, np.ndarray):
+            return obj.tolist()
+        else:
+            return super(NpEncoder, self).default(obj)
+
+def _parse_args():
+    # Do we have a config file to parse?
+    args_config, remaining = config_parser.parse_known_args()
+    if args_config.config:
+        with open(args_config.config, 'r') as f:
+            cfg = yaml.safe_load(f)
+            parser.set_defaults(**cfg)
+
+    # The main arg parser parses the rest of the args; the usual
+    # defaults will have been overridden if a config file was specified.
+    args = parser.parse_args(remaining)
+
+    # Cache the args as a text string to save them in the output dir later
+    args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
+    return args, args_text
+
+def main():
+    args, args_text = _parse_args()
+
+    if args.load_path:
+        config.load_path = args.load_path
+
+    config.batch_size = args.batch_size
+    config.image_height = args.image_height
+    config.image_width = args.image_width
+    config.eval_height = args.eval_height
+    config.eval_width = args.eval_width
+    config.Fch = args.Fch
+    config.dataset_path = args.data_path
+    config.save = args.save
+
+    # preparation ################
+    np.random.seed(args.seed)
+    torch.manual_seed(args.seed)
+    torch.cuda.manual_seed_all(args.seed)
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = False
+
+    model_files = glob.glob("Search/1paths/*.json") + glob.glob("Search/2paths/*.json") + glob.glob("Search/3paths/*.json")
+
+    for model_file in model_files:
+        with open(model_file, 'r') as f:
+            # dict_a = json.loads(f, cls=NpEncoder)
+            model_dict = json.loads(f.read())
+
+        model = Network(
+            model_dict["ops"], model_dict["paths"], model_dict["downs"], model_dict["widths"], model_dict["lasts"],
+            num_classes=config.num_classes, layers=config.layers, Fch=config.Fch, width_mult_list=config.width_mult_list, stem_head_width=(args.stem_head_width, args.stem_head_width))
+
+        if args.local_rank == 0:
+            print("net: " + str(model))
+        # with torch.cuda.device(0):
+        #     macs, params = get_model_complexity_info(model, (3, 1024, 2048), as_strings=True,
+        #                                              print_per_layer_stat=True, verbose=True)
+        #     logging.info('{:<30} {:<8}'.format('Computational complexity: ', macs))
+        #     logging.info('{:<30} {:<8}'.format('Number of parameters: ', params))
+
+        flops, params = profile(model, inputs=(torch.randn(1, 3, 1024, 2048),), verbose=False)
+        flops = flops / 1e9
+        params = params / 1e6
+        model_dict['flops'] = flops
+        model_dict['params'] = params
+        print("params = %fM, FLOPs = %fG" % (params, flops))
+
+        with open(model_file, 'w') as f:
+            json.dump(model_dict, f, cls=NpEncoder)
+
+
+if __name__ == '__main__':
+    main()
+    #launch(
+    #    main,
+    #    2,
+    #    num_machines=1,
+    #    machine_rank=0,
+    #    dist_url='auto',
+    #)
diff --git a/CDARTS_segmentation/tools/utils/darts_utils.py b/CDARTS_segmentation/tools/utils/darts_utils.py
new file mode 100644
index 0000000..7973d27
--- /dev/null
+++ b/CDARTS_segmentation/tools/utils/darts_utils.py
@@ -0,0 +1,352 @@
+import os
+import math
+import numpy as np
+import torch +import shutil +from torch.autograd import Variable +import time +from tqdm import tqdm +from genotypes import PRIMITIVES +import matplotlib +# Force matplotlib to not use any Xwindows backend. +matplotlib.use('Agg') +from matplotlib import pyplot as plt +from pdb import set_trace as bp +import warnings + + +class AvgrageMeter(object): + + def __init__(self): + self.reset() + + def reset(self): + self.avg = 0 + self.sum = 0 + self.cnt = 0 + + def update(self, val, n=1): + self.sum += val * n + self.cnt += n + self.avg = self.sum / self.cnt + + +class Cutout(object): + def __init__(self, length): + self.length = length + + def __call__(self, img): + h, w = img.size(1), img.size(2) + mask = np.ones((h, w), np.float32) + y = np.random.randint(h) + x = np.random.randint(w) + + y1 = np.clip(y - self.length // 2, 0, h) + y2 = np.clip(y + self.length // 2, 0, h) + x1 = np.clip(x - self.length // 2, 0, w) + x2 = np.clip(x + self.length // 2, 0, w) + + mask[y1: y2, x1: x2] = 0. + mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img *= mask + return img + + +def count_parameters_in_MB(model): + return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name)/1e6 + + +def save_checkpoint(state, is_best, save): + filename = os.path.join(save, 'checkpoint.pth.tar') + torch.save(state, filename) + if is_best: + best_filename = os.path.join(save, 'model_best.pth.tar') + shutil.copyfile(filename, best_filename) + + +def save(model, model_path): + torch.save(model.state_dict(), model_path) + + +def load(model, model_path): + model.load_state_dict(torch.load(model_path)) + + +def drop_path(x, drop_prob): + if drop_prob > 0.: + keep_prob = 1.-drop_prob + mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob)) + x.div_(keep_prob) + x.mul_(mask) + return x + + +def create_exp_dir(path, scripts_to_save=None): + if not os.path.exists(path): + # os.mkdir(path) + os.makedirs(path, exist_ok=True) + print('Experiment dir : {}'.format(path)) + + if scripts_to_save is not None: + # os.mkdir(os.path.join(path, 'scripts')) + os.makedirs(os.path.join(path, 'scripts'), exist_ok=True) + for script in scripts_to_save: + dst_file = os.path.join(path, 'scripts', os.path.basename(script)) + shutil.copyfile(script, dst_file) + +########################## TensorRT speed_test ################################# +try: + import tensorrt as trt + import pycuda.driver as cuda + import pycuda.autoinit + + MAX_BATCH_SIZE = 1 + MAX_WORKSPACE_SIZE = 1 << 30 + + TRT_LOGGER = trt.Logger(trt.Logger.WARNING) + DTYPE = trt.float32 + + # Model + INPUT_NAME = 'input' + OUTPUT_NAME = 'output' + + def allocate_buffers(engine): + h_input = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)), dtype=trt.nptype(DTYPE)) + h_output = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(1)), dtype=trt.nptype(DTYPE)) + d_input = cuda.mem_alloc(h_input.nbytes) + d_output = cuda.mem_alloc(h_output.nbytes) + return h_input, d_input, h_output, d_output + + + def build_engine(model_file): + with trt.Builder(TRT_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, TRT_LOGGER) as parser: + builder.max_workspace_size = MAX_WORKSPACE_SIZE + builder.max_batch_size = MAX_BATCH_SIZE + + with open(model_file, 'rb') as model: + parser.parse(model.read()) + last_layer = network.get_layer(network.num_layers - 1) + network.mark_output(last_layer.get_output(0)) + return builder.build_cuda_engine(network) + + + def load_input(input_size, 
host_buffer): + assert len(input_size) == 4 + b, c, h, w = input_size + dtype = trt.nptype(DTYPE) + img_array = np.random.randn(c, h, w).astype(dtype).ravel() + np.copyto(host_buffer, img_array) + + + def do_inference(context, h_input, d_input, h_output, d_output, iterations=None): + # Transfer input data to the GPU. + cuda.memcpy_htod(d_input, h_input) + # warm-up + for _ in range(10): + context.execute(batch_size=1, bindings=[int(d_input), int(d_output)]) + # test proper iterations + if iterations is None: + elapsed_time = 0 + iterations = 100 + while elapsed_time < 1: + t_start = time.time() + for _ in range(iterations): + context.execute(batch_size=1, bindings=[int(d_input), int(d_output)]) + elapsed_time = time.time() - t_start + iterations *= 2 + FPS = iterations / elapsed_time + iterations = int(FPS * 3) + # Run inference. + t_start = time.time() + for _ in tqdm(range(iterations)): + context.execute(batch_size=1, bindings=[int(d_input), int(d_output)]) + elapsed_time = time.time() - t_start + latency = elapsed_time / iterations * 1000 + return latency + + + def compute_latency_ms_tensorrt(model, input_size, iterations=None): + model = model.cuda() + model.eval() + _, c, h, w = input_size + dummy_input = torch.randn(1, c, h, w, device='cuda') + torch.onnx.export(model, dummy_input, "model.onnx", verbose=False, input_names=["input"], output_names=["output"]) + with build_engine("model.onnx") as engine: + h_input, d_input, h_output, d_output = allocate_buffers(engine) + load_input(input_size, h_input) + with engine.create_execution_context() as context: + latency = do_inference(context, h_input, d_input, h_output, d_output, iterations=iterations) + # FPS = 1000 / latency (in ms) + return latency +except: + warnings.warn("TensorRT (or pycuda) is not installed. 
compute_latency_ms_tensorrt() cannot be used.") +######################################################################### + +def compute_latency_ms_pytorch(model, input_size, iterations=None, device=None): + torch.backends.cudnn.enabled = True + torch.backends.cudnn.benchmark = True + + model.eval() + model = model.cuda() + + input = torch.randn(*input_size).cuda() + + with torch.no_grad(): + for _ in range(10): + model(input) + + if iterations is None: + elapsed_time = 0 + iterations = 100 + while elapsed_time < 1: + torch.cuda.synchronize() + torch.cuda.synchronize() + t_start = time.time() + for _ in range(iterations): + model(input) + torch.cuda.synchronize() + torch.cuda.synchronize() + elapsed_time = time.time() - t_start + iterations *= 2 + FPS = iterations / elapsed_time + iterations = int(FPS * 6) + + print('=========Speed Testing=========') + torch.cuda.synchronize() + torch.cuda.synchronize() + t_start = time.time() + for _ in tqdm(range(iterations)): + model(input) + torch.cuda.synchronize() + torch.cuda.synchronize() + elapsed_time = time.time() - t_start + latency = elapsed_time / iterations * 1000 + torch.cuda.empty_cache() + # FPS = 1000 / latency (in ms) + return latency + + +def plot_path(lasts, paths=[]): + ''' + paths: list of path0~path2 + ''' + assert len(paths) > 0 + path0 = paths[0] + path1 = paths[1] if len(paths) > 1 else [] + path2 = paths[2] if len(paths) > 2 else [] + + if path0[-1] != lasts[0]: path0.append(lasts[0]) + if len(path1) != 0 and path1[-1] != lasts[1]: path1.append(lasts[1]) + if len(path2) != 0 and path2[-1] != lasts[2]: path2.append(lasts[2]) + x_len = max(len(path0), len(path1), len(path2)) + f, ax = plt.subplots(figsize=(x_len, 3)) + ax.plot(np.arange(len(path0)), 2 - np.array(path0), label='1/32', lw=2.5, color='#000000', linestyle='-')#, marker='o', markeredgecolor='r', markerfacecolor='r') + ax.plot(np.arange(len(path1)), 2 - np.array(path1) - 0.08, lw=1.8, label='1/16', color='#313131', linestyle='--')#, marker='^', markeredgecolor='b', markerfacecolor='b') + ax.plot(np.arange(len(path2)), 2 - np.array(path2) - 0.16, lw=1.2, label='1/8', color='#5a5858', linestyle='-.')#, marker='s', markeredgecolor='m', markerfacecolor='m') + plt.xticks(np.arange(x_len), list(range(1, x_len+1))) + plt.yticks(np.array([0, 1, 2]), ["1/32", "1/16", "1/8"]) + plt.ylabel("Scale", fontsize=17) + plt.xlabel("Layer", fontsize=17) + for tick in ax.xaxis.get_major_ticks(): + tick.label.set_fontsize(14) + for tick in ax.yaxis.get_major_ticks(): + tick.label.set_fontsize(14) + f.tight_layout() + plt.legend(prop={'size': 14}, loc=3) + return f + + +def plot_path_width(lasts, paths=[], widths=[]): + ''' + paths: list of path0~path2 + ''' + assert len(paths) > 0 and len(widths) > 0 + path0 = paths[0] + path1 = paths[1] if len(paths) > 1 else [] + path2 = paths[2] if len(paths) > 2 else [] + width0 = widths[0] + width1 = widths[1] if len(widths) > 1 else [] + width2 = widths[2] if len(widths) > 2 else [] + + # just for visualization purpose + if path0[-1] != lasts[0]: path0.append(lasts[0]) + if len(path1) != 0 and path1[-1] != lasts[1]: path1.append(lasts[1]) + if len(path2) != 0 and path2[-1] != lasts[2]: path2.append(lasts[2]) + line_updown = -0.07 + annotation_updown = 0.05; annotation_down_scale = 1.7 + x_len = max(len(path0), len(path1), len(path2)) + f, ax = plt.subplots(figsize=(x_len, 3)) + + assert len(path0) == len(width0) + 1 or len(path0) + len(width0) == 0, "path0 %d, width0 %d"%(len(path0), len(width0)) + assert len(path1) == len(width1) + 1 or len(path1) 
+ len(width1) == 0, "path1 %d, width1 %d"%(len(path1), len(width1)) + assert len(path2) == len(width2) + 1 or len(path2) + len(width2) == 0, "path2 %d, width2 %d"%(len(path2), len(width2)) + + ax.plot(np.arange(len(path0)), 2 - np.array(path0), label='1/32', lw=2.5, color='#000000', linestyle='-') + ax.plot(np.arange(len(path1)), 2 - np.array(path1) + line_updown, lw=1.8, label='1/16', color='#313131', linestyle='--') + ax.plot(np.arange(len(path2)), 2 - np.array(path2) + line_updown*2, lw=1.2, label='1/8', color='#5a5858', linestyle='-.') + + annotations = {} # (idx, scale, width, down): ((x, y), width) + for idx, width in enumerate(width2): + annotations[(idx, path2[idx], width, path2[idx+1]-path2[idx])] = ((0.35 + idx, 2 - path2[idx] + line_updown*2 + annotation_updown - (path2[idx+1]-path2[idx])/annotation_down_scale), width) + for idx, width in enumerate(width1): + annotations[(idx, path1[idx], width, path1[idx+1]-path1[idx])] = ((0.35 + idx, 2 - path1[idx] + line_updown + annotation_updown - (path1[idx+1]-path1[idx])/annotation_down_scale), width) + for idx, width in enumerate(width0): + annotations[(idx, path0[idx], width, path0[idx+1]-path0[idx])] = ((0.35 + idx, 2 - path0[idx] + annotation_updown - (path0[idx+1]-path0[idx])/annotation_down_scale), width) + for k, v in annotations.items(): + plt.annotate("%.2f"%v[1], v[0], fontsize=12, color='red') + + plt.xticks(np.arange(x_len), list(range(1, x_len+1))) + plt.yticks(np.array([0, 1, 2]), ["1/32", "1/16", "1/8"]) + plt.ylim([-0.4, 2.5]) + plt.ylabel("Scale", fontsize=17) + plt.xlabel("Layer", fontsize=17) + for tick in ax.xaxis.get_major_ticks(): + tick.label.set_fontsize(14) + for tick in ax.yaxis.get_major_ticks(): + tick.label.set_fontsize(14) + f.tight_layout() + plt.legend(prop={'size': 14}, loc=3) + return f + +def plot_op(ops, path, width=[], head_width=None, F_base=16): + assert len(width) == 0 or len(width) == len(ops) - 1 + table_vals = [] + scales = {0: "1/8", 1: "1/16", 2: "1/32"}; base_scale = 3 + for idx, op in enumerate(ops): + scale = path[idx] + if len(width) > 0: + if idx < len(width): + ch = int(F_base*2**(scale+base_scale)*width[idx]) + else: + ch = int(F_base*2**(scale+base_scale)*head_width) + else: + ch = F_base*2**(scale+base_scale) + row = [idx+1, PRIMITIVES[op], scales[scale], ch] + table_vals.append(row) + + # Based on http://stackoverflow.com/a/8531491/190597 (Andrey Sobolev) + col_labels = ['Stage', 'Operator', 'Scale', '#Channel_out'] + plt.tight_layout() + fig = plt.figure(figsize=(3,3)) + ax = fig.add_subplot(111, frame_on=False) + ax.xaxis.set_visible(False) # hide the x axis + ax.yaxis.set_visible(False) # hide the y axis + + table = plt.table(cellText=table_vals, + colWidths=[0.22, 0.6, 0.25, 0.5], + colLabels=col_labels, + cellLoc='center', + loc='center') + table.auto_set_font_size(False) + table.set_fontsize(20) + table.scale(2, 2) + + return fig + +def objective_acc_lat(acc, lat, lat_target=8.3, alpha=-0.07, beta=-0.07): + if lat <= lat_target: + w = alpha + else: + w = beta + return acc * math.pow(lat / lat_target, w) \ No newline at end of file diff --git a/CDARTS_segmentation/tools/utils/dist_utils.py b/CDARTS_segmentation/tools/utils/dist_utils.py new file mode 100644 index 0000000..d96ac9a --- /dev/null +++ b/CDARTS_segmentation/tools/utils/dist_utils.py @@ -0,0 +1,81 @@ +""" Common distribution utilities +Hacked by Hongyuan Yu +""" + +from copy import deepcopy + +import torch +from torch import distributed as dist + +import logging +from collections import OrderedDict + + +_logger = 
logging.getLogger(__name__) + +def reduce_tensor(tensor, n): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= n + return rt + +class ModelEma: + """ Model Exponential Moving Average + Keep a moving average of everything in the model state_dict (parameters and buffers). + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + A smoothed version of the weights is necessary for some training schemes to perform well. + E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use + RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA + smoothing of weights to match results. Pay attention to the decay constant you are using + relative to your update count per epoch. + To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but + disable validation of the EMA weights. Validation will have to be done manually in a separate + process, or after the training stops converging. + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU. + """ + def __init__(self, model, decay=0.9999, device='', resume=''): + # make a copy of the model for accumulating moving average of weights + self.ema = deepcopy(model) + self.ema.eval() + self.decay = decay + self.device = device # perform ema on different device from model if set + if device: + self.ema.to(device=device) + self.ema_has_module = hasattr(self.ema, 'module') + if resume: + self._load_checkpoint(resume) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def _load_checkpoint(self, checkpoint_path): + checkpoint = torch.load(checkpoint_path, map_location='cpu') + assert isinstance(checkpoint, dict) + if 'state_dict_ema' in checkpoint: + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict_ema'].items(): + # ema model may have been wrapped by DataParallel, and need module prefix + if self.ema_has_module: + name = 'module.' + k if not k.startswith('module') else k + else: + name = k + new_state_dict[name] = v + self.ema.load_state_dict(new_state_dict) + _logger.info("Loaded state_dict_ema") + else: + _logger.warning("Failed to find state_dict_ema, starting from loaded model weights") + + def update(self, model): + # correct a mismatch in state dict keys + needs_module = hasattr(model, 'module') and not self.ema_has_module + with torch.no_grad(): + msd = model.state_dict() + for k, ema_v in self.ema.state_dict().items(): + if needs_module: + k = 'module.' + k + model_v = msd[k].detach() + if self.device: + model_v = model_v.to(device=self.device) + ema_v.copy_(ema_v * self.decay + (1. 
- self.decay) * model_v) \ No newline at end of file diff --git a/CDARTS_segmentation/tools/utils/genotypes.py b/CDARTS_segmentation/tools/utils/genotypes.py new file mode 100644 index 0000000..eed8eff --- /dev/null +++ b/CDARTS_segmentation/tools/utils/genotypes.py @@ -0,0 +1,75 @@ +from collections import namedtuple + +Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') + +PRIMITIVES = [ + 'skip', + 'conv', + 'conv_di', + 'conv_2x', + 'conv_2x_di', +] + +NASNet = Genotype( + normal = [ + ('sep_conv_5x5', 1), + ('sep_conv_3x3', 0), + ('sep_conv_5x5', 0), + ('sep_conv_3x3', 0), + ('avg_pool_3x3', 1), + ('skip_connect', 0), + ('avg_pool_3x3', 0), + ('avg_pool_3x3', 0), + ('sep_conv_3x3', 1), + ('skip_connect', 1), + ], + normal_concat = [2, 3, 4, 5, 6], + reduce = [ + ('sep_conv_5x5', 1), + ('sep_conv_7x7', 0), + ('max_pool_3x3', 1), + ('sep_conv_7x7', 0), + ('avg_pool_3x3', 1), + ('sep_conv_5x5', 0), + ('skip_connect', 3), + ('avg_pool_3x3', 2), + ('sep_conv_3x3', 2), + ('max_pool_3x3', 1), + ], + reduce_concat = [4, 5, 6], +) + +AmoebaNet = Genotype( + normal = [ + ('avg_pool_3x3', 0), + ('max_pool_3x3', 1), + ('sep_conv_3x3', 0), + ('sep_conv_5x5', 2), + ('sep_conv_3x3', 0), + ('avg_pool_3x3', 3), + ('sep_conv_3x3', 1), + ('skip_connect', 1), + ('skip_connect', 0), + ('avg_pool_3x3', 1), + ], + normal_concat = [4, 5, 6], + reduce = [ + ('avg_pool_3x3', 0), + ('sep_conv_3x3', 1), + ('max_pool_3x3', 0), + ('sep_conv_7x7', 2), + ('sep_conv_7x7', 0), + ('avg_pool_3x3', 1), + ('max_pool_3x3', 0), + ('max_pool_3x3', 1), + ('conv_7x1_1x7', 0), + ('sep_conv_3x3', 5), + ], + reduce_concat = [3, 4, 6] +) + +DARTS_V1 = Genotype(normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5]) +DARTS_V2 = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1), ('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5]) + +DARTS = DARTS_V2 + diff --git a/CDARTS_segmentation/tools/utils/img_utils.py b/CDARTS_segmentation/tools/utils/img_utils.py new file mode 100644 index 0000000..95e3608 --- /dev/null +++ b/CDARTS_segmentation/tools/utils/img_utils.py @@ -0,0 +1,185 @@ +import cv2 +import numpy as np +import numbers +import random +import collections + + +def get_2dshape(shape, *, zero=True): + if not isinstance(shape, collections.Iterable): + shape = int(shape) + shape = (shape, shape) + else: + h, w = map(int, shape) + shape = (h, w) + if zero: + minv = 0 + else: + minv = 1 + + assert min(shape) >= minv, 'invalid shape: {}'.format(shape) + return shape + + +def random_crop_pad_to_shape(img, crop_pos, crop_size, pad_label_value): + h, w = img.shape[:2] + start_crop_h, start_crop_w = crop_pos + assert ((start_crop_h < h) and (start_crop_h >= 0)) + assert ((start_crop_w < w) and (start_crop_w >= 0)) + + crop_size = get_2dshape(crop_size) + crop_h, crop_w = crop_size + + img_crop = img[start_crop_h:start_crop_h + crop_h, + 
start_crop_w:start_crop_w + crop_w, ...] + + img_, margin = pad_image_to_shape(img_crop, crop_size, cv2.BORDER_CONSTANT, + pad_label_value) + + return img_, margin + + +def generate_random_crop_pos(ori_size, crop_size): + ori_size = get_2dshape(ori_size) + h, w = ori_size + + crop_size = get_2dshape(crop_size) + crop_h, crop_w = crop_size + + pos_h, pos_w = 0, 0 + + if h > crop_h: + pos_h = random.randint(0, h - crop_h + 1) + + if w > crop_w: + pos_w = random.randint(0, w - crop_w + 1) + + return pos_h, pos_w + + +def pad_image_to_shape(img, shape, border_mode, value): + margin = np.zeros(4, np.uint32) + shape = get_2dshape(shape) + pad_height = shape[0] - img.shape[0] if shape[0] - img.shape[0] > 0 else 0 + pad_width = shape[1] - img.shape[1] if shape[1] - img.shape[1] > 0 else 0 + + margin[0] = pad_height // 2 + margin[1] = pad_height // 2 + pad_height % 2 + margin[2] = pad_width // 2 + margin[3] = pad_width // 2 + pad_width % 2 + + img = cv2.copyMakeBorder(img, margin[0], margin[1], margin[2], margin[3], + border_mode, value=value) + + return img, margin + + +def pad_image_size_to_multiples_of(img, multiple, pad_value): + h, w = img.shape[:2] + d = multiple + + def canonicalize(s): + v = s // d + return (v + (v * d != s)) * d + + th, tw = map(canonicalize, (h, w)) + + return pad_image_to_shape(img, (th, tw), cv2.BORDER_CONSTANT, pad_value) + + +def resize_ensure_shortest_edge(img, edge_length, + interpolation_mode=cv2.INTER_LINEAR): + assert isinstance(edge_length, int) and edge_length > 0, edge_length + h, w = img.shape[:2] + if h < w: + ratio = float(edge_length) / h + th, tw = edge_length, max(1, int(ratio * w)) + else: + ratio = float(edge_length) / w + th, tw = max(1, int(ratio * h)), edge_length + img = cv2.resize(img, (tw, th), interpolation_mode) + + return img + + +def random_scale(img, gt, scales): + scale = random.choice(scales) + sh = int(img.shape[0] * scale) + sw = int(img.shape[1] * scale) + img = cv2.resize(img, (sw, sh), interpolation=cv2.INTER_LINEAR) + gt = cv2.resize(gt, (sw, sh), interpolation=cv2.INTER_NEAREST) + + return img, gt, scale + + +def random_scale_with_length(img, gt, length): + size = random.choice(length) + sh = size + sw = size + img = cv2.resize(img, (sw, sh), interpolation=cv2.INTER_LINEAR) + gt = cv2.resize(gt, (sw, sh), interpolation=cv2.INTER_NEAREST) + + return img, gt, size + + +def random_mirror(img, gt): + if random.random() >= 0.5: + img = cv2.flip(img, 1) + gt = cv2.flip(gt, 1) + + return img, gt, + + +def random_rotation(img, gt): + angle = random.random() * 20 - 10 + h, w = img.shape[:2] + rotation_matrix = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1) + img = cv2.warpAffine(img, rotation_matrix, (w, h), flags=cv2.INTER_LINEAR) + gt = cv2.warpAffine(gt, rotation_matrix, (w, h), flags=cv2.INTER_NEAREST) + + return img, gt + + +def random_gaussian_blur(img): + gauss_size = random.choice([1, 3, 5, 7]) + if gauss_size > 1: + # do the gaussian blur + img = cv2.GaussianBlur(img, (gauss_size, gauss_size), 0) + + return img + + +def center_crop(img, shape): + h, w = shape[0], shape[1] + y = (img.shape[0] - h) // 2 + x = (img.shape[1] - w) // 2 + return img[y:y + h, x:x + w] + + +def random_crop(img, gt, size): + if isinstance(size, numbers.Number): + size = (int(size), int(size)) + + h, w = img.shape[:2] + crop_h, crop_w = size[0], size[1] + + if h > crop_h: + x = random.randint(0, h - crop_h + 1) + img = img[x:x + crop_h, :, :] + gt = gt[x:x + crop_h, :] + + if w > crop_w: + x = random.randint(0, w - crop_w + 1) + img = img[:, x:x + crop_w, 
:] + gt = gt[:, x:x + crop_w] + + return img, gt + + +def normalize(img, mean, std): + # pytorch pretrained model need the input range: 0-1 + img = img.astype(np.float32) / 255.0 + img = img - mean + img = img / std + + return img diff --git a/CDARTS_segmentation/tools/utils/init_func.py b/CDARTS_segmentation/tools/utils/init_func.py new file mode 100644 index 0000000..e2bae28 --- /dev/null +++ b/CDARTS_segmentation/tools/utils/init_func.py @@ -0,0 +1,67 @@ +import numpy as np +import json +import torch +import torch.nn as nn + + +def __init_weight(feature, conv_init, norm_layer, bn_eps, bn_momentum, + **kwargs): + for name, m in feature.named_modules(): + if isinstance(m, (nn.Conv2d, nn.Conv3d)): + conv_init(m.weight, **kwargs) + elif isinstance(m, norm_layer): + m.eps = bn_eps + m.momentum = bn_momentum + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + +def init_weights(m): + if type(m) == nn.Linear: + torch.nn.init.xavier_uniform(m.weight) + m.bias.data.fill_(0.01) + +def init_weight(module_list, conv_init, norm_layer, bn_eps, bn_momentum, + **kwargs): + if isinstance(module_list, list): + for feature in module_list: + __init_weight(feature, conv_init, norm_layer, bn_eps, bn_momentum, + **kwargs) + else: + __init_weight(module_list, conv_init, norm_layer, bn_eps, bn_momentum, + **kwargs) + + +def group_weight(weight_group, module, norm_layer, lr): + group_decay = [] + group_no_decay = [] + for m in module.modules(): + if isinstance(m, nn.Linear): + group_decay.append(m.weight) + if m.bias is not None: + group_no_decay.append(m.bias) + elif isinstance(m, (nn.Conv2d, nn.Conv3d)): + group_decay.append(m.weight) + if m.bias is not None: + group_no_decay.append(m.bias) + elif isinstance(m, norm_layer) or isinstance(m, nn.GroupNorm): + if m.weight is not None: + group_no_decay.append(m.weight) + if m.bias is not None: + group_no_decay.append(m.bias) + + assert len(list(module.parameters())) == len(group_decay) + len( + group_no_decay) + weight_group.append(dict(params=group_decay, lr=lr)) + weight_group.append(dict(params=group_no_decay, weight_decay=.0, lr=lr)) + return weight_group + +class NpEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.integer): + return int(obj) + elif isinstance(obj, np.floating): + return float(obj) + elif isinstance(obj, np.ndarray): + return obj.tolist() + else: + return super(NpEncoder, self).default(obj) diff --git a/CDARTS_segmentation/tools/utils/lr_scheduler.py b/CDARTS_segmentation/tools/utils/lr_scheduler.py new file mode 100644 index 0000000..ba6b3e3 --- /dev/null +++ b/CDARTS_segmentation/tools/utils/lr_scheduler.py @@ -0,0 +1,56 @@ +import math + + +class Iter_LR_Scheduler(object): + """Learning Rate Scheduler + Step mode: ``lr = baselr * 0.1 ^ {floor(epoch-1 / lr_step)}`` + Cosine mode: ``lr = baselr * 0.5 * (1 + cos(iter/maxiter))`` + Poly mode: ``lr = baselr * (1 - iter/maxiter) ^ 0.9`` + Args: + args: + :attr:`args.lr_scheduler` lr scheduler mode (`cos`, `poly`), + :attr:`args.lr` base learning rate, :attr:`args.epochs` number of epochs, + :attr:`args.lr_step` + iters_per_epoch: number of iterations per epoch + """ + + def __init__(self, args, max_iteration, iters_per_epoch): + self.mode = args.mode + print('Using {} LR Scheduler!'.format(self.mode)) + self.lr = args.base_lr + self.lr_step = args.lr_step + self.iters_per_epoch = iters_per_epoch + self.max_iteration = max_iteration + self.epoch = -1 + self.warmup_iters = args.warmup_iters + self.min_lr = args.min_lr if args.min_lr is not None else 0 + 
self.warmup_start_lr = args.warmup_start_lr
+        self.warmup_factor = (self.lr / args.warmup_start_lr) ** (1. / args.warmup_iters)
+
+    def __call__(self, optimizer, iteration):
+        if self.warmup_iters > 0 and iteration < self.warmup_iters:
+            lr = self.warmup_start_lr * (self.warmup_factor ** iteration)
+        elif self.mode == 'cos':
+            lr = 0.5 * self.lr * (1 + math.cos(1.0 * iteration / self.max_iteration * math.pi))
+        elif self.mode == 'poly':
+            lr = self.lr * pow((1 - (iteration - self.warmup_iters) / (self.max_iteration - self.warmup_iters)), 0.9)
+        elif self.mode == 'step':  # TODO: Fix the step mode
+            print('Warning: the step LR decay mode currently has known issues')
+            if not self.lr_step:
+                raise NotImplementedError
+            epoch = iteration // self.iters_per_epoch
+            lr = self.lr * (0.1 ** (epoch // self.lr_step))
+        else:
+            raise NotImplementedError
+        # warm up lr schedule
+        if iteration == self.warmup_iters:
+            print('==> warmup done, switching to the {} lr schedule'.format(self.mode))
+        if (not iteration % self.iters_per_epoch) and (iteration // self.iters_per_epoch > self.epoch):
+            epoch = iteration // self.iters_per_epoch
+            print('\n=> Epoch %i, learning rate = %.4f' % (epoch, lr))
+            self.epoch = epoch
+
+        optimizer.param_groups[0]['lr'] = max(lr, self.min_lr)
+
+    def get_lr(self, optimizer):
+        return optimizer.param_groups[0]['lr']
\ No newline at end of file
diff --git a/CDARTS_segmentation/tools/utils/metrics.py b/CDARTS_segmentation/tools/utils/metrics.py
new file mode 100644
index 0000000..ee235e3
--- /dev/null
+++ b/CDARTS_segmentation/tools/utils/metrics.py
@@ -0,0 +1,46 @@
+import numpy as np
+
+
+class Evaluator(object):
+    def __init__(self, num_class):
+        self.num_class = num_class
+        self.confusion_matrix = np.zeros((self.num_class,)*2)
+
+    def Pixel_Accuracy(self):
+        Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()
+        return Acc
+
+    def Pixel_Accuracy_Class(self):
+        Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)
+        Acc = np.nanmean(Acc)
+        return Acc
+
+    def Mean_Intersection_over_Union(self):
+        MIoU = np.diag(self.confusion_matrix) / (
+            np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
+            np.diag(self.confusion_matrix))
+        MIoU = np.nanmean(MIoU)
+        return MIoU
+
+    def Frequency_Weighted_Intersection_over_Union(self):
+        freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)
+        iu = np.diag(self.confusion_matrix) / (
+            np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
+            np.diag(self.confusion_matrix))
+
+        FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
+        return FWIoU
+
+    def _generate_matrix(self, gt_image, pre_image):
+        mask = (gt_image >= 0) & (gt_image < self.num_class)
+        label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]
+        count = np.bincount(label, minlength=self.num_class**2)
+        confusion_matrix = count.reshape(self.num_class, self.num_class)
+        return confusion_matrix
+
+    def add_batch(self, gt_image, pre_image):
+        assert gt_image.shape == pre_image.shape
+        self.confusion_matrix += self._generate_matrix(gt_image, pre_image)
+
+    def reset(self):
+        self.confusion_matrix = np.zeros((self.num_class,) * 2)
\ No newline at end of file
diff --git a/CDARTS_segmentation/tools/utils/pyt_utils.py b/CDARTS_segmentation/tools/utils/pyt_utils.py
new file mode 100644
index 0000000..a5487b9
--- /dev/null
+++ b/CDARTS_segmentation/tools/utils/pyt_utils.py
@@ -0,0 +1,291 @@
+# encoding: utf-8
+import os
+import time
+import numpy as np
+import numba
+import argparse
+from
collections import OrderedDict + +import torch +import torch.distributed as dist + +from engine.logger import get_logger + +logger = get_logger() +EPS = 1e-10 + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count if self.count != 0 else 0 + +def to_cuda(batch, device): + if type(batch) == torch.Tensor: + batch = batch.cuda(non_blocking=True) + elif type(batch) == dict: + for key in batch.keys(): + batch[key] = to_cuda(batch[key], device) + elif type(batch) == list: + for i in range(len(batch)): + batch[i] = to_cuda(batch[i], device) + return batch + +def get_loss_info_str(loss_meter_dict): + msg = '' + for key in loss_meter_dict.keys(): + msg += '{name}: {meter.val:.3e} ({meter.avg:.3e})\t'.format( + name=key, meter=loss_meter_dict[key] + ) + + return msg + +def reduce_tensor(tensor, dst=0, op=dist.ReduceOp.SUM, world_size=1): + tensor = tensor.clone() + dist.reduce(tensor, dst, op) + if dist.get_rank() == dst: + tensor.div_(world_size) + + return tensor + +def all_reduce_tensor(tensor, op=dist.ReduceOp.SUM, world_size=1): + tensor = tensor.clone() + dist.all_reduce(tensor, op) + tensor.div_(world_size) + + return tensor + + +def load_model(model, model_file, is_restore=False): + t_start = time.time() + if isinstance(model_file, str): + state_dict = torch.load(model_file) + if 'model' in state_dict.keys(): + state_dict = state_dict['model'] + else: + state_dict = model_file + t_ioend = time.time() + + if is_restore: + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = 'module.' 
+ k + new_state_dict[name] = v + state_dict = new_state_dict + + model.load_state_dict(state_dict, strict=False) + ckpt_keys = set(state_dict.keys()) + own_keys = set(model.state_dict().keys()) + missing_keys = own_keys - ckpt_keys + unexpected_keys = ckpt_keys - own_keys + + if len(missing_keys) > 0: + logger.warning('Missing key(s) in state_dict: {}'.format( + ', '.join('{}'.format(k) for k in missing_keys))) + + if len(unexpected_keys) > 0: + logger.warning('Unexpected key(s) in state_dict: {}'.format( + ', '.join('{}'.format(k) for k in unexpected_keys))) + + del state_dict + t_end = time.time() + logger.info( + "Load model, Time usage:\n\tIO: {}, initialize parameters: {}".format( + t_ioend - t_start, t_end - t_ioend)) + + return model + + +def parse_devices(input_devices): + if input_devices.endswith('*'): + devices = list(range(torch.cuda.device_count())) + return devices + + devices = [] + for d in input_devices.split(','): + if '-' in d: + start_device, end_device = d.split('-')[0], d.split('-')[1] + assert start_device != '' + assert end_device != '' + start_device, end_device = int(start_device), int(end_device) + assert start_device < end_device + assert end_device < torch.cuda.device_count() + for sd in range(start_device, end_device + 1): + devices.append(sd) + else: + device = int(d) + assert device < torch.cuda.device_count() + devices.append(device) + + logger.info('using devices {}'.format( + ', '.join([str(d) for d in devices]))) + + return devices + + +def extant_file(x): + """ + 'Type' for argparse - checks that file exists but does not open. + """ + if not os.path.exists(x): + # Argparse uses the ArgumentTypeError to give a rejection message like: + # error: argument input: x does not exist + raise argparse.ArgumentTypeError("{0} does not exist".format(x)) + return x + + +def link_file(src, target): + if os.path.isdir(target) or os.path.isfile(target): + os.remove(target) + os.system('ln -s {} {}'.format(src, target)) + + +def ensure_dir(path): + if not os.path.isdir(path): + os.makedirs(path) + + +def _dbg_interactive(var, value): + from IPython import embed + embed() + + +def check_keys(model, pretrained_state_dict): + ckpt_keys = set(pretrained_state_dict.keys()) + model_keys = set(model.state_dict().keys()) + used_pretrained_keys = model_keys & ckpt_keys + unused_pretrained_keys = ckpt_keys - model_keys + missing_keys = model_keys - ckpt_keys + + print('missing keys:{}'.format(missing_keys)) + print('unused checkpoint keys:{}'.format(unused_pretrained_keys)) + # print('used keys:{}'.format(used_pretrained_keys)) + assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint' + return True + +def remove_prefix(state_dict, prefix): + ''' + Old style model is stored with all names of parameters share common prefix 'module.' 
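+    e.g. remove_prefix({'module.conv.weight': w}, 'module.') -> {'conv.weight': w}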
+    '''
+    print('remove prefix \'{}\''.format(prefix))
+    f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
+    return {f(key): value for key, value in state_dict.items()}
+
+def load_pretrain(model, pretrained_path):
+    print('load pretrained model from {}'.format(pretrained_path))
+
+    device = torch.cuda.current_device()
+    pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
+    if "state_dict" in pretrained_dict.keys():
+        pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
+        # new_dict = {}
+        # for k in pretrained_dict.keys():
+        #     if "heads" in k:
+        #         continue
+        #     else:
+        #         new_dict[k] = pretrained_dict[k]
+        # pretrained_dict = new_dict
+    else:
+        pretrained_dict = remove_prefix(pretrained_dict, 'module.')
+    check_keys(model, pretrained_dict)
+    model.load_state_dict(pretrained_dict, strict=False)
+    return model
+
+def nanmean(x):
+    """Computes the arithmetic mean ignoring any NaNs."""
+    return torch.mean(x[x == x])
+
+# computes confusion matrix
+def _fast_hist(true, pred, num_classes):
+    mask = (true >= 0) & (true < num_classes)
+    hist = torch.bincount(
+        num_classes * true[mask] + pred[mask],
+        minlength=num_classes ** 2,
+    ).reshape(num_classes, num_classes).float()
+    return hist
+
+def compute_hist(pred, lb, n_classes, ignore_label):
+    keep = torch.logical_not(lb == ignore_label)
+    merge = pred[keep] * n_classes + lb[keep]
+    hist = torch.bincount(merge, minlength=n_classes ** 2)
+    hist = hist.reshape((n_classes, n_classes)).float()
+    return hist
+
+@numba.jit
+def compute_hist_np(pred, lb, n_classes, ignore_label):
+    keep = np.logical_not(lb == ignore_label)
+    merge = pred[keep] * n_classes + lb[keep]
+    hist = np.bincount(merge, minlength=n_classes ** 2)
+    hist = hist.reshape((n_classes, n_classes))
+    return hist
+
+# computes IoU based on confusion matrix
+def jaccard_index(hist):
+    """Computes the Jaccard index, a.k.a the Intersection over Union (IoU).
+    Args:
+        hist: confusion matrix.
+    Returns:
+        avg_jacc: the average per-class jaccard index.
+    """
+    A_inter_B = torch.diag(hist)
+    A = hist.sum(dim=1)
+    B = hist.sum(dim=0)
+    jaccard = A_inter_B / (A + B - A_inter_B + EPS)
+    avg_jacc = nanmean(jaccard)  # the mean of jaccard without NaNs
+    return avg_jacc, jaccard
\ No newline at end of file
diff --git a/CDARTS_segmentation/tools/utils/visualize.py b/CDARTS_segmentation/tools/utils/visualize.py
new file mode 100644
index 0000000..08eed25
--- /dev/null
+++ b/CDARTS_segmentation/tools/utils/visualize.py
@@ -0,0 +1,89 @@
+import numpy as np
+import cv2
+import scipy.io as sio
+
+
+def set_img_color(colors, background, img, gt, show255=False, weight_foreground=0.55):
+    origin = np.array(img)
+    for i in range(len(colors)):
+        if i != background:
+            img[np.where(gt == i)] = colors[i]
+    if show255:
+        img[np.where(gt == 255)] = 0
+    cv2.addWeighted(img, weight_foreground, origin, (1 - weight_foreground), 0, img)
+    return img
+
+
+def show_prediction(colors, background, img, pred, weight_foreground=1):
+    im = np.array(img, np.uint8)
+    set_img_color(colors, background, im, pred, weight_foreground=weight_foreground)
+    final = np.array(im)
+    return final
+
+
+def show_img(colors, background, img, clean, gt, *pds):
+    im1 = np.array(img, np.uint8)
+    # set_img_color(colors, background, im1, clean)
+    final = np.array(im1)
+    # the pivot black bar
+    pivot = np.zeros((im1.shape[0], 15, 3), dtype=np.uint8)
+    for pd in pds:
+        im = np.array(img, np.uint8)
+        # pd[np.where(gt == 255)] = 255
+        set_img_color(colors, background, im, pd)
+        final = np.column_stack((final, pivot))
+        final = np.column_stack((final, im))
+
+    im = np.array(img, np.uint8)
+    set_img_color(colors, background, im, gt, True)
+    final = np.column_stack((final, pivot))
+    final = np.column_stack((final, im))
+    return final
+
+
+def get_colors(class_num):
+    colors = []
+    for i in range(class_num):
+        colors.append((np.random.random((1, 3)) * 255).tolist()[0])
+
+    return colors
+
+
+def get_ade_colors():
+    colors = sio.loadmat('./color150.mat')['colors']
+    colors = colors[:, ::-1, ]
+    colors = np.array(colors).astype(int).tolist()
+    colors.insert(0, [0, 0, 0])
+
+    return colors
+
+
+def print_iou(iu, mean_pixel_acc, class_names=None, show_no_back=False,
+              no_print=False):
+    n = iu.size
+    lines = []
+    for i in range(n):
+        if class_names is None:
+            cls = 'Class %d:' % (i + 1)
+        else:
+            cls = '%d %s' % (i + 1, class_names[i])
+        lines.append('%-8s\t%.3f%%' % (cls, iu[i] * 100))
+    mean_IU = np.nanmean(iu)
+    # mean_IU_no_back = np.nanmean(iu[1:])
+    mean_IU_no_back = np.nanmean(iu[:-1])
+    if show_no_back:
+        lines.append(
+            '---------------------------- %-8s\t%.3f%%\t%-8s\t%.3f%%\t%-8s\t%.3f%%' % (
+                'mean_IU', mean_IU * 100, 'mean_IU_no_back',
+                mean_IU_no_back * 100,
+                'mean_pixel_ACC', mean_pixel_acc * 100))
+    else:
+        lines.append(
+            '---------------------------- %-8s\t%.3f%%\t%-8s\t%.3f%%' % (
'mean_IU', mean_IU * 100, 'mean_pixel_ACC', + mean_pixel_acc * 100)) + line = "\n".join(lines) + if not no_print: + print(line) + return line diff --git a/CDARTS_segmentation/tools/vis/panoptic_coco_categories.json b/CDARTS_segmentation/tools/vis/panoptic_coco_categories.json new file mode 100644 index 0000000..673a19e --- /dev/null +++ b/CDARTS_segmentation/tools/vis/panoptic_coco_categories.json @@ -0,0 +1 @@ +[{"supercategory": "person", "color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, {"supercategory": "vehicle", "color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, {"supercategory": "vehicle", "color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, {"supercategory": "vehicle", "color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, {"supercategory": "vehicle", "color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, {"supercategory": "vehicle", "color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, {"supercategory": "vehicle", "color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, {"supercategory": "vehicle", "color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, {"supercategory": "vehicle", "color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, {"supercategory": "outdoor", "color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, {"supercategory": "outdoor", "color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, {"supercategory": "outdoor", "color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, {"supercategory": "outdoor", "color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, {"supercategory": "outdoor", "color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, {"supercategory": "animal", "color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, {"supercategory": "animal", "color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, {"supercategory": "animal", "color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, {"supercategory": "animal", "color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, {"supercategory": "animal", "color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, {"supercategory": "animal", "color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, {"supercategory": "animal", "color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, {"supercategory": "animal", "color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, {"supercategory": "animal", "color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, {"supercategory": "animal", "color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, {"supercategory": "accessory", "color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, {"supercategory": "accessory", "color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, {"supercategory": "accessory", "color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, {"supercategory": "accessory", "color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, {"supercategory": "accessory", "color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, {"supercategory": "sports", "color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, {"supercategory": "sports", "color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, {"supercategory": "sports", "color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, {"supercategory": "sports", "color": [78, 180, 255], "isthing": 1, 
"id": 37, "name": "sports ball"}, {"supercategory": "sports", "color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, {"supercategory": "sports", "color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, {"supercategory": "sports", "color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, {"supercategory": "sports", "color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, {"supercategory": "sports", "color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, {"supercategory": "sports", "color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, {"supercategory": "kitchen", "color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, {"supercategory": "kitchen", "color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, {"supercategory": "kitchen", "color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, {"supercategory": "kitchen", "color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, {"supercategory": "kitchen", "color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, {"supercategory": "kitchen", "color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, {"supercategory": "kitchen", "color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, {"supercategory": "food", "color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"}, {"supercategory": "food", "color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, {"supercategory": "food", "color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, {"supercategory": "food", "color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, {"supercategory": "food", "color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, {"supercategory": "food", "color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, {"supercategory": "food", "color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, {"supercategory": "food", "color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, {"supercategory": "food", "color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, {"supercategory": "food", "color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, {"supercategory": "furniture", "color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, {"supercategory": "furniture", "color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, {"supercategory": "furniture", "color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, {"supercategory": "furniture", "color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, {"supercategory": "furniture", "color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, {"supercategory": "furniture", "color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, {"supercategory": "electronic", "color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, {"supercategory": "electronic", "color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, {"supercategory": "electronic", "color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, {"supercategory": "electronic", "color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, {"supercategory": "electronic", "color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, {"supercategory": "electronic", "color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, {"supercategory": "appliance", "color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, 
{"supercategory": "appliance", "color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, {"supercategory": "appliance", "color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, {"supercategory": "appliance", "color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, {"supercategory": "appliance", "color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, {"supercategory": "indoor", "color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, {"supercategory": "indoor", "color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, {"supercategory": "indoor", "color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, {"supercategory": "indoor", "color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, {"supercategory": "indoor", "color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, {"supercategory": "indoor", "color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, {"supercategory": "indoor", "color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, {"supercategory": "textile", "color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"}, {"supercategory": "textile", "color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"}, {"supercategory": "building", "color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, {"supercategory": "raw-material", "color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, {"supercategory": "furniture-stuff", "color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, {"supercategory": "textile", "color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, {"supercategory": "furniture-stuff", "color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, {"supercategory": "floor", "color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"}, {"supercategory": "plant", "color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, {"supercategory": "food-stuff", "color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, {"supercategory": "ground", "color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, {"supercategory": "building", "color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, {"supercategory": "furniture-stuff", "color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, {"supercategory": "furniture-stuff", "color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, {"supercategory": "structural", "color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, {"supercategory": "textile", "color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, {"supercategory": "ground", "color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"}, {"supercategory": "ground", "color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"}, {"supercategory": "ground", "color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, {"supercategory": "water", "color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, {"supercategory": "ground", "color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, {"supercategory": "building", "color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, {"supercategory": "ground", "color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, {"supercategory": "water", "color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, {"supercategory": "furniture-stuff", "color": [255, 160, 98], "isthing": 0, "id": 156, 
"name": "shelf"}, {"supercategory": "ground", "color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, {"supercategory": "furniture-stuff", "color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, {"supercategory": "building", "color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, {"supercategory": "textile", "color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, {"supercategory": "wall", "color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, {"supercategory": "wall", "color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, {"supercategory": "wall", "color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, {"supercategory": "wall", "color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, {"supercategory": "water", "color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, {"supercategory": "window", "color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, {"supercategory": "window", "color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, {"supercategory": "plant", "color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, {"supercategory": "structural", "color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, {"supercategory": "ceiling", "color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, {"supercategory": "sky", "color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, {"supercategory": "furniture-stuff", "color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, {"supercategory": "furniture-stuff", "color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, {"supercategory": "floor", "color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, {"supercategory": "ground", "color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, {"supercategory": "solid", "color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"}, {"supercategory": "plant", "color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, {"supercategory": "ground", "color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"}, {"supercategory": "raw-material", "color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, {"supercategory": "food-stuff", "color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"}, {"supercategory": "building", "color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, {"supercategory": "solid", "color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, {"supercategory": "wall", "color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, {"supercategory": "textile", "color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}] diff --git a/CDARTS_segmentation/tools/vis/vis_cityscapes.py b/CDARTS_segmentation/tools/vis/vis_cityscapes.py new file mode 100644 index 0000000..583d0c6 --- /dev/null +++ b/CDARTS_segmentation/tools/vis/vis_cityscapes.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python2 +''' +Visualization demo for panoptic COCO sample_data +The code shows an example of color generation for panoptic data (with +"generate_new_colors" set to True). For each segment distinct color is used in +a way that it close to the color of corresponding semantic class. 
+'''
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import os, sys
+import numpy as np
+import json
+import cv2
+
+import PIL.Image as Image
+import matplotlib.pyplot as plt
+from skimage.segmentation import find_boundaries
+
+from panopticapi.utils import IdGenerator, rgb2id
+
+# whether colors from the PNG are used or new colors are generated
+generate_new_colors = True
+
+json_file = './panoptic_cityscapes_mul2/panoptic/predictions.json'
+segmentations_folder = './panoptic_cityscapes_mul2/panoptic/predictions/'
+img_folder = '/home2/hongyuan/data/cityscapes/leftImg8bit/val/'
+panoptic_coco_categories = './panoptic_coco_categories.json'
+output_dir = 'cityscapes_vis_results'
+
+os.makedirs(output_dir, exist_ok=True)
+
+with open(json_file, 'r') as f:
+    coco_d = json.load(f)
+
+# ann = np.random.choice(coco_d['annotations'])
+
+with open(panoptic_coco_categories, 'r') as f:
+    categories_list = json.load(f)
+categories = {category['id']: category for category in categories_list}
+
+# find the input img that corresponds to the annotation
+img = None
+for image_info in coco_d['images']:
+    for ann in coco_d['annotations']:
+        if image_info['id'] == ann['image_id']:
+            try:
+                img = np.array(
+                    Image.open(os.path.join(img_folder, image_info['file_name'].split('_')[0], image_info['file_name'].split('gtFine_leftImg8bit.png')[0]+'leftImg8bit.png'))
+                )
+            except Exception:
+                print("Unable to find the corresponding input image.")
+            break
+
+    segmentation = np.array(
+        Image.open(os.path.join(segmentations_folder, ann['file_name'])),
+        dtype=np.uint8
+    )
+    segmentation_id = rgb2id(segmentation)
+    # find segment boundaries
+    boundaries = find_boundaries(segmentation_id, mode='thick')
+
+    if generate_new_colors:
+        segmentation[:, :, :] = 0
+        color_generator = IdGenerator(categories)
+        for segment_info in ann['segments_info']:
+            try:
+                color = color_generator.get_color(segment_info['category_id'])
+                mask = segmentation_id == segment_info['id']
+                segmentation[mask] = color
+            except Exception:
+                pass
+
+    # depict boundaries
+    segmentation[boundaries] = [0, 0, 0]
+    if img.shape[:2] != segmentation.shape[:2]:
+        print('img: {} shape error! img shape: {} seg shape: {}'.format(ann['image_id'], img.shape[:2], segmentation.shape[:2]))
+        continue
+
+    if len(img.shape) == 2:
+        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+    try:
+        segmentation = cv2.addWeighted(img, 0.6, segmentation, 0.4, 0)
+    except cv2.error:
+        print('img: {} blending failed, skipping'.format(ann['image_id']))
+        continue
+    cv2.imwrite(os.path.join(output_dir, '{}.jpg').format(ann['image_id']), img[:, :, ::-1])
+    cv2.imwrite(os.path.join(output_dir, '{}_mask.jpg').format(ann['image_id']), segmentation[:, :, ::-1])
+    #if img is None:
+    #    plt.figure()
+    #    plt.imshow(segmentation)
+    #    plt.axis('off')
+    #else:
+    #    plt.figure(figsize=(9, 5))
+    #    plt.subplot(121)
+    #    plt.imshow(img)
+    #    plt.axis('off')
+    #    plt.subplot(122)
+    #    plt.imshow(segmentation)
+    #    plt.axis('off')
+    #    plt.tight_layout()
+    #plt.show()
diff --git a/CDARTS_segmentation/tools/vis/vis_coco.py b/CDARTS_segmentation/tools/vis/vis_coco.py
new file mode 100644
index 0000000..c747395
--- /dev/null
+++ b/CDARTS_segmentation/tools/vis/vis_coco.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+'''
+Visualization demo for panoptic COCO sample_data
+The code shows an example of color generation for panoptic data (with
+"generate_new_colors" set to True). For each segment a distinct color is used in
+a way that it is close to the color of the corresponding semantic class.
+'''
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import os, sys
+import numpy as np
+import json
+import cv2
+
+import PIL.Image as Image
+import matplotlib.pyplot as plt
+from skimage.segmentation import find_boundaries
+
+from panopticapi.utils import IdGenerator, rgb2id
+
+# whether colors from the PNG are used or new colors are generated
+generate_new_colors = True
+
+json_file = './panoptic_coco_mul2/panoptic/predictions.json'
+segmentations_folder = './panoptic_coco_mul2/panoptic/predictions/'
+img_folder = '/home2/hongyuan/data/coco2017/val2017/'
+panoptic_coco_categories = './panoptic_coco_categories.json'
+output_dir = 'coco_vis_results'
+
+os.makedirs(output_dir, exist_ok=True)
+
+with open(json_file, 'r') as f:
+    coco_d = json.load(f)
+
+# ann = np.random.choice(coco_d['annotations'])
+
+with open(panoptic_coco_categories, 'r') as f:
+    categories_list = json.load(f)
+categories = {category['id']: category for category in categories_list}
+
+# find the input img that corresponds to the annotation
+img = None
+for image_info in coco_d['images']:
+    for ann in coco_d['annotations']:
+        if image_info['id'] == ann['image_id']:
+            try:
+                img = np.array(
+                    Image.open(os.path.join(img_folder, image_info['file_name']))
+                )
+            except Exception:
+                print("Unable to find the corresponding input image.")
+            break
+
+    segmentation = np.array(
+        Image.open(os.path.join(segmentations_folder, ann['file_name'])),
+        dtype=np.uint8
+    )
+    segmentation_id = rgb2id(segmentation)
+    # find segment boundaries
+    boundaries = find_boundaries(segmentation_id, mode='thick')
+
+    if generate_new_colors:
+        segmentation[:, :, :] = 0
+        color_generator = IdGenerator(categories)
+        for segment_info in ann['segments_info']:
+            color = color_generator.get_color(segment_info['category_id'])
+            mask = segmentation_id == segment_info['id']
+            segmentation[mask] = color
+
+    # depict boundaries
+    segmentation[boundaries] = [0, 0, 0]
+    if img.shape[:2] != segmentation.shape[:2]:
+        print('img: {} shape error! img shape: {} seg shape: {}'.format(ann['image_id'], img.shape[:2], segmentation.shape[:2]))
+        continue
+
+    if len(img.shape) == 2:
+        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+    try:
+        segmentation = cv2.addWeighted(img, 0.6, segmentation, 0.4, 0)
+    except cv2.error:
+        print('img: {} blending failed, skipping'.format(ann['image_id']))
+        continue
+    cv2.imwrite(os.path.join(output_dir, '{}.jpg').format(ann['image_id']), img[:, :, ::-1])
+    cv2.imwrite(os.path.join(output_dir, '{}_mask.jpg').format(ann['image_id']), segmentation[:, :, ::-1])
+    #if img is None:
+    #    plt.figure()
+    #    plt.imshow(segmentation)
+    #    plt.axis('off')
+    #else:
+    #    plt.figure(figsize=(9, 5))
+    #    plt.subplot(121)
+    #    plt.imshow(img)
+    #    plt.axis('off')
+    #    plt.subplot(122)
+    #    plt.imshow(segmentation)
+    #    plt.axis('off')
+    #    plt.tight_layout()
+    #plt.show()
diff --git a/CDARTS_segmentation/train/_init_paths.py b/CDARTS_segmentation/train/_init_paths.py
new file mode 100644
index 0000000..d823798
--- /dev/null
+++ b/CDARTS_segmentation/train/_init_paths.py
@@ -0,0 +1,19 @@
+# ------------------------------------------------------------------------------
+# Adds `segmentation` package into Python path.
+# Written by Bowen Cheng (bcheng9@illinois.edu) +# ------------------------------------------------------------------------------ + +import os.path as osp +import sys + + +def add_path(path): + if path not in sys.path: + sys.path.insert(0, path) + + +this_dir = osp.dirname(__file__) +lib_path = osp.join(this_dir, '..') +add_path(lib_path) +add_path(this_dir) +add_path(osp.join(lib_path, 'tools')) diff --git a/CDARTS_segmentation/train/att_sa.py b/CDARTS_segmentation/train/att_sa.py new file mode 100644 index 0000000..6761d5b --- /dev/null +++ b/CDARTS_segmentation/train/att_sa.py @@ -0,0 +1,231 @@ + +import torch +import math +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +from torch import nn, einsum + +from einops import rearrange + + +def pair(x): + return (x, x) if not isinstance(x, tuple) else x + +def expand_dim(t, dim, k): + t = t.unsqueeze(dim = dim) + expand_shape = [-1] * len(t.shape) + expand_shape[dim] = k + return t.expand(*expand_shape) + +def rel_to_abs(x): + b, h, l, _, device, dtype = *x.shape, x.device, x.dtype + dd = {'device': device, 'dtype': dtype} + col_pad = torch.zeros((b, h, l, 1), **dd) + x = torch.cat((x, col_pad), dim = 3) + flat_x = rearrange(x, 'b h l c -> b h (l c)') + flat_pad = torch.zeros((b, h, l - 1), **dd) + flat_x_padded = torch.cat((flat_x, flat_pad), dim = 2) + final_x = flat_x_padded.reshape(b, h, l + 1, 2 * l - 1) + final_x = final_x[:, :, :l, (l-1):] + return final_x + +def relative_logits_1d(q, rel_k): + b, heads, h, w, dim = q.shape + logits = einsum('b h x y d, r d -> b h x y r', q, rel_k) + logits = rearrange(logits, 'b h x y r -> b (h x) y r') + logits = rel_to_abs(logits) + logits = logits.reshape(b, heads, h, w, w) + logits = expand_dim(logits, dim = 3, k = h) + return logits + +# positional embeddings + +class AbsPosEmb(nn.Module): + def __init__( + self, + fmap_size, + dim_head + ): + super().__init__() + height, width = pair(fmap_size) + scale = dim_head ** -0.5 + self.height = nn.Parameter(torch.randn(height, dim_head) * scale) + self.width = nn.Parameter(torch.randn(width, dim_head) * scale) + + def forward(self, q): + emb = rearrange(self.height, 'h d -> h () d') + rearrange(self.width, 'w d -> () w d') + emb = rearrange(emb, ' h w d -> (h w) d') + logits = einsum('b h i d, j d -> b h i j', q, emb) + return logits + +class RelPosEmb(nn.Module): + def __init__( + self, + fmap_size, + dim_head + ): + super().__init__() + height, width = pair(fmap_size) + scale = dim_head ** -0.5 + self.fmap_size = fmap_size + self.rel_height = nn.Parameter(torch.randn(height * 2 - 1, dim_head) * scale) + self.rel_width = nn.Parameter(torch.randn(width * 2 - 1, dim_head) * scale) + + def forward(self, q): + h, w = self.fmap_size + + q = rearrange(q, 'b h (x y) d -> b h x y d', x = h, y = w) + rel_logits_w = relative_logits_1d(q, self.rel_width) + rel_logits_w = rearrange(rel_logits_w, 'b h x i y j-> b h (x y) (i j)') + + q = rearrange(q, 'b h x y d -> b h y x d') + rel_logits_h = relative_logits_1d(q, self.rel_height) + rel_logits_h = rearrange(rel_logits_h, 'b h x i y j -> b h (y x) (j i)') + return rel_logits_w + rel_logits_h + +# classes + +class Attention(nn.Module): + def __init__( + self, + *, + dim, + fmap_size, + heads = 4, + dim_head = 128, + rel_pos_emb = False + ): + super().__init__() + self.heads = heads + self.scale = dim_head ** -0.5 + inner_dim = heads * dim_head + + self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False) + + rel_pos_class = AbsPosEmb if not 
rel_pos_emb else RelPosEmb
+        self.pos_emb = rel_pos_class(fmap_size, dim_head)
+
+    def forward(self, fmap):
+        heads, b, c, h, w = self.heads, *fmap.shape
+
+        q, k, v = self.to_qkv(fmap).chunk(3, dim = 1)
+        q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> b h (x y) d', h = heads), (q, k, v))
+
+        q *= self.scale
+
+        sim = einsum('b h i d, b h j d -> b h i j', q, k)
+        sim += self.pos_emb(q)
+
+        attn = sim.softmax(dim = -1)
+
+        out = einsum('b h i j, b h j d -> b h i d', attn, v)
+        out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
+        return out
+
+class Self_Attn(nn.Module):
+    def __init__(
+        self,
+        *,
+        dim,
+        fmap_size,
+        dim_out,
+        proj_factor,
+        downsample,
+        heads = 4,
+        dim_head = 128,
+        rel_pos_emb = False,
+        activation = nn.ReLU(inplace=True)
+    ):
+        super().__init__()
+
+        # shortcut
+        proj_factor = 1  # NOTE: the projection factor is fixed to 1 here, overriding the argument
+        self.stride = 2 if downsample else 1
+
+        if dim != dim_out or downsample:
+            kernel_size, stride, padding = (3, 2, 1) if downsample else (1, 1, 0)
+
+            self.shortcut = nn.Sequential(
+                nn.Conv2d(dim, dim_out, kernel_size, stride = stride, padding = padding, bias = False),
+                nn.BatchNorm2d(dim_out),
+                activation
+            )
+        else:
+            self.shortcut = nn.Identity()
+
+        # contraction and expansion
+
+        attn_dim_in = dim_out // proj_factor
+        # attn_dim_out = heads * dim_head
+        attn_dim_out = attn_dim_in
+
+        self.net = nn.Sequential(
+            nn.Conv2d(dim, attn_dim_in, 1, bias = False),
+            nn.BatchNorm2d(attn_dim_in),
+            activation,
+            ATT(attn_dim_in),
+            # Attention(
+            #     dim = attn_dim_in,
+            #     fmap_size = fmap_size,
+            #     heads = heads,
+            #     dim_head = dim_head,
+            #     rel_pos_emb = rel_pos_emb
+            # ),
+            nn.AvgPool2d((2, 2)) if downsample else nn.Identity(),
+            nn.BatchNorm2d(attn_dim_out),
+            activation,
+            nn.Conv2d(attn_dim_out, dim_out, 1, bias = False),
+            nn.BatchNorm2d(dim_out)
+        )
+
+        # init last batch norm gamma to zero
+
+        nn.init.zeros_(self.net[-1].weight)
+
+        # final activation
+
+        self.activation = activation
+
+    def forward(self, x):
+        shortcut = self.shortcut(x)
+        out = F.interpolate(x, size=(int(x.size(2))//2, int(x.size(3))//2), mode='bilinear', align_corners=True)
+        out = self.net(out)
+        if self.stride == 1:
+            out = F.interpolate(out, size=(int(x.size(2)), int(x.size(3))), mode='bilinear', align_corners=True)
+        out += shortcut
+        return self.activation(out)
+
+class ATT(nn.Module):
+    """ Self attention Layer"""
+    def __init__(self, in_dim):
+        super(ATT, self).__init__()
+        self.channel_in = in_dim
+
+        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
+        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
+        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
+        self.gamma = nn.Parameter(torch.zeros(1))
+
+        self.softmax = nn.Softmax(dim=-1)
+
+    def forward(self, x):
+        """
+        inputs:
+            x: input feature maps (B X C X H X W)
+        returns:
+            out: self attention value + input feature
+            attention: B X N X N (N is Height*Width)
+        """
+        m_batchsize, C, height, width = x.size()
+        proj_query = self.query_conv(x).view(m_batchsize, -1, height*width).permute(0, 2, 1)  # B X N X C'
+        proj_key = self.key_conv(x).view(m_batchsize, -1, height*width)  # B X C' X N
+        energy = torch.bmm(proj_query, proj_key)
+        attention = self.softmax(energy)  # B X N X N
+        proj_value = self.value_conv(x).view(m_batchsize, -1, height*width)  # B X C X N
+
+        out = torch.bmm(proj_value, attention.permute(0, 2, 1))
+        out = out.view(m_batchsize, C, height, width)
+
+        out = self.gamma*out + x
+        return out
\ No newline at end of file
diff --git a/CDARTS_segmentation/train/builder.py b/CDARTS_segmentation/train/builder.py
new file mode 100644
index 0000000..028463f
--- /dev/null
+++ b/CDARTS_segmentation/train/builder.py
@@ -0,0 +1,872 @@
+import torch
+import logging
+import math
+import re
+from collections import OrderedDict
+from copy import deepcopy
+from typing import Tuple, Optional, List
+
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+from functools import partial
+from itertools import repeat
+from torch._six import container_abcs
+# from timm.models.efficientnet_blocks import *
+
+
+def swish(x, inplace: bool = False):
+    """Swish - Described in: https://arxiv.org/abs/1710.05941
+    """
+    return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
+
+class Swish(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(Swish, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        return swish(x, self.inplace)
+
+def hard_swish(x, inplace: bool = False):
+    """Hard-swish (MobileNetV3, https://arxiv.org/abs/1905.02244); defined here since HardSwish below calls it."""
+    inner = F.relu6(x + 3.).div_(6.)
+    return x.mul_(inner) if inplace else x.mul(inner)
+
+class HardSwish(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(HardSwish, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        return hard_swish(x, self.inplace)
+
+def _ntuple(n):
+    def parse(x):
+        if isinstance(x, container_abcs.Iterable):
+            return x
+        return tuple(repeat(x, n))
+    return parse
+
+def get_same_padding(x: int, k: int, s: int, d: int):
+    return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
+
+def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0):
+    ih, iw = x.size()[-2:]
+    pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
+    if pad_h > 0 or pad_w > 0:
+        x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
+    return x
+
+def conv2d_same(
+        x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1),
+        padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1):
+    x = pad_same(x, weight.shape[-2:], stride, dilation)
+    return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
+
+tup_pair = _ntuple(2)
+
+def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
+    padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
+    return padding
+
+def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
+    return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
+
+def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
+    dynamic = False
+    if isinstance(padding, str):
+        # for any string padding, the padding will be calculated for you, one of three ways
+        padding = padding.lower()
+        if padding == 'same':
+            # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
+            if is_static_pad(kernel_size, **kwargs):
+                # static case, no extra overhead
+                padding = get_padding(kernel_size, **kwargs)
+            else:
+                # dynamic 'SAME' padding, has runtime/GPU memory overhead
+                padding = 0
+                dynamic = True
+        elif padding == 'valid':
+            # 'VALID' padding, same as padding=0
+            padding = 0
+        else:
+            # Default to PyTorch style 'same'-ish symmetric padding
+            padding = get_padding(kernel_size, **kwargs)
+    return padding, dynamic
+
+class CondConv2d(nn.Module):
+    """ Conditionally Parameterized Convolution
+    Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py
+    Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion:
https://github.com/pytorch/pytorch/issues/17983 + """ + __constants__ = ['bias', 'in_channels', 'out_channels', 'dynamic_padding'] + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): + super(CondConv2d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = tup_pair(kernel_size) + self.stride = tup_pair(stride) + padding_val, is_padding_dynamic = get_padding_value( + padding, kernel_size, stride=stride, dilation=dilation) + self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript + self.padding = tup_pair(padding_val) + self.dilation = tup_pair(dilation) + self.groups = groups + self.num_experts = num_experts + + self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight_num_param = 1 + for wd in self.weight_shape: + weight_num_param *= wd + self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) + + if bias: + self.bias_shape = (self.out_channels,) + self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self): + init_weight = get_condconv_initializer( + partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) + init_weight(self.weight) + if self.bias is not None: + fan_in = np.prod(self.weight_shape[1:]) + bound = 1 / math.sqrt(fan_in) + init_bias = get_condconv_initializer( + partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) + init_bias(self.bias) + + def forward(self, x, routing_weights): + B, C, H, W = x.shape + weight = torch.matmul(routing_weights, self.weight) + new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size + weight = weight.view(new_weight_shape) + bias = None + if self.bias is not None: + bias = torch.matmul(routing_weights, self.bias) + bias = bias.view(B * self.out_channels) + # move batch elements with channels so each batch element can be efficiently convolved with separate kernel + x = x.view(1, B * C, H, W) + if self.dynamic_padding: + out = conv2d_same( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + else: + out = F.conv2d( + x, weight, bias, stride=self.stride, padding=self.padding, + dilation=self.dilation, groups=self.groups * B) + out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) + + # Literal port (from TF definition) + # x = torch.split(x, 1, 0) + # weight = torch.split(weight, 1, 0) + # if self.bias is not None: + # bias = torch.matmul(routing_weights, self.bias) + # bias = torch.split(bias, 1, 0) + # else: + # bias = [None] * B + # out = [] + # for xi, wi, bi in zip(x, weight, bias): + # wi = wi.view(*self.weight_shape) + # if bi is not None: + # bi = bi.view(*self.bias_shape) + # out.append(self.conv_fn( + # xi, wi, bi, stride=self.stride, padding=self.padding, + # dilation=self.dilation, groups=self.groups)) + # out = torch.cat(out, 0) + return out + +def get_condconv_initializer(initializer, num_experts, expert_shape): + def condconv_initializer(weight): + """CondConv initializer function.""" + num_params = np.prod(expert_shape) + if (len(weight.shape) != 2 or weight.shape[0] != num_experts or + weight.shape[1] != num_params): + raise (ValueError( + 'CondConv variables must have shape 
[num_experts, num_params]')) + for i in range(num_experts): + initializer(weight[i].view(expert_shape)) + return condconv_initializer + +def resolve_bn_args(kwargs): + bn_args = get_bn_args_tf() if kwargs.pop('bn_tf', False) else {} + bn_momentum = kwargs.pop('bn_momentum', None) + if bn_momentum is not None: + bn_args['momentum'] = bn_momentum + bn_eps = kwargs.pop('bn_eps', None) + if bn_eps is not None: + bn_args['eps'] = bn_eps + return bn_args + +def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None): + """Round number of filters based on depth multiplier.""" + if not multiplier: + return channels + channels *= multiplier + return make_divisible(channels, divisor, channel_min) + +def _parse_ksize(ss): + if ss.isdigit(): + return int(ss) + else: + return [int(k) for k in ss.split('.')] + +def make_divisible(v, divisor=8, min_value=None): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + +class DepthwiseSeparableConv(nn.Module): + """ DepthwiseSeparable block + Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion + (factor of 1.0). This is an alternative to having a IR with an optional first pw conv. + """ + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, + pw_kernel_size=1, pw_act=False, se_ratio=0., se_kwargs=None, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0.): + super(DepthwiseSeparableConv, self).__init__() + norm_kwargs = norm_kwargs or {} + has_se = se_ratio is not None and se_ratio > 0. + self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip + self.has_pw_act = pw_act # activation after point-wise conv + self.drop_path_rate = drop_path_rate + + self.conv_dw = create_conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True) + self.bn1 = norm_layer(in_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Squeeze-and-excitation + if has_se: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = None + + self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = norm_layer(out_chs, **norm_kwargs) + self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity() + + def feature_info(self, location): + if location == 'expansion': + # no expansion in this block, use depthwise, before SE + info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels) + elif location == 'depthwise': # after SE + info = dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels) + else: # location == 'bottleneck' + info = dict(module='', hook_type='', num_chs=self.conv_pw.out_channels) + return info + + def forward(self, x): + residual = x + + x = self.conv_dw(x) + x = self.bn1(x) + x = self.act1(x) + + if self.se is not None: + x = self.se(x) + + x = self.conv_pw(x) + x = self.bn2(x) + x = self.act2(x) + + if self.has_residual: + x += residual + return x + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE and CondConv routing""" + + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, + exp_ratio=1.0, exp_kernel_size=1, 
pw_kernel_size=1, + se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, + conv_kwargs=None, drop_path_rate=0.): + super(InvertedResidual, self).__init__() + norm_kwargs = norm_kwargs or {} + conv_kwargs = conv_kwargs or {} + mid_chs = make_divisible(in_chs * exp_ratio) + has_se = se_ratio is not None and se_ratio > 0. + self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.drop_path_rate = drop_path_rate + + # Point-wise expansion + self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) + self.bn1 = norm_layer(mid_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + # Depth-wise convolution + self.conv_dw = create_conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, + padding=pad_type, depthwise=True, **conv_kwargs) + self.bn2 = norm_layer(mid_chs, **norm_kwargs) + self.act2 = act_layer(inplace=True) + + # Squeeze-and-excitation + if has_se: + se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) + self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) + else: + self.se = None + + # Point-wise linear projection + self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) + self.bn3 = norm_layer(out_chs, **norm_kwargs) + + def feature_info(self, location): + if location == 'expansion': + info = dict(module='act1', hook_type='forward', num_chs=self.conv_pw.in_channels) + elif location == 'depthwise': # after SE + info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels) + else: # location == 'bottleneck' + info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels) + return info + + def forward(self, x): + residual = x + + # Point-wise expansion + x = self.conv_pw(x) + x = self.bn1(x) + x = self.act1(x) + + # Depth-wise convolution + x = self.conv_dw(x) + x = self.bn2(x) + x = self.act2(x) + + # Squeeze-and-excitation + if self.se is not None: + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn3(x) + + if self.has_residual: + x += residual + + return x + +def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding = kwargs.pop('padding', '') + kwargs.setdefault('bias', False) + padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) + if is_dynamic: + return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) + else: + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + +def create_conv2d(in_chs, out_chs, kernel_size, **kwargs): + """ Select a 2d convolution implementation based on arguments + Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. + Used extensively by EfficientNet, MobileNetv3 and related networks. + """ + assert 'groups' not in kwargs # only use 'depthwise' bool arg + if isinstance(kernel_size, list): + assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. 
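+        # e.g. kernel_size=[3, 5, 7] builds a MixedConv2d with one kernel group per listed
+        # size, while an int kernel_size falls through to the CondConv2d / plain conv branch below.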
+        m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs)
+    else:
+        depthwise = kwargs.pop('depthwise', False)
+        groups = out_chs if depthwise else 1
+        if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
+            m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
+        else:
+            m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
+    return m
+
+def resolve_se_args(kwargs, in_chs, act_layer=None):
+    se_kwargs = kwargs.copy() if kwargs is not None else {}
+    # fill in args that aren't specified with the defaults
+    for k, v in _SE_ARGS_DEFAULT.items():
+        se_kwargs.setdefault(k, v)
+    # some models, like MobileNetV3, calculate SE reduction chs from the containing block's mid_ch instead of in_ch
+    if not se_kwargs.pop('reduce_mid'):
+        se_kwargs['reduced_base_chs'] = in_chs
+    # act_layer override, if it remains None, the containing block's act_layer will be used
+    if se_kwargs['act_layer'] is None:
+        assert act_layer is not None
+        se_kwargs['act_layer'] = act_layer
+    return se_kwargs
+
+def sigmoid(x, inplace: bool = False):
+    return x.sigmoid_() if inplace else x.sigmoid()
+
+_SE_ARGS_DEFAULT = dict(
+    gate_fn=sigmoid,
+    act_layer=None,
+    reduce_mid=False,
+    divisor=1)
+
+def _decode_block_str(block_str):
+    """ Decode block definition string
+    Gets a list of block arg (dicts) through a string notation of arguments.
+    E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
+    All args can exist in any order with the exception of the leading string which
+    is assumed to indicate the block type.
+    leading string - block type (
+      ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct)
+    r - number of repeat blocks,
+    k - kernel size,
+    s - strides (1-9),
+    e - expansion ratio,
+    c - output channels,
+    se - squeeze/excitation ratio
+    n - activation fn ('re', 'r6', 'hs', or 'sw')
+    Args:
+        block_str: a string representation of block arguments.
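+    Example:
+        'ir_r2_k3_s2_e4_c64_se0.25' decodes to two repeats of an InvertedResidual
+        block with dw_kernel_size=3, stride=2, exp_ratio=4.0, out_chs=64 and se_ratio=0.25.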
+ Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + noskip = False + for op in ops: + # string options being checked on individual basis, combine if they grow + if op == 'noskip': + noskip = True + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = nn.ReLU + elif v == 'r6': + value = nn.ReLU6 + elif v == 'hs': + value = HardSwish + elif v == 'sw': + value = Swish + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_layer is None, the model default (passed to model init) will be used + act_layer = options['n'] if 'n' in options else None + exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 + fake_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def + + num_repeat = int(options['r']) + # each type of block has different valid arguments, fill accordingly + if block_type == 'ir': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + noskip=noskip, + ) + if 'cc' in options: + block_args['num_experts'] = int(options['cc']) + elif block_type == 'ds' or block_type == 'dsa': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or noskip, + ) + elif block_type == 'er': + block_args = dict( + block_type=block_type, + exp_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + fake_in_chs=fake_in_chs, + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_layer=act_layer, + noskip=noskip, + ) + elif block_type == 'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + stride=int(options['s']), + act_layer=act_layer, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + return block_args, num_repeat + +class SqueezeExcite(nn.Module): + def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None, + act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1, **_): + super(SqueezeExcite, self).__init__() + self.gate_fn = gate_fn + reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor) + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True) + self.act1 = act_layer(inplace=True) + self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True) + + def forward(self, x): + x_se = self.avg_pool(x) + x_se = self.conv_reduce(x_se) + x_se = self.act1(x_se) + x_se = self.conv_expand(x_se) + x = x * self.gate_fn(x_se) + return x + +class Sigmoid(nn.Module): + def 
__init__(self, inplace: bool = False): + super(Sigmoid, self).__init__() + self.inplace = inplace + + def forward(self, x): + return x.sigmoid_() if self.inplace else x.sigmoid() + +class ConvBnAct(nn.Module): + def __init__(self, in_chs, out_chs, kernel_size, + stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, + norm_layer=nn.BatchNorm2d, norm_kwargs=None): + super(ConvBnAct, self).__init__() + norm_kwargs = norm_kwargs or {} + self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type) + self.bn1 = norm_layer(out_chs, **norm_kwargs) + self.act1 = act_layer(inplace=True) + + def feature_info(self, location): + if location == 'expansion' or location == 'depthwise': + # no expansion or depthwise this block, use act after conv + info = dict(module='act1', hook_type='forward', num_chs=self.conv.out_channels) + else: # location == 'bottleneck' + info = dict(module='', hook_type='', num_chs=self.conv.out_channels) + return info + + def forward(self, x): + x = self.conv(x) + x = self.bn1(x) + x = self.act1(x) + return x + +def adaptive_pool_feat_mult(pool_type='avg'): + if pool_type == 'catavgmax': + return 2 + else: + return 1 + +def modify_block_args(block_args, kernel_size, exp_ratio): + # kernel_size: 3,5,7 + # exp_ratio: 4,6 + block_type = block_args['block_type'] + # each type of block has different valid arguments, fill accordingly + if block_type == 'cn': + block_args['kernel_size'] = kernel_size + elif block_type == 'er': + block_args['exp_kernel_size'] = kernel_size + else: + block_args['dw_kernel_size'] = kernel_size + + if block_type == 'ir' or block_type == 'er': + block_args['exp_ratio'] = exp_ratio + return block_args + +def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): + """ Per-stage depth scaling + Scales the block repeats in each stage. This depth scaling impl maintains + compatibility with the EfficientNet scaling method, while allowing sensible + scaling for other models that may have multiple block arg definitions in each stage. + """ + + # We scale the total repeat count for each stage, there may be multiple + # block arg defs per stage so we need to sum. + num_repeat = sum(repeats) + if depth_trunc == 'round': + # Truncating to int by rounding allows stages with few repeats to remain + # proportionally smaller for longer. This is a good choice when stage definitions + # include single repeat stages that we'd prefer to keep that way as long as possible + num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) + else: + # The default for EfficientNet truncates repeats to int via 'ceil'. + # Any multiplier > 1.0 will result in an increased depth for every stage. + num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) + + # Proportionally distribute repeat count scaling to each block definition in the stage. + # Allocation is done in reverse as it results in the first block being less likely to be scaled. + # The first block makes less sense to repeat in most of the arch definitions. 
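+    # Worked example: repeats=[1, 2] with depth_multiplier=2.0 and depth_trunc='ceil'
+    # gives num_repeat=3 and num_repeat_scaled=6; scanning in reverse allocates
+    # 2 -> 4 and 1 -> 2, so repeats_scaled ends up as [2, 4].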
+ repeats_scaled = [] + for r in repeats[::-1]: + rs = max(1, round((r / num_repeat * num_repeat_scaled))) + repeats_scaled.append(rs) + num_repeat -= r + num_repeat_scaled -= rs + repeats_scaled = repeats_scaled[::-1] + + # Apply the calculated scaling to each block arg in the stage + sa_scaled = [] + for ba, rep in zip(stack_args, repeats_scaled): + sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) + return sa_scaled + + +def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1): + arch_args = [] + for stack_idx, block_strings in enumerate(arch_def): + assert isinstance(block_strings, list) + stack_args = [] + repeats = [] + for block_str in block_strings: + assert isinstance(block_str, str) + ba, rep = _decode_block_str(block_str) + if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: + ba['num_experts'] *= experts_multiplier + stack_args.append(ba) + repeats.append(rep) + arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc)) + return arch_args + + +class ChildNetBuilder: + """ Build Trunk Blocks + """ + def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None, + output_stride=32, pad_type='', act_layer=None, se_kwargs=None, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0., feature_location='', + verbose=False): + self.channel_multiplier = channel_multiplier + self.channel_divisor = channel_divisor + self.channel_min = channel_min + self.output_stride = output_stride + self.pad_type = pad_type + self.act_layer = act_layer + self.se_kwargs = se_kwargs + self.norm_layer = norm_layer + self.norm_kwargs = norm_kwargs + self.drop_path_rate = drop_path_rate + self.feature_location = feature_location + assert feature_location in ('pre_pwl', 'post_exp', '') + self.verbose = verbose + + # state updated during build, consumed by model + self.in_chs = None + self.features = OrderedDict() + + def _round_channels(self, chs): + return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min) + + def _make_block(self, ba, block_idx, block_count): + drop_path_rate = self.drop_path_rate * block_idx / block_count + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self._round_channels(ba['out_chs']) + if 'fake_in_chs' in ba and ba['fake_in_chs']: + # FIXME this is a hack to work around mismatch in origin impl input filters + ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) + ba['norm_layer'] = self.norm_layer + ba['norm_kwargs'] = self.norm_kwargs + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer + assert ba['act_layer'] is not None + if bt == 'ir': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + logging.info(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba))) + if ba.get('num_experts', 0) > 0: + block = CondConvResidual(**ba) + else: + block = InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + logging.info(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba))) + block = DepthwiseSeparableConv(**ba) + elif bt == 'er': + ba['drop_path_rate'] = drop_path_rate + ba['se_kwargs'] = self.se_kwargs + if self.verbose: + logging.info(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba))) + block = EdgeResidual(**ba) + elif bt == 'cn': + if self.verbose: + 
logging.info('  ConvBnAct {}, Args: {}'.format(block_idx, str(ba)))
+            block = ConvBnAct(**ba)
+        else:
+            assert False, 'Unknown block type (%s) while building model.' % bt
+        self.in_chs = ba['out_chs']  # update in_chs for arg of next block
+
+        return block
+
+    def __call__(self, in_chs, model_block_args):
+        """ Build the blocks
+        Args:
+            in_chs: Number of input-channels passed to first block
+            model_block_args: A list of lists, outer list defines stages, inner
+                list contains strings defining block configuration(s)
+        Return:
+            List of block stacks (each stack wrapped in nn.Sequential)
+        """
+        if self.verbose:
+            logging.info('Building model trunk with %d stages...' % len(model_block_args))
+        self.in_chs = in_chs
+        total_block_count = sum([len(x) for x in model_block_args])
+        total_block_idx = 0
+        current_stride = 2
+        current_dilation = 1
+        feature_idx = 0
+        stages = []
+        # outer list of block_args defines the stacks ('stages' by some conventions)
+        for stage_idx, stage_block_args in enumerate(model_block_args):
+            last_stack = stage_idx == (len(model_block_args) - 1)
+            if self.verbose:
+                logging.info('Stack: {}'.format(stage_idx))
+            assert isinstance(stage_block_args, list)
+
+            blocks = []
+            # each stack (stage) contains a list of block arguments
+            for block_idx, block_args in enumerate(stage_block_args):
+                last_block = block_idx == (len(stage_block_args) - 1)
+                extract_features = ''  # No features extracted
+                if self.verbose:
+                    logging.info(' Block: {}'.format(block_idx))
+
+                # Sort out stride, dilation, and feature extraction details
+                assert block_args['stride'] in (1, 2)
+                if block_idx >= 1:
+                    # only the first block in any stack can have a stride > 1
+                    block_args['stride'] = 1
+
+                do_extract = False
+                if self.feature_location == 'pre_pwl':
+                    if last_block:
+                        next_stage_idx = stage_idx + 1
+                        if next_stage_idx >= len(model_block_args):
+                            do_extract = True
+                        else:
+                            do_extract = model_block_args[next_stage_idx][0]['stride'] > 1
+                elif self.feature_location == 'post_exp':
+                    if block_args['stride'] > 1 or (last_stack and last_block):
+                        do_extract = True
+                if do_extract:
+                    extract_features = self.feature_location
+
+                next_dilation = current_dilation
+                if block_args['stride'] > 1:
+                    next_output_stride = current_stride * block_args['stride']
+                    if next_output_stride > self.output_stride:
+                        next_dilation = current_dilation * block_args['stride']
+                        block_args['stride'] = 1
+                        if self.verbose:
+                            logging.info('  Converting stride to dilation to maintain output_stride=={}'.format(
+                                self.output_stride))
+                    else:
+                        current_stride = next_output_stride
+                block_args['dilation'] = current_dilation
+                if next_dilation != current_dilation:
+                    current_dilation = next_dilation
+
+                # create the block
+                block = self._make_block(block_args, total_block_idx, total_block_count)
+                blocks.append(block)
+
+                # stash feature module name and channel info for model feature extraction
+                if extract_features:
+                    info = block.feature_info(extract_features)
+                    feature_module = info['module']
+                    if feature_module:
+                        feature_module = 'blocks.{}.{}.'.format(stage_idx, block_idx) + feature_module
+                    feature_channels = info['num_chs']
+                    self.features[feature_idx] = dict(
+                        name=feature_module,
+                        num_chs=feature_channels
+                    )
+                    feature_idx += 1
+
+                total_block_idx += 1  # incr global block idx (across all stacks)
+            stages.append(nn.Sequential(*blocks))
+        return stages
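+
+# Illustrative usage sketch (assumed toy arch_def, not part of the original file):
+#   arch_args = decode_arch_def([['ds_r1_k3_s1_c16'], ['ir_r2_k3_s2_e6_c24']])
+#   builder = ChildNetBuilder(act_layer=nn.ReLU)
+#   stages = builder(in_chs=32, model_block_args=arch_args)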
+
+def _init_weight_goog(m, n='', fix_group_fanout=True, last_bn=None):
+    """ Weight initialization as per Tensorflow official implementations.
+    Args:
+        m (nn.Module): module to init
+        n (str): module name
+        fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs
+    Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc:
+    * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py
+    * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
+    """
+    last_bn = last_bn or []
+    if isinstance(m, CondConv2d):
+        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+        if fix_group_fanout:
+            fan_out //= m.groups
+        init_weight_fn = get_condconv_initializer(
+            lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape)
+        init_weight_fn(m.weight)
+        if m.bias is not None:
+            m.bias.data.zero_()
+    elif isinstance(m, nn.Conv2d):
+        fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+        if fix_group_fanout:
+            fan_out //= m.groups
+        m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
+        if m.bias is not None:
+            m.bias.data.zero_()
+    elif isinstance(m, nn.BatchNorm2d):
+        if n in last_bn:
+            # zero-gamma init for the last BN of a residual block
+            m.weight.data.zero_()
+            m.bias.data.zero_()
+        else:
+            m.weight.data.fill_(1.0)
+            m.bias.data.zero_()
+    elif isinstance(m, nn.Linear):
+        fan_out = m.weight.size(0)  # fan-out
+        fan_in = 0
+        if 'routing_fn' in n:
+            fan_in = m.weight.size(1)
+        init_range = 1.0 / math.sqrt(fan_in + fan_out)
+        m.weight.data.uniform_(-init_range, init_range)
+        m.bias.data.zero_()
+
+
+def efficientnet_init_weights(model: nn.Module, init_fn=None, zero_gamma=False):
+    last_bn = []
+    if zero_gamma:
+        prev_n = ''
+        for n, m in model.named_modules():
+            if isinstance(m, nn.BatchNorm2d):
+                if ''.join(prev_n.split('.')[:-1]) != ''.join(n.split('.')[:-1]):
+                    last_bn.append(prev_n)
+                prev_n = n
+        last_bn.append(prev_n)
+
+    init_fn = init_fn or _init_weight_goog
+    for n, m in model.named_modules():
+        init_fn(m, n, last_bn=last_bn)
diff --git a/CDARTS_segmentation/train/cal_model.py b/CDARTS_segmentation/train/cal_model.py
new file mode 100644
index 0000000..033d0ce
--- /dev/null
+++ b/CDARTS_segmentation/train/cal_model.py
@@ -0,0 +1,68 @@
+from __future__ import division
+import os
+import sys
+import time
+import glob
+import json
+import logging
+import argparse
+import numpy as np
+import pandas as pd
+
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import seaborn as sns
+import warnings; warnings.filterwarnings(action='once')
+
+class NpEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.integer):
+            return int(obj)
+        elif isinstance(obj, np.floating):
+            return float(obj)
+        elif isinstance(obj, np.ndarray):
+            return obj.tolist()
+        else:
+            return super(NpEncoder, self).default(obj)
+
+def main():
+
+    # Import Data
+    model_files = glob.glob("train_samples/*.json")
+
+    for model_file in model_files:
+        with open(model_file, 'r') as f:
+            # dict_a = json.loads(f, cls=NpEncoder)
+            model_dict = json.loads(f.read())
+
+    df = pd.read_csv("mpg_ggplot2.csv")
+
+    # Prepare data
+    x_var = 'manufacturer'
+    groupby_var = 'class'
+    df_agg = df.loc[:, [x_var, groupby_var]].groupby(groupby_var)
+    vals = [df[x_var].values.tolist() for i, df in df_agg]
+
+    # Draw
+    plt.figure(figsize=(16, 9), dpi=80)
+    colors = [plt.cm.Spectral(i/float(len(vals)-1)) for i in range(len(vals))]
+    n, bins, patches = plt.hist(vals, df[x_var].unique().__len__(), stacked=True, density=False, color=colors[:len(vals)])
+
+    # Decoration
+    plt.legend({group: col for group, col in
zip(np.unique(df[groupby_var]).tolist(), colors[:len(vals)])})
+    plt.title(f"Stacked Histogram of ${x_var}$ colored by ${groupby_var}$", fontsize=22)
+    plt.xlabel(x_var)
+    plt.ylabel("Frequency")
+    plt.ylim(0, 40)
+    plt.xticks(rotation=90, horizontalalignment='left')
+    plt.show()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/CDARTS_segmentation/train/config_test.py b/CDARTS_segmentation/train/config_test.py
new file mode 100644
index 0000000..7f549a8
--- /dev/null
+++ b/CDARTS_segmentation/train/config_test.py
@@ -0,0 +1,77 @@
+# encoding: utf-8
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os.path as osp
+import sys
+import numpy as np
+from easydict import EasyDict as edict
+
+C = edict()
+config = C
+cfg = C
+
+C.seed = 12345
+
+"""please configure ROOT_dir and user before first use"""
+C.abs_dir = osp.realpath(".")
+C.root_dir = osp.realpath("..")
+C.this_dir = C.abs_dir.split(osp.sep)[-1]
+C.log_dir = osp.abspath(osp.join(C.root_dir, 'log', C.this_dir))
+
+"""Data Dir"""
+C.dataset_path = "/home/t-hongyuanyu/data/cityscapes/"
+C.img_root_folder = C.dataset_path
+C.gt_root_folder = C.dataset_path
+C.train_source = osp.join(C.dataset_path, "cityscapes_train_fine.txt")
+C.train_eval_source = osp.join(C.dataset_path, "cityscapes_train_val_fine.txt")
+C.eval_source = osp.join(C.dataset_path, "cityscapes_val_fine.txt")
+C.test_source = osp.join(C.dataset_path, "cityscapes_test.txt")
+
+"""Path Config"""
+def add_path(path):
+    if path not in sys.path:
+        sys.path.insert(0, path)
+
+add_path(osp.join(C.root_dir, 'tools'))
+add_path(C.root_dir)
+
+"""Image Config"""
+C.num_classes = 19
+C.background = -1
+C.image_mean = np.array([0.485, 0.456, 0.406])
+C.image_std = np.array([0.229, 0.224, 0.225])
+C.target_size = 1024
+C.down_sampling = 1  # first down_sampling, then crop
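+# note: gt_down_sampling (below) should match down_sampling so that images and
+# labels stay aligned after the downsample-then-crop preprocessing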
+C.gt_down_sampling = 1
+C.num_train_imgs = 2975
+C.num_eval_imgs = 500
+
+""" Settings for network, this would be different for each kind of model"""
+C.bn_eps = 1e-5
+C.bn_momentum = 0.1
+
+"""Eval Config"""
+C.eval_stride_rate = 5 / 6
+C.eval_scale_array = [1, ]
+C.eval_flip = False
+C.eval_base_size = 1024
+C.eval_crop_size = 1024
+C.eval_height = 1024
+C.eval_width = 2048
+
+C.layers = 16
+C.width_mult_list = [4./12, 6./12, 8./12, 10./12, 1.,]
+C.stem_head_width = (1, 1)
+C.Fch = 20
+C.image_height = 512
+C.image_width = 1024
+
+########################################
+C.save = "test"
+C.is_test = False  # if True, prediction files for the test set will be generated
+C.is_eval = True  # if True, train.py will only run evaluation once
+C.json_file = "./jsons/3path_big2.json"
+C.model_path = "./3path_big2.pth.tar"  # path to the pretrained directory to be evaluated
diff --git a/CDARTS_segmentation/train/config_train.py b/CDARTS_segmentation/train/config_train.py
new file mode 100644
index 0000000..f23b244
--- /dev/null
+++ b/CDARTS_segmentation/train/config_train.py
@@ -0,0 +1,115 @@
+# encoding: utf-8
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os.path as osp
+import sys
+import numpy as np
+from easydict import EasyDict as edict
+
+C = edict()
+config = C
+cfg = C
+
+C.seed = 12345
+
+"""please configure ROOT_dir and user before first use"""
+#C.repo_name = 'FasterSeg'
+#C.abs_dir = osp.realpath(".")
+#C.this_dir = C.abs_dir.split(osp.sep)[-1]
+#C.root_dir = C.abs_dir[:C.abs_dir.index(C.repo_name) + len(C.repo_name)]
+C.abs_dir = osp.realpath(".")
+C.root_dir = osp.realpath("..")
+C.this_dir = C.abs_dir.split(osp.sep)[-1]
+C.log_dir = osp.abspath(osp.join(C.root_dir, 'log', C.this_dir))
+
+"""Data Dir"""
+C.dataset_path = "../DATASET/cityscapes/"
+C.img_root_folder = C.dataset_path
+C.gt_root_folder = C.dataset_path
+C.train_source = osp.join(C.dataset_path, "cityscapes_train_fine.txt")
+C.train_eval_source = osp.join(C.dataset_path, "cityscapes_train_val_fine.txt")
+C.eval_source = osp.join(C.dataset_path, "cityscapes_val_fine.txt")
+C.test_source = osp.join(C.dataset_path, "cityscapes_test.txt")
+
+"""Path Config"""
+def add_path(path):
+    if path not in sys.path:
+        sys.path.insert(0, path)
+
+add_path(osp.join(C.root_dir, 'tools'))
+add_path(C.root_dir)
+
+"""Image Config"""
+C.num_classes = 19
+C.background = -1
+C.image_mean = np.array([0.485, 0.456, 0.406])
+C.image_std = np.array([0.229, 0.224, 0.225])
+C.target_size = 1024
+C.down_sampling = 1  # first down_sampling, then crop
+C.gt_down_sampling = 1
+C.num_train_imgs = 2975
+C.num_eval_imgs = 500
+
+"""Settings for network; these would be different for each kind of model"""
+C.bn_eps = 1e-5
+C.bn_momentum = 0.1
+
+"""Train Config"""
+C.lr = 0.01
+C.momentum = 0.9
+C.weight_decay = 5e-4
+C.nepochs = 600
+C.niters_per_epoch = 1000
+C.num_workers = 4
+C.train_scale_array = [0.75, 1, 1.25]
+
+"""Eval Config"""
+C.eval_stride_rate = 5 / 6
+C.eval_scale_array = [1, ]
+C.eval_flip = False
+C.eval_base_size = 1024
+C.eval_crop_size = 1024
+C.eval_height = 1024
+C.eval_width = 2048
+
+
+C.layers = 16
+"""Train Config"""
+C.mode = "teacher"  # "teacher" or "student"
+if C.mode == "teacher":
+    ##### train the teacher model only #################################
+    C.arch_idx = [1]  # architecture index (0 for teacher, 1 for student)
+    C.branch = [3]
+    C.width_mult_list = [4./12, 6./12, 8./12, 10./12, 1.,]
+    # C.stem_head_width = [(1, 1)]
+    C.stem_head_width = [(8./12, 8./12)]
+    C.load_path = "search-224x448_F12.L16_batch2-20200828-201547"  # path to the searched directory
+    C.load_epoch = "last"  # "last" or an int string (e.g. "30"): which epoch to load from the searched architecture
+    # C.batch_size = 12
+    C.batch_size = 4
+    C.Fch = 12
+    C.image_height = 512
+    C.image_width = 1024
+    C.save = "%dx%d_model_batch%d" % (C.image_height, C.image_width, C.batch_size)
+elif C.mode == "student":
+    ##### train the student with KL distillation from the teacher ######
+    C.arch_idx = [0, 1]  # 0 for teacher, 1 for student
+    C.branch = [2, 2]
+    C.width_mult_list = [4./12, 6./12, 8./12, 10./12, 1.,]
+    C.stem_head_width = [(1, 1), (8./12, 8./12),]
+    C.load_path = "fasterseg"  # path to the searched directory
+    C.teacher_path = "fasterseg"  # where to load the pretrained teacher's weights
+    C.load_epoch = "last"  # "last" or an int string (e.g. "30")
+    C.batch_size = 12
+    C.Fch = 12
+    C.image_height = 512
+    C.image_width = 1024
+    C.save = "%dx%d_student_batch%d" % (C.image_height, C.image_width, C.batch_size)
+
+########################################
+C.is_test = False  # if True, prediction files for the test set will be generated
+C.is_eval = False  # if True, train.py will only run evaluation once
+C.eval_path = "fasterseg"  # path to the pretrained directory to be evaluated
diff --git a/CDARTS_segmentation/train/configs/ADE20K/512.yaml b/CDARTS_segmentation/train/configs/ADE20K/512.yaml
new file mode 100644
index 0000000..2d0662b
--- /dev/null
+++ b/CDARTS_segmentation/train/configs/ADE20K/512.yaml
@@ -0,0 +1,42 @@
+_BASE_: ../Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml
+MODEL:
+  WEIGHTS: "detectron2://DeepLab/R-52.pkl"
+  PIXEL_MEAN: [123.675, 116.280, 103.530]
+  PIXEL_STD: [58.395, 57.120, 57.375]
+  BACKBONE:
+    NAME: "build_resnet_deeplab_backbone"
+  RESNETS:
+    DEPTH: 50
+    NORM: "SyncBN"
+    RES5_MULTI_GRID: [1, 2, 4]
+    STEM_TYPE: "deeplab"
+    STEM_OUT_CHANNELS: 128
+    STRIDE_IN_1X1: False
+  SEM_SEG_HEAD:
+    NUM_CLASSES: 133
+    LOSS_TOP_K: 1.0
+    USE_DEPTHWISE_SEPARABLE_CONV: True
+  PANOPTIC_DEEPLAB:
+    STUFF_AREA: 4096
+    NMS_KERNEL: 41
+    SIZE_DIVISIBILITY: 640
+    USE_DEPTHWISE_SEPARABLE_CONV: True
+DATASETS:
+  TRAIN: ("ade20k_sem_seg_train",)
+  TEST: ("ade20k_sem_seg_val",)
+SOLVER:
+  BASE_LR: 0.0005
+  MAX_ITER: 200000000
+  IMS_PER_BATCH: 32
+INPUT:
+  FORMAT: "RGB"
+  GAUSSIAN_SIGMA: 8
+  MIN_SIZE_TRAIN: (256, 320, 352, 416, 448, 512, 576, 608, 672, 704, 768, 832, 864, 928, 960, 1024)
+  MIN_SIZE_TRAIN_SAMPLING: "choice"
+  MIN_SIZE_TEST: 512
+  MAX_SIZE_TRAIN: 1024
+  MAX_SIZE_TEST: 512
+  CROP:
+    ENABLED: True
+    TYPE: "absolute"
+    SIZE: (512, 512)
diff --git
a/CDARTS_segmentation/train/configs/ADE20K/base.yaml b/CDARTS_segmentation/train/configs/ADE20K/base.yaml new file mode 100644 index 0000000..2539500 --- /dev/null +++ b/CDARTS_segmentation/train/configs/ADE20K/base.yaml @@ -0,0 +1,42 @@ +_BASE_: ../Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml +MODEL: + WEIGHTS: "detectron2://DeepLab/R-52.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + BACKBONE: + NAME: "build_resnet_deeplab_backbone" + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + RES5_MULTI_GRID: [1, 2, 4] + STEM_TYPE: "deeplab" + STEM_OUT_CHANNELS: 128 + STRIDE_IN_1X1: False + SEM_SEG_HEAD: + NUM_CLASSES: 133 + LOSS_TOP_K: 1.0 + USE_DEPTHWISE_SEPARABLE_CONV: True + PANOPTIC_DEEPLAB: + STUFF_AREA: 4096 + NMS_KERNEL: 41 + SIZE_DIVISIBILITY: 640 + USE_DEPTHWISE_SEPARABLE_CONV: True +DATASETS: + TRAIN: ("ade20k_sem_seg_train",) + TEST: ("ade20k_sem_seg_val",) +SOLVER: + BASE_LR: 0.0005 + MAX_ITER: 200000000 + IMS_PER_BATCH: 32 +INPUT: + FORMAT: "RGB" + GAUSSIAN_SIGMA: 8 + MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 640) for x in range(5, 16)]"] + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 640 + MAX_SIZE_TRAIN: 960 + MAX_SIZE_TEST: 640 + CROP: + ENABLED: True + TYPE: "absolute" + SIZE: (640, 640) diff --git a/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml b/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml new file mode 100644 index 0000000..e7e1340 --- /dev/null +++ b/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/Base-PanopticDeepLab-OS16.yaml @@ -0,0 +1,65 @@ +MODEL: + META_ARCHITECTURE: "PanopticDeepLab" + BACKBONE: + FREEZE_AT: 0 + RESNETS: + OUT_FEATURES: ["res2", "res3", "res5"] + RES5_DILATION: 2 + SEM_SEG_HEAD: + NAME: "PanopticDeepLabSemSegHead" + IN_FEATURES: ["res2", "res3", "res5"] + PROJECT_FEATURES: ["res2", "res3"] + PROJECT_CHANNELS: [32, 64] + ASPP_CHANNELS: 256 + ASPP_DILATIONS: [6, 12, 18] + ASPP_DROPOUT: 0.1 + HEAD_CHANNELS: 256 + CONVS_DIM: 256 + COMMON_STRIDE: 4 + NUM_CLASSES: 19 + LOSS_TYPE: "hard_pixel_mining" + NORM: "SyncBN" + INS_EMBED_HEAD: + NAME: "PanopticDeepLabInsEmbedHead" + IN_FEATURES: ["res2", "res3", "res5"] + PROJECT_FEATURES: ["res2", "res3"] + PROJECT_CHANNELS: [32, 64] + ASPP_CHANNELS: 256 + ASPP_DILATIONS: [6, 12, 18] + ASPP_DROPOUT: 0.1 + HEAD_CHANNELS: 32 + CONVS_DIM: 128 + COMMON_STRIDE: 4 + NORM: "SyncBN" + CENTER_LOSS_WEIGHT: 200.0 + OFFSET_LOSS_WEIGHT: 0.01 + PANOPTIC_DEEPLAB: + STUFF_AREA: 2048 + CENTER_THRESHOLD: 0.1 + NMS_KERNEL: 7 + TOP_K_INSTANCE: 200 +DATASETS: + TRAIN: ("cityscapes_fine_panoptic_train",) + TEST: ("cityscapes_fine_panoptic_val",) +SOLVER: + OPTIMIZER: "ADAM" + BASE_LR: 0.001 + WEIGHT_DECAY: 0.0 + WEIGHT_DECAY_NORM: 0.0 + WEIGHT_DECAY_BIAS: 0.0 + MAX_ITER: 60000 + LR_SCHEDULER_NAME: "WarmupPolyLR" + IMS_PER_BATCH: 4 +INPUT: + MIN_SIZE_TRAIN: (512, 640, 704, 832, 896, 1024, 1152, 1216, 1344, 1408, 1536, 1664, 1728, 1856, 1920, 2048) + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 1024 + MAX_SIZE_TRAIN: 4096 + MAX_SIZE_TEST: 2048 + CROP: + ENABLED: True + TYPE: "absolute" + SIZE: (1024, 2048) +DATALOADER: + NUM_WORKERS: 4 +VERSION: 2 \ No newline at end of file diff --git a/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/base.yaml b/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/base.yaml new file mode 100644 index 0000000..e7e1340 --- /dev/null +++ 
b/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/base.yaml @@ -0,0 +1,65 @@ +MODEL: + META_ARCHITECTURE: "PanopticDeepLab" + BACKBONE: + FREEZE_AT: 0 + RESNETS: + OUT_FEATURES: ["res2", "res3", "res5"] + RES5_DILATION: 2 + SEM_SEG_HEAD: + NAME: "PanopticDeepLabSemSegHead" + IN_FEATURES: ["res2", "res3", "res5"] + PROJECT_FEATURES: ["res2", "res3"] + PROJECT_CHANNELS: [32, 64] + ASPP_CHANNELS: 256 + ASPP_DILATIONS: [6, 12, 18] + ASPP_DROPOUT: 0.1 + HEAD_CHANNELS: 256 + CONVS_DIM: 256 + COMMON_STRIDE: 4 + NUM_CLASSES: 19 + LOSS_TYPE: "hard_pixel_mining" + NORM: "SyncBN" + INS_EMBED_HEAD: + NAME: "PanopticDeepLabInsEmbedHead" + IN_FEATURES: ["res2", "res3", "res5"] + PROJECT_FEATURES: ["res2", "res3"] + PROJECT_CHANNELS: [32, 64] + ASPP_CHANNELS: 256 + ASPP_DILATIONS: [6, 12, 18] + ASPP_DROPOUT: 0.1 + HEAD_CHANNELS: 32 + CONVS_DIM: 128 + COMMON_STRIDE: 4 + NORM: "SyncBN" + CENTER_LOSS_WEIGHT: 200.0 + OFFSET_LOSS_WEIGHT: 0.01 + PANOPTIC_DEEPLAB: + STUFF_AREA: 2048 + CENTER_THRESHOLD: 0.1 + NMS_KERNEL: 7 + TOP_K_INSTANCE: 200 +DATASETS: + TRAIN: ("cityscapes_fine_panoptic_train",) + TEST: ("cityscapes_fine_panoptic_val",) +SOLVER: + OPTIMIZER: "ADAM" + BASE_LR: 0.001 + WEIGHT_DECAY: 0.0 + WEIGHT_DECAY_NORM: 0.0 + WEIGHT_DECAY_BIAS: 0.0 + MAX_ITER: 60000 + LR_SCHEDULER_NAME: "WarmupPolyLR" + IMS_PER_BATCH: 4 +INPUT: + MIN_SIZE_TRAIN: (512, 640, 704, 832, 896, 1024, 1152, 1216, 1344, 1408, 1536, 1664, 1728, 1856, 1920, 2048) + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 1024 + MAX_SIZE_TRAIN: 4096 + MAX_SIZE_TEST: 2048 + CROP: + ENABLED: True + TYPE: "absolute" + SIZE: (1024, 2048) +DATALOADER: + NUM_WORKERS: 4 +VERSION: 2 \ No newline at end of file diff --git a/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml b/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml new file mode 100644 index 0000000..fde902b --- /dev/null +++ b/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml @@ -0,0 +1,20 @@ +_BASE_: Base-PanopticDeepLab-OS16.yaml +MODEL: + WEIGHTS: "detectron2://DeepLab/R-52.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + BACKBONE: + NAME: "build_resnet_deeplab_backbone" + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + RES5_MULTI_GRID: [1, 2, 4] + STEM_TYPE: "deeplab" + STEM_OUT_CHANNELS: 128 + STRIDE_IN_1X1: False +SOLVER: + MAX_ITER: 90000 +INPUT: + FORMAT: "RGB" + CROP: + SIZE: (512, 1024) diff --git a/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024_dsconv.yaml b/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024_dsconv.yaml new file mode 100644 index 0000000..8e31420 --- /dev/null +++ b/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024_dsconv.yaml @@ -0,0 +1,24 @@ +_BASE_: Base-PanopticDeepLab-OS16.yaml +MODEL: + WEIGHTS: "detectron2://DeepLab/R-52.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + BACKBONE: + NAME: "build_resnet_deeplab_backbone" + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + RES5_MULTI_GRID: [1, 2, 4] + STEM_TYPE: "deeplab" + STEM_OUT_CHANNELS: 128 + STRIDE_IN_1X1: False + PANOPTIC_DEEPLAB: + 
USE_DEPTHWISE_SEPARABLE_CONV: True
+  SEM_SEG_HEAD:
+    USE_DEPTHWISE_SEPARABLE_CONV: True
+SOLVER:
+  MAX_ITER: 90000
+INPUT:
+  FORMAT: "RGB"
+  CROP:
+    SIZE: (512, 1024)
diff --git a/CDARTS_segmentation/train/cydas.py b/CDARTS_segmentation/train/cydas.py
new file mode 100644
index 0000000..2fee9f9
--- /dev/null
+++ b/CDARTS_segmentation/train/cydas.py
@@ -0,0 +1,435 @@
+import torch
+import torch.nn as nn
+import numpy as np  # needed by CyDASseg.num_filters below
+from torch.nn import functional as F
+from builder import *
+from operations import *
+from operations import DropPath_
+from genotypes import PRIMITIVES
+from pdb import set_trace as bp
+from seg_oprs import FeatureFusion, Head, Decoder
+from layers import NaiveSyncBatchNorm
+
+# BatchNorm2d = nn.BatchNorm2d
+BatchNorm2d = NaiveSyncBatchNorm
+
+IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
+IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
+IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
+IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
+
+def hard_sigmoid(x, inplace: bool = False):
+    if inplace:
+        return x.add_(3.).clamp_(0., 6.).div_(6.)
+    else:
+        return F.relu6(x + 3.) / 6.
+
+class HardSigmoid(nn.Module):
+    def __init__(self, inplace: bool = False):
+        super(HardSigmoid, self).__init__()
+        self.inplace = inplace
+
+    def forward(self, x):
+        return hard_sigmoid(x, self.inplace)
+
+class SelectAdaptivePool2d(nn.Module):
+    """Selectable global pooling layer with dynamic input kernel size"""
+    def __init__(self, output_size=1, pool_type='avg', flatten=False):
+        super(SelectAdaptivePool2d, self).__init__()
+        self.output_size = output_size
+        self.pool_type = pool_type
+        self.flatten = flatten
+        if pool_type == 'avgmax':
+            self.pool = AdaptiveAvgMaxPool2d(output_size)
+        elif pool_type == 'catavgmax':
+            self.pool = AdaptiveCatAvgMaxPool2d(output_size)
+        elif pool_type == 'max':
+            self.pool = nn.AdaptiveMaxPool2d(output_size)
+        else:
+            if pool_type != 'avg':
+                assert False, 'Invalid pool type: %s' % pool_type
+            self.pool = nn.AdaptiveAvgPool2d(output_size)
+
+    def forward(self, x):
+        x = self.pool(x)
+        if self.flatten:
+            x = x.flatten(1)
+        return x
+
+    def feat_mult(self):
+        return adaptive_pool_feat_mult(self.pool_type)
+
+    def __repr__(self):
+        return self.__class__.__name__ + ' (' \
+            + 'output_size=' + str(self.output_size) \
+            + ', pool_type=' + self.pool_type + ')'
+
+def create_conv2d(in_chs, out_chs, kernel_size, **kwargs):
+    """ Select a 2d convolution implementation based on arguments.
+    Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
+    Used extensively by EfficientNet, MobileNetV3 and related networks.
+    """
+    assert 'groups' not in kwargs  # only use the 'depthwise' bool arg
+    if isinstance(kernel_size, list):
+        assert 'num_experts' not in kwargs  # MixNet + CondConv combo not supported currently
+        # We use only lists for defining the MixedConv2d kernel groups;
+        # ints, tuples and other iterables will continue to pass to normal conv and specify h, w.
+ m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs) + else: + depthwise = kwargs.pop('depthwise', False) + groups = out_chs if depthwise else 1 + if 'num_experts' in kwargs and kwargs['num_experts'] > 0: + m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs) + else: + m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs) + return m + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bilinear', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + +def conv_bn(inp, oup, stride, groups=1, act_fn=nn.ReLU): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False, groups=groups), + nn.BatchNorm2d(oup), + act_fn(inplace=True) + ) + + +def conv_1x1_bn(inp, oup, groups=1, act_fn=nn.ReLU): + return nn.Sequential( + nn.Conv2d(inp, oup, 1, 1, 0, bias=False, groups=groups), + nn.BatchNorm2d(oup), + act_fn(inplace=True) + ) + + + +default_cfgs = { + 'mobilenetv3_large_075': _cfg(url=''), + 'mobilenetv3_large_100': _cfg( + interpolation='bicubic', + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth'), + 'mobilenetv3_small_075': _cfg(url=''), + 'mobilenetv3_small_100': _cfg(url=''), + 'mobilenetv3_rw': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', + interpolation='bicubic'), + 'tf_mobilenetv3_large_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_large_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_075': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_100': _cfg( + url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), + 'tf_mobilenetv3_small_minimal_100': _cfg( + url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth', + mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), +} + +_DEBUG = False + + +class ChildNet(nn.Module): + + def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True, + channel_multiplier=1.0, pad_type='', act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0., + se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, global_pool='avg', pool_bn=False, zero_gamma=False): + super(ChildNet, self).__init__() + + norm_layer = BatchNorm2d + self.num_classes = num_classes + self.num_features = num_features + self.drop_rate = drop_rate + self._in_chs = in_chans + self.pool_bn = 
pool_bn + + # Stem + stem_size = round_channels(stem_size, channel_multiplier) + self.conv_stem = create_conv2d(self._in_chs, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = norm_layer(stem_size, **norm_kwargs) + self.act1 = act_layer(inplace=True) + self._in_chs = stem_size + + # Middle stages (IR/ER/DS Blocks) + builder = ChildNetBuilder( + channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs, + norm_layer, norm_kwargs, drop_path_rate, verbose=_DEBUG) + self.blocks = nn.Sequential(*builder(self._in_chs, block_args)) + # self.blocks = builder(self._in_chs, block_args) + self._in_chs = builder.in_chs + + # Head + Pooling + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type, bias=head_bias) + self.act2 = act_layer(inplace=True) + + # Classifier + self.classifier = nn.Linear(self.num_features * self.global_pool.feat_mult(), self.num_classes) + + if pool_bn: + self.pool_bn = nn.BatchNorm1d(1) + + efficientnet_init_weights(self, zero_gamma=zero_gamma) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.num_classes = num_classes + self.classifier = nn.Linear( + self.num_features * self.global_pool.feat_mult(), num_classes) if self.num_classes else None + + def forward_features(self, x): + # architecture = [[0], [], [], [], [], [0]] + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act1(x) + outputs = [] + # 16, 24, 40, 96, 320 + # block_idxs = [0, 1, 2, 4, 6] + block_idxs = [1, 2, 4, 6] + for i, block in enumerate(self.blocks): + x = block(x) + if i in block_idxs: + outputs.append(x) + + # x = self.blocks(x) + return tuple(outputs) + + def forward(self, x): + x = self.forward_features(x) + return x + + +def modify_block_args(block_args, kernel_size, exp_ratio): + # kernel_size: 3,5,7 + # exp_ratio: 4,6 + block_type = block_args['block_type'] + # each type of block has different valid arguments, fill accordingly + if block_type == 'cn': + block_args['kernel_size'] = kernel_size + elif block_type == 'er': + block_args['exp_kernel_size'] = kernel_size + else: + block_args['dw_kernel_size'] = kernel_size + + if block_type == 'ir' or block_type == 'er': + block_args['exp_ratio'] = exp_ratio + return block_args + + +def _gen_childnet(**kwargs): + # arch_list = [[0], [3, 2], [3, 2], [3, 3], [3, 3, 3], [3, 3, 3], [0]] + # arch_list = [[0], [3, 2, 3, 3], [3, 2, 3, 1], [3, 0, 3, 2], [3, 3, 3, 3], [3, 3, 3, 3], [0]] + # arch_list = [[0], [3,4,3,1],[3,2,3,0],[3,3,3,1],[3,3,3,3],[3,3,3,3],[0]] + arch_list = [[0], [3, 4, 2, 0], [5, 2, 4, 0], [4, 3, 2, 2], [1, 3, 0, 1], [2, 4, 4, 2], [0]] + # arch_list = [[0], [], [], [], [], [0]] + choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]} + choices_list = [[x,y] for x in choices['kernel_size'] for y in choices['exp_ratio']] + + num_features = 1280 + + # act_layer = HardSwish + act_layer = Swish + ''' + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r2_k3_s1_e4_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25', 
'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + ''' + arch_def = [ + # stage 0, 112x112 in + ['ds_r1_k3_s1_e1_c16_se0.25'], + # stage 1, 112x112 in + ['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25'], + # stage 2, 56x56 in + ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'], + # stage 3, 28x28 in + ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r2_k3_s1_e4_c80_se0.25'], + # stage 4, 14x14in + ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'], + # stage 5, 14x14in + ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'], + # stage 6, 7x7 in + ['cn_r1_k1_s1_c320_se0.25'], + ] + #arch_def = [ + # # stage 0, 112x112 in + # ['ds_r1_k3_s1_e1_c16_se0.25'], + # # stage 1, 112x112 in + # ['ir_r1_k3_s2_e4_c24_se0.25'], + # # stage 2, 56x56 in + # ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'], + # # stage 3, 28x28 in + # ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25'], + # # stage 4, 14x14in + # ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'], + # # stage 5, 14x14in + # ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'], + # # stage 6, 7x7 in + # ['cn_r1_k1_s1_c320_se0.25'], + #] + + + new_arch = [] + # change to child arch_def + for i, (layer_choice, layer_arch) in enumerate(zip(arch_list, arch_def)): + if len(layer_arch) == 1: + new_arch.append(layer_arch) + continue + else: + new_layer = [] + for j, (block_choice, block_arch) in enumerate(zip(layer_choice, layer_arch)): + kernel_size, exp_ratio = choices_list[block_choice] + elements = block_arch.split('_') + block_arch = block_arch.replace(elements[2], 'k{}'.format(str(kernel_size))) + block_arch = block_arch.replace(elements[4], 'e{}'.format(str(exp_ratio))) + new_layer.append(block_arch) + new_arch.append(new_layer) + + model_kwargs = dict( + block_args=decode_arch_def(new_arch), + num_features=num_features, + stem_size=16, + # channel_multiplier=channel_multiplier, + norm_kwargs=resolve_bn_args(kwargs), + act_layer=act_layer, + se_kwargs=dict(act_layer=nn.ReLU, gate_fn=hard_sigmoid, reduce_mid=True, divisor=8), + num_classes=1000, + drop_rate=0.2, + drop_path_rate=0.2, + global_pool='avg' + ) + model = ChildNet(**model_kwargs) + return model + +class CyDASseg(nn.Module): + def __init__(self, Fch=12, num_classes=19, stem_head_width=(1., 1.)): + super(CyDASseg, self).__init__() + self._num_classes = num_classes + self._stem_head_width = stem_head_width + self.backbone = _gen_childnet() + # self.f_channels = [16, 24, 40, 96] + self.f_channels = [24, 40, 96, 320] + self._Fch = Fch + # del self.backbone.blocks[3][2] + + #for m in self.backbone.modules(): + # if isinstance(m, nn.BatchNorm2d): + # m.eval() + # m.weight.requires_grad = False + # m.bias.requires_grad = False + + self.last_channel = self.backbone.blocks[-1][-1].conv.out_channels # self.backbone.blocks[-1][-1] + + # building decoder + self.build_arm_ffm_head() + + def init_weights(self, pretrained=None): + if pretrained: + state_dict = torch.load(pretrained) + state_dict = state_dict['state_dict'] + # resume_checkpoint(self.backbone, pretrained) + 
self.backbone.load_state_dict(state_dict, strict=True)
+        else:
+            print("No pretrained model!")
+            return
+
+    def build_arm_ffm_head(self):
+
+        # 24, 40, 96, 320
+
+        if self.training:
+            self.heads32 = Head(self.f_channels[-1], self._num_classes, True, norm_layer=BatchNorm2d)
+            self.heads16 = Head(self.f_channels[-2], self._num_classes, True, norm_layer=BatchNorm2d)
+
+        self.heads8 = Decoder(self.num_filters(8, self._stem_head_width[1]), self.f_channels[0], self._num_classes, Fch=self._Fch, scale=4, branch=1, is_aux=False, norm_layer=BatchNorm2d)
+
+        self.arms32 = nn.ModuleList([
+            ConvNorm(self.f_channels[-1], self.num_filters(16, self._stem_head_width[1]), 1, 1, 0, slimmable=False),
+            ConvNorm(self.num_filters(16, self._stem_head_width[1]), self.num_filters(8, self._stem_head_width[1]), 1, 1, 0, slimmable=False),
+        ])
+
+        self.refines32 = nn.ModuleList([
+            ConvNorm(self.num_filters(16, self._stem_head_width[1])+self.f_channels[-2], self.num_filters(16, self._stem_head_width[1]), 3, 1, 1, slimmable=False),
+            ConvNorm(self.num_filters(8, self._stem_head_width[1])+self.f_channels[-3], self.num_filters(8, self._stem_head_width[1]), 3, 1, 1, slimmable=False),
+        ])
+
+        self.ffm = FeatureFusion(self.num_filters(8, self._stem_head_width[1]), self.num_filters(8, self._stem_head_width[1]), reduction=1, Fch=self._Fch, scale=8, branch=1, norm_layer=BatchNorm2d)
+
+    def agg_ffm(self, outputs8, outputs16, outputs32, outputs4):
+        pred32 = []; pred16 = []; pred8 = []  # order of predictions is not important
+
+        if self.training: pred32.append(outputs32)
+        out = self.arms32[0](outputs32)
+        out = F.interpolate(out, size=(int(out.size(2))*2, int(out.size(3))*2), mode='bilinear', align_corners=False)
+        out = self.refines32[0](torch.cat([out, outputs16], dim=1))
+        if self.training: pred16.append(outputs16)
+        out = self.arms32[1](out)
+        out = F.interpolate(out, size=(int(out.size(2))*2, int(out.size(3))*2), mode='bilinear', align_corners=False)
+        out = self.refines32[1](torch.cat([out, outputs8], dim=1))
+        pred8.append(out)
+
+        if len(pred32) > 0:
+            pred32 = self.heads32(torch.cat(pred32, dim=1))
+        else:
+            pred32 = None
+        if len(pred16) > 0:
+            pred16 = self.heads16(torch.cat(pred16, dim=1))
+        else:
+            pred16 = None
+        pred8 = self.heads8(self.ffm(torch.cat(pred8, dim=1)), outputs4)
+        if self.training:
+            return pred8, pred16, pred32
+        else:
+            return pred8
+
+    def num_filters(self, scale, width=1.0):
+        return int(np.round(scale * self._Fch * width))
+
+    def forward(self, x):
+        b, c, h, w = x.shape
+        outputs = self.backbone(x)
+
+        outputs4, outputs8, outputs16, outputs32 = outputs[0], outputs[1], outputs[2], outputs[3]
+        if self.training:
+            pred8, pred16, pred32 = self.agg_ffm(outputs8, outputs16, outputs32, outputs4)
+            pred8 = F.interpolate(pred8, size=(h, w), mode='bilinear', align_corners=False)
+            if pred16 is not None: pred16 = F.interpolate(pred16, size=(h, w), mode='bilinear', align_corners=False)
+            if pred32 is not None: pred32 = F.interpolate(pred32, size=(h, w), mode='bilinear', align_corners=False)
+            return pred8, pred16, pred32
+        else:
+            pred8 = self.agg_ffm(outputs8, outputs16, outputs32, outputs4)
+            out = F.interpolate(pred8, size=(int(pred8.size(2))*4, int(pred8.size(3))*4), mode='bilinear', align_corners=False)
+            return out
+
+
+
diff --git a/CDARTS_segmentation/train/dataloader.py b/CDARTS_segmentation/train/dataloader.py
new file mode 100644
index 0000000..a6e0b21
--- /dev/null
+++ b/CDARTS_segmentation/train/dataloader.py
@@ -0,0 +1,93 @@
+import torch
+import cv2
+cv2.setNumThreads(0)
+from torch.utils
import data + +from utils.img_utils import random_scale, random_mirror, normalize, generate_random_crop_pos, random_crop_pad_to_shape + + +class TrainPre(object): + def __init__(self, config, img_mean, img_std): + self.img_mean = img_mean + self.img_std = img_std + self.config = config + + def __call__(self, img, gt): + img, gt = random_mirror(img, gt) + if self.config.train_scale_array is not None: + img, gt, scale = random_scale(img, gt, self.config.train_scale_array) + + img = normalize(img, self.img_mean, self.img_std) + + crop_size = (self.config.image_height, self.config.image_width) + crop_pos = generate_random_crop_pos(img.shape[:2], crop_size) + p_img, _ = random_crop_pad_to_shape(img, crop_pos, crop_size, 0) + p_gt, _ = random_crop_pad_to_shape(gt, crop_pos, crop_size, 255) + p_gt = cv2.resize(p_gt, (self.config.image_width // self.config.gt_down_sampling, self.config.image_height // self.config.gt_down_sampling), interpolation=cv2.INTER_NEAREST) + + p_img = p_img.transpose(2, 0, 1) + + extra_dict = None + + return p_img, p_gt, extra_dict + + +class CyclicIterator: + def __init__(self, loader, sampler, distributed): + self.loader = loader + self.sampler = sampler + self.epoch = 0 + self.distributed = distributed + self._next_epoch() + + def _next_epoch(self): + if self.distributed: + self.sampler.set_epoch(self.epoch) + self.iterator = iter(self.loader) + self.epoch += 1 + + def __len__(self): + return len(self.loader) + + def __iter__(self): + return self + + def __next__(self): + try: + return next(self.iterator) + except StopIteration: + self._next_epoch() + return next(self.iterator) + +def get_train_loader(config, dataset, portion=None, worker=None, test=False): + data_setting = {'img_root': config.img_root_folder, + 'gt_root': config.gt_root_folder, + 'train_source': config.train_source, + 'eval_source': config.eval_source, + 'down_sampling': config.down_sampling, + 'portion': portion} + if test: + data_setting = {'img_root': config.img_root_folder, + 'gt_root': config.gt_root_folder, + 'train_source': config.train_eval_source, + 'eval_source': config.eval_source, + 'down_sampling': config.down_sampling, + 'portion': portion} + train_preprocess = TrainPre(config, config.image_mean, config.image_std) + + train_dataset = dataset(data_setting, "train", train_preprocess, config.batch_size * config.niters_per_epoch) + + is_shuffle = True + batch_size = config.batch_size + + train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) + + train_loader = data.DataLoader(train_dataset, + batch_size=batch_size, + sampler = train_sampler, + num_workers=config.num_workers if worker is None else worker, + # drop_last=True, + # shuffle=is_shuffle, + pin_memory=True) + + return train_loader, train_sampler diff --git a/CDARTS_segmentation/train/eval.py b/CDARTS_segmentation/train/eval.py new file mode 100644 index 0000000..678daa3 --- /dev/null +++ b/CDARTS_segmentation/train/eval.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +import os +import cv2 +cv2.setNumThreads(0) +import numpy as np + +from utils.visualize import print_iou, show_img, show_prediction +from engine.evaluator import Evaluator +from engine.logger import get_logger +from seg_opr.metric import hist_info, compute_score + +logger = get_logger() + + +class SegEvaluator(Evaluator): + def func_per_iteration(self, data, device, iter=None): + if self.config is not None: config = self.config + img = data['data'] + label = data['label'] + name = data['fn'] + + if len(config.eval_scale_array) == 
1:
+            pred = self.whole_eval(img, None, device)
+        else:
+            pred = self.sliding_eval(img, config.eval_crop_size, config.eval_stride_rate, device)
+        hist_tmp, labeled_tmp, correct_tmp = hist_info(config.num_classes, pred, label)
+        results_dict = {'hist': hist_tmp, 'labeled': labeled_tmp, 'correct': correct_tmp}
+
+        if self.save_path is not None:
+            fn = name + '.png'
+            cv2.imwrite(os.path.join(self.save_path, fn), pred)
+            logger.info('Save the image ' + fn)
+
+        # tensorboard logger does not fit multiprocess
+        if self.logger is not None and iter is not None:
+            colors = self.dataset.get_class_colors()
+            image = img
+            clean = np.zeros(label.shape)
+            comp_img = show_img(colors, config.background, image, clean, label, pred)
+            self.logger.add_image('vis', np.swapaxes(np.swapaxes(comp_img, 0, 2), 1, 2), iter)
+
+        if self.show_image or self.show_prediction:
+            colors = self.dataset.get_class_colors()
+            image = img
+            clean = np.zeros(label.shape)
+            if self.show_image:
+                comp_img = show_img(colors, config.background, image, clean, label, pred)
+            else:
+                comp_img = show_prediction(colors, config.background, image, pred)
+            cv2.imwrite(name + ".png", comp_img[:, :, ::-1])
+
+        return results_dict
+
+    def compute_metric(self, results):
+        hist = np.zeros((self.config.num_classes, self.config.num_classes))
+        correct = 0
+        labeled = 0
+        count = 0
+        for d in results:
+            hist += d['hist']
+            correct += d['correct']
+            labeled += d['labeled']
+            count += 1
+
+        iu, mean_IU, mean_IU_no_back, mean_pixel_acc = compute_score(hist, correct, labeled)
+        result_line = print_iou(iu, mean_pixel_acc, self.dataset.get_class_names(), True)
+        return result_line, mean_IU
diff --git a/CDARTS_segmentation/train/genotypes.py b/CDARTS_segmentation/train/genotypes.py
new file mode 100644
index 0000000..e5d5295
--- /dev/null
+++ b/CDARTS_segmentation/train/genotypes.py
@@ -0,0 +1,13 @@
+from collections import namedtuple
+
+Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
+
+PRIMITIVES = [
+    'skip',
+    'conv',
+    'conv_downup',
+    'conv_2x_downup',
+    'sa',
+]
+
+# 'conv_2x',
\ No newline at end of file
diff --git a/CDARTS_segmentation/train/latency_lookup_table.npy b/CDARTS_segmentation/train/latency_lookup_table.npy
new file mode 100644
index 0000000000000000000000000000000000000000..84ee13fae909502070d6086288a5031cd2edaf35
GIT binary patch
literal 50122
[~50 KB of base85-encoded binary payload omitted]
literal 0
HcmV?d00001
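The payload elided above is a pickled dict of measured per-op latencies. `operations.py` (added later in this patch) shows the intended consumer: it keeps a module-level `latency_lookup_table = {}` and carries the commented-out loader `np.load(table_file_name).item()`. A hedged sketch of how such a table is typically populated and queried; the key format below is an illustrative assumption rather than the layout encoded in the shipped file, and recent NumPy additionally requires `allow_pickle=True`:

import os.path as osp
import numpy as np

table_file_name = "latency_lookup_table.npy"
latency_lookup_table = {}
if osp.isfile(table_file_name):
    # np.save pickles the dict into a 0-d object array; .item() unwraps it
    latency_lookup_table = np.load(table_file_name, allow_pickle=True).item()

def cached_latency(key, measure_fn):
    # e.g. key = "ConvNorm_H512_W1024_Cin64_Cout64_stride1" (hypothetical format)
    if key not in latency_lookup_table:
        latency_lookup_table[key] = measure_fn()
        np.save(table_file_name, latency_lookup_table)
    return latency_lookup_table[key]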
zr$ZB+IG+ftM=70Ntw=1c=fFBKw6(=DgYzK_0+#9_7S)Rsv+mTp(ZAdiaT)2QPlF~) zm#y-bz`S@c&xK6Owy{yuv#3zHqIw0Wh`Y#~&c`OdT*mI6xYIC{{8YR+OKx}JQc--ID9r?slDt*ly=|3PuhFAAET$b}D z8@&n?!E?j>4p{fH7tVOAn8C-H%cH2`vUP|3g3o%*Hd34Z*&&(60W9@+U+EX_zo6WS_= zjr~|C|N9;RceCOH=tujzXR%W_@wdu+uM2a8tadGO8$wR2R{J0 zZ=UOz-cMZ|s8OX2X%}mfle7Gehlm{C`&F5~MjU=!lcbfZi3InvG=NM_>)YM=VR=9a$C4FAkQ>x+^%Z-+59x1m?_q&PwZgp(gG^>RrMydr*fWe(>W@)Gx60@I$%9; z`{PNEgAMp*A$2GZ#tuF#&}8!vQh9^0>M0hS3m{Kdnf=eaif;s$ z&bUfx*wi{OOBsN$*<)3Kd3oLVlM@4oVGt`#UZ|2X0_)0eTg>0LdFZAbhREO3p`@R3 znE;V|Y3vg7zU*wcQ^!Wh9KYLUPO;{%x)Te`eJT!j>iSlYvWaX4vjB2?%=X*M4>h)D zoototrB*>Wb6Ej(zxDR|(yul7C?t1UINGTRMr8xk?o1c0k2# zJAJ$IzqeU6=pQdj zN9J^YPl>QncV@Hn3m`5}Z?rwEXC}vkq_!Cxq@ut&mpIeB`uCH9Em!*xKB*Y|_@`gI zD-|8`hTq_jl9lwRzZv(+;($sXalh%Nw^4RcN`YxLfu>yvKqY((d9ZRt5f-JZdtwa? z18!d{38>f>Wp*v?v6w~4QpTC49&*nQN&)K9_0|5hpW1n|5=I(!^3uS(?A&qVK2mb9_ zyvJ5M?=wRIQ?#L4iIFBi?j8HS&7vWkkDb?b2xZsMHe>9u8CZ#JtG_w$xQkd5vdoAM zbbqu)>pIxH>VQ1bb@jmx{~WR3b?7y;>M6Fa2B3})vSlj!rJu2NoZx91L4%}bigjuG z_xeme$gwK-eY40@Ws<1ttVyZ`grLR&y(4=3;CBv=G5qk?2G+e7e($Eab`U$LODXMY z+A4~}@0#M^{D?0l_jF^;)1efhY2wB4uLG>31y3fmEU-u1*z`$F>bwV;DuJlG-+~r%eDAJ@@sTRf9_N zQK&+dYEs8AMl}UgTx`br+eR%Cr~#WrxU%#$v!rG~*qOg=k7dKEvtifQ4qXcP?V>rb z5*j|~UUqOv2drU6x6}d%A;F<5|F~)A9b?!nDhBRqXa(e@V;9>z`M`N2yFPJ7sFo?H zUi~_Be);kL;2-(CEFi46)LuWXXq>ns2%;{#%x(p(fw_Bn`+1eNaQ+I8U0RvEXTBIXVVCQgHLESKE{2a;^m`M|A1~3zU0*QRtagCfkuZH>l6nFnEaUrbLwu|KyjiMBuvxu;8PlT7^lshlteeCtaRpHhd-V^~suAcSNd z-L_og!Tjzd*OBtqZD=*9nZ;u90MwnPTeF?ce1>hiqKvA&1hWPJD>>bLC!gwX1yzdV zLCwPD;0c7aH%_>ne36m0Ml{gX$%!>P5LgML8c88J1b^L-HAcJdg&p$()PdEm(H+en zjjI$Pl|`K||3Sb!Y1{YbrU-*iw$u-JDu428*j-^TAR~RV+^aO=i~Z=)OA@W)VwZ;i z>%y?!w>`&X6zg6$7nD(>T}Y#=9B)9zm?o6%SI_RJoGOD*O9;;G&=l)-6k1)bqQTVC zsaFvkw`&ed3Et>_W2WY->cCwF0)YiY)t*@z4v1i<5nDfhvom8jGSTV)=fMY*oI1jjI(34DcA;H1(1!Vk>p%s0rS{%L0qY4g_{D5?L=HR-yYh>g1A#4voOeRFE_Z2pbO8@)`EBBpYR2y{N}V zn#NI-M+58pk3*GWg8an}rtT0*Pt=+Y*3%e3?hVUaeMwJ_p7g!YXk!@*s0(Rp55C#7 z0$-Y1{*>nmv=YQt1pw;onM&1qwp=2A1A%xzaxRwsrRW(G);evDn51z) zSQq)<2;YBm7zZC$AdQ%66z(+R0ePh3W+$&FcFXu)op2_!XgAUjm@Ls*D`9Beh0+P;>o3x>-A$Dw5Ki{t4V;0S$TDL z&h-J}g+O22G-k4$?DYNvm}e`vpFX$2;C*kY>q#wg?!=u8$k=H2-?QuyeY6gn092dO zpFjw@+q}`#Q(cWSiK9=q6z)bnCB@I1t4hq^y3+v};S{~&LW5ww?uc4-Z&=fc9qVoeppH*3 zc)QQB*JA4gF;r_6TQ?Js7o!U9C_mvR{+f!)GE1|7u<3l7U3VkT2}%<=s7w{)p)?y1 zVe89nxi&nJolCOjIRu%+iee71&b|-HKcstG`z*Qzqeg)QHWyeYYndWnmRRF}Ysy-p z879pG#KwpBTU@{P$Zqy&+tNx9XMaAR_8+x1S-Y`_1Jjqz>F9Zy2<%BgAWeS=~js%cb!@S$eo^zDi?Pbyg>_lOogJAAyVy9 zK*hd%vapu_aL09`ZLel&86d)@Ebem0KP^B0Z2QvyxWW7lsQ5o~Clw!M@Tj`JXVH{$ zoAh!(#<+AT^NXn>IH;ZPB8jPOnFL(_K*9<@+?luU*_nN<*)E~Ch7L5X<6yC_Okwp3 zUR~xxPVs`sSA&{C9J5tG+}qgs%%mX(yR~d3@>f}|xyaQiF-yOqfAiAu_8q4B;yb(s zSjj=|-(6O6o=wmPkEW8(S_`aev2QCUz5j{3&xYM22$+$V8uUrZ;4QdN!o@@jllV-> ziL?#~d%uqDktZODy++ohk_L+#_w_)ESlIrr&sQ_Em86dqZ9AGNsIG$nb#XwS3QaO5 ziaVKJ74+Cyt4^%l4ZysTzvin#ErM7_bu8*A*Sd{BIuLUD?t#b>g06#uPFYA5QV1Y+ z_I?tWH`L7mgwmt6u_T28A}q9Yws$xFvL9}md2Z%50qW7A#HvAu{^VDT;8OtEmX^6;0ol7ARb<$t5bTR{K- literal 0 HcmV?d00001 diff --git a/CDARTS_segmentation/train/launch.py b/CDARTS_segmentation/train/launch.py new file mode 100644 index 0000000..0e24156 --- /dev/null +++ b/CDARTS_segmentation/train/launch.py @@ -0,0 +1,274 @@ +from __future__ import absolute_import, division, print_function, unicode_literals + +""" +Modified by Xiyang for effortlessly launching on Azure ML + +""" + +r""" +`torch.distributed.launch` is a module that spawns up multiple distributed +training processes on each of the training nodes. 
+
+The utility can be used for single-node distributed training, in which one or
+more processes per node will be spawned. The utility can be used for either
+CPU training or GPU training. If the utility is used for GPU training,
+each distributed process will be operating on a single GPU. This can achieve
+well-improved single-node training performance. It can also be used in
+multi-node distributed training, by spawning up multiple processes on each node
+for well-improved multi-node distributed training performance as well.
+This will especially be beneficial for systems with multiple Infiniband
+interfaces that have direct-GPU support, since all of them can be utilized for
+aggregated communication bandwidth.
+
+In both cases of single-node distributed training or multi-node distributed
+training, this utility will launch the given number of processes per node
+(``--nproc_per_node``). If used for GPU training, this number needs to be less
+than or equal to the number of GPUs on the current system (``nproc_per_node``),
+and each process will be operating on a single GPU from *GPU 0 to
+GPU (nproc_per_node - 1)*.
+
+**How to use this module:**
+
+1. Single-Node multi-process distributed training
+
+::
+
+    >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
+               YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other
+               arguments of your training script)
+
+2. Multi-Node multi-process distributed training: (e.g. two nodes)
+
+
+Node 1: *(IP: 192.168.1.1, and has a free port: 1234)*
+
+::
+
+    >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
+               --nnodes=2 --node_rank=0 --master_addr="192.168.1.1"
+               --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
+               and all other arguments of your training script)
+
+Node 2:
+
+::
+
+    >>> python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE
+               --nnodes=2 --node_rank=1 --master_addr="192.168.1.1"
+               --master_port=1234 YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3
+               and all other arguments of your training script)
+
+3. To look up what optional arguments this module offers:
+
+::
+
+    >>> python -m torch.distributed.launch --help
+
+
+**Important Notices:**
+
+1. This utility and multi-process distributed (single-node or
+multi-node) GPU training currently only achieves the best performance using
+the NCCL distributed backend. Thus NCCL backend is the recommended backend to
+use for GPU training.
+
+2. In your training program, you must parse the command-line argument:
+``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by this module.
+If your training program uses GPUs, you should ensure that your code only
+runs on the GPU device of LOCAL_PROCESS_RANK. This can be done by:
+
+Parsing the local_rank argument
+
+::
+
+    >>> import argparse
+    >>> parser = argparse.ArgumentParser()
+    >>> parser.add_argument("--local_rank", type=int)
+    >>> args = parser.parse_args()
+
+Set your device to local rank using either
+
+::
+
+    >>> torch.cuda.set_device(arg.local_rank)  # before your code runs
+
+or
+
+::
+
+    >>> with torch.cuda.device(arg.local_rank):
+    >>>    # your code to run
+
+3. In your training program, you are supposed to call the following function
+at the beginning to start the distributed backend. You need to make sure that
+the init_method uses ``env://``, which is the only supported ``init_method``
+by this module.
+
+::
+
+    torch.distributed.init_process_group(backend='YOUR BACKEND',
+                                         init_method='env://')
+
+4.
In your training program, you can either use regular distributed functions +or use :func:`torch.nn.parallel.DistributedDataParallel` module. If your +training program uses GPUs for training and you would like to use +:func:`torch.nn.parallel.DistributedDataParallel` module, +here is how to configure it. + +:: + + model = torch.nn.parallel.DistributedDataParallel(model, + device_ids=[arg.local_rank], + output_device=arg.local_rank) + +Please ensure that ``device_ids`` argument is set to be the only GPU device id +that your code will be operating on. This is generally the local rank of the +process. In other words, the ``device_ids`` needs to be ``[args.local_rank]``, +and ``output_device`` needs to be ``args.local_rank`` in order to use this +utility + +5. Another way to pass ``local_rank`` to the subprocesses via environment variable +``LOCAL_RANK``. This behavior is enabled when you launch the script with +``--use_env=True``. You must adjust the subprocess example above to replace +``args.local_rank`` with ``os.environ['LOCAL_RANK']``; the launcher +will not pass ``--local_rank`` when you specify this flag. + +.. warning:: + + ``local_rank`` is NOT globally unique: it is only unique per process + on a machine. Thus, don't use it to decide if you should, e.g., + write to a networked filesystem. See + https://github.com/pytorch/pytorch/issues/12042 for an example of + how things can go wrong if you don't do this correctly. + +""" + +import sys +import subprocess +import os +from argparse import ArgumentParser, REMAINDER + + +NODE_RANK = os.environ['AZ_BATCHAI_TASK_INDEX'] \ + if 'AZ_BATCHAI_TASK_INDEX' in os.environ else 0 +NODE_RANK = int(NODE_RANK) +MASTER_ADDR, MASTER_PORT = os.environ['AZ_BATCH_MASTER_NODE'].split(':') \ + if 'AZ_BATCH_MASTER_NODE' in os.environ else ("127.0.0.1", 29500) +MASTER_PORT = int(MASTER_PORT) + +def parse_args(): + """ + Helper function parsing the command line options + @retval ArgumentParser + """ + parser = ArgumentParser(description="PyTorch distributed training launch " + "helper utility that will spawn up " + "multiple distributed processes") + + # Optional arguments for the launch helper + parser.add_argument("--nnodes", type=int, default=1, + help="The number of nodes to use for distributed " + "training") + parser.add_argument("--node_rank", type=int, default=NODE_RANK, + help="The rank of the node for multi-node distributed " + "training") + parser.add_argument("--nproc_per_node", type=int, default=1, + help="The number of processes to launch on each node, " + "for GPU training, this is recommended to be set " + "to the number of GPUs in your system so that " + "each process can be bound to a single GPU.") + parser.add_argument("--master_addr", default=MASTER_ADDR, type=str, + help="Master node (rank 0)'s address, should be either " + "the IP address or the hostname of node 0, for " + "single node multi-proc training, the " + "--master_addr can simply be 127.0.0.1") + parser.add_argument("--master_port", default=MASTER_PORT, type=int, + help="Master node (rank 0)'s free port that needs to " + "be used for communication during distributed " + "training") + parser.add_argument("--use_env", default=False, action="store_true", + help="Use environment variable to pass " + "'local rank'. For legacy reasons, the default value is False. 
" + "If set to True, the script will not pass " + "--local_rank as argument, and will instead set LOCAL_RANK.") + parser.add_argument("-m", "--module", default=False, action="store_true", + help="Changes each process to interpret the launch script " + "as a python module, executing with the same behavior as" + "'python -m'.") + parser.add_argument("--no_python", default=False, action="store_true", + help="Do not prepend the training script with \"python\" - just exec " + "it directly. Useful when the script is not a Python script.") + + # positional + parser.add_argument("training_script", type=str, + help="The full path to the single GPU training " + "program/script to be launched in parallel, " + "followed by all the arguments for the " + "training script") + + # rest from the training program + parser.add_argument('training_script_args', nargs=REMAINDER) + return parser.parse_args() + +def main(): + args = parse_args() + + # world size in terms of number of processes + dist_world_size = args.nproc_per_node * args.nnodes + + # set PyTorch distributed related environmental variables + current_env = os.environ.copy() + current_env["MASTER_ADDR"] = args.master_addr + current_env["MASTER_PORT"] = str(args.master_port) + current_env["WORLD_SIZE"] = str(dist_world_size) + + processes = [] + + if 'OMP_NUM_THREADS' not in os.environ and args.nproc_per_node > 1: + current_env["OMP_NUM_THREADS"] = str(1) + print("*****************************************\n" + "Setting OMP_NUM_THREADS environment variable for each process " + "to be {} in default, to avoid your system being overloaded, " + "please further tune the variable for optimal performance in " + "your application as needed. \n" + "*****************************************".format(current_env["OMP_NUM_THREADS"])) + + print('Launching Node', args.node_rank) + for local_rank in range(0, args.nproc_per_node): + # each process's rank + dist_rank = args.nproc_per_node * args.node_rank + local_rank + current_env["RANK"] = str(dist_rank) + current_env["LOCAL_RANK"] = str(local_rank) + + # spawn the processes + with_python = not args.no_python + cmd = [] + if with_python: + cmd = [sys.executable, "-u"] + if args.module: + cmd.append("-m") + else: + if not args.use_env: + raise ValueError("When using the '--no_python' flag, you must also set the '--use_env' flag.") + if args.module: + raise ValueError("Don't use both the '--no_python' flag and the '--module' flag at the same time.") + + cmd.append(args.training_script) + + if not args.use_env: + cmd.append("--local_rank={}".format(local_rank)) + + cmd.extend(args.training_script_args) + + process = subprocess.Popen(cmd, env=current_env) + processes.append(process) + + for process in processes: + process.wait() + if process.returncode != 0: + raise subprocess.CalledProcessError(returncode=process.returncode, + cmd=cmd) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/CDARTS_segmentation/train/layers.py b/CDARTS_segmentation/train/layers.py new file mode 100644 index 0000000..80fccc8 --- /dev/null +++ b/CDARTS_segmentation/train/layers.py @@ -0,0 +1,116 @@ +import torch +import torch.distributed as dist +from torch import nn +from torch.autograd.function import Function +from torch.nn import functional as F + +class _NewEmptyTensorOp(torch.autograd.Function): + @staticmethod + def forward(ctx, x, new_shape): + ctx.shape = x.shape + return x.new_empty(new_shape) + + @staticmethod + def backward(ctx, grad): + shape = ctx.shape + return _NewEmptyTensorOp.apply(grad, 
shape), None
+
+class BatchNorm2d(torch.nn.BatchNorm2d):
+    """
+    For torch < 1.4
+    A wrapper around :class:`torch.nn.BatchNorm2d` to support zero-size tensors.
+    """
+
+    def forward(self, x):
+        if x.numel() > 0:
+            return super(BatchNorm2d, self).forward(x)
+        # get output shape
+        output_shape = x.shape
+        return _NewEmptyTensorOp.apply(x, output_shape)
+
+
+class AllReduce(Function):
+    @staticmethod
+    def forward(ctx, input):
+        input_list = [torch.zeros_like(input) for _ in range(dist.get_world_size())]
+        # Use allgather instead of allreduce since I don't trust in-place operations.
+        dist.all_gather(input_list, input, async_op=False)
+        inputs = torch.stack(input_list, dim=0)
+        return torch.sum(inputs, dim=0)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        dist.all_reduce(grad_output, async_op=False)
+        return grad_output
+
+class NaiveSyncBatchNorm(BatchNorm2d):
+    """
+    In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient
+    when the batch size on each worker is different
+    (e.g., when scale augmentation is used, or when it is applied to a mask head).
+    This is a slower but correct alternative to `nn.SyncBatchNorm`.
+    Note:
+    There isn't a single definition of Sync BatchNorm.
+    When ``stats_mode==""``, this module computes overall statistics by using
+    statistics of each worker with equal weight. The result is true statistics
+    of all samples (as if they are all on one worker) only when all workers
+    have the same (N, H, W). This mode does not support inputs with zero batch size.
+    When ``stats_mode=="N"``, this module computes overall statistics by weighting
+    the statistics of each worker by their ``N``. The result is true statistics
+    of all samples (as if they are all on one worker) only when all workers
+    have the same (H, W). It is slower than ``stats_mode==""``.
+    Even though the result of this module may not be the true statistics of all samples,
+    it may still be reasonable because it might be preferable to assign equal weights
+    to all workers, regardless of their (H, W) dimension, instead of putting larger weight
+    on larger images. From preliminary experiments, little difference is found between such
+    a simplified implementation and an accurate computation of overall mean & variance.
+    """
+
+    def __init__(self, *args, stats_mode="", **kwargs):
+        super().__init__(*args, **kwargs)
+        assert stats_mode in ["", "N"]
+        self._stats_mode = stats_mode
+
+    def forward(self, input):
+        if not self.training:
+            return super().forward(input)
+
+        if dist.get_world_size() == 1:
+            return super().forward(input)
+
+        B, C = input.shape[0], input.shape[1]
+
+        # per-worker first and second moments; synced across workers below
+        mean = torch.mean(input, dim=[0, 2, 3])
+        meansqr = torch.mean(input * input, dim=[0, 2, 3])
+
+        if self._stats_mode == "":
+            assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.'
+            vec = torch.cat([mean, meansqr], dim=0)
+            vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
+            mean, meansqr = torch.split(vec, C)
+            momentum = self.momentum
+        else:
+            if B == 0:
+                vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)
+                vec = vec + input.sum()  # make sure there is a gradient w.r.t. the input
+            else:
+                vec = torch.cat(
+                    [mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0
+                )
+            vec = AllReduce.apply(vec * B)
+
+            total_batch = vec[-1].detach()
+            momentum = total_batch.clamp(max=1) * self.momentum  # no update if total_batch is 0
+            total_batch = torch.max(total_batch, torch.ones_like(total_batch))  # avoid div-by-zero
+            mean, meansqr, _ = torch.split(vec / total_batch, C)
+
+        var = meansqr - mean * mean
+        invstd = torch.rsqrt(var + self.eps)
+        scale = self.weight * invstd
+        bias = self.bias - mean * scale
+        scale = scale.reshape(1, -1, 1, 1)
+        bias = bias.reshape(1, -1, 1, 1)
+
+        self.running_mean += momentum * (mean.detach() - self.running_mean)
+        self.running_var += momentum * (var.detach() - self.running_var)
+        return input * scale + bias
\ No newline at end of file
diff --git a/CDARTS_segmentation/train/loss.py b/CDARTS_segmentation/train/loss.py
new file mode 100644
index 0000000..0f56494
--- /dev/null
+++ b/CDARTS_segmentation/train/loss.py
@@ -0,0 +1,81 @@
+import torch.nn as nn
+import torch.nn.functional as F
+import torch
+
+
+class CrossEntropyLoss2d(nn.Module):
+    def __init__(self, weight=None, size_average=True, ignore_index=-100):
+        super(CrossEntropyLoss2d, self).__init__()
+        self.nll_loss = nn.NLLLoss(weight, size_average, ignore_index)
+
+    def forward(self, inputs, targets):
+        return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
+
+
+def one_hot(index, classes):
+    # index is not flattened (bypass ignore) ############
+    # size = index.size()[:1] + (classes,) + index.size()[1:]
+    # view = index.size()[:1] + (1,) + index.size()[1:]
+    #####################################################
+    # index is flattened (during ignore) ################
+    size = index.size()[:1] + (classes,)
+    view = index.size()[:1] + (1,)
+    #####################################################
+
+    mask = torch.Tensor(size).fill_(0).cuda()
+    index = index.view(view)
+    ones = 1.
+
+    return mask.scatter_(1, index, ones)
+
+
+class FocalLoss(nn.CrossEntropyLoss):
+    ''' Focal loss for classification tasks on imbalanced datasets '''
+
+    def __init__(self, gamma=2, alpha=None, ignore_index=-100, reduction='mean'):
+        super().__init__(weight=alpha, ignore_index=ignore_index, reduction='mean')
+        self.reduction = reduction
+        self.gamma = gamma
+
+    def forward(self, input_, target):
+        cross_entropy = super().forward(input_, target)
+        # Temporarily mask out the ignore index with '0' so that the indices
+        # passed to gather are valid. These positions do not contribute to the
+        # final loss, since their cross-entropy contribution is zero.
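+        # Note: with reduction='mean' fixed in the parent constructor,
+        # `cross_entropy` is already a scalar at this point, so the focal
+        # term computed below modulates that averaged value per position
+        # rather than per-pixel cross-entropy terms.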
+        target = target * (target != self.ignore_index).long()
+        input_prob = torch.gather(F.softmax(input_, 1), 1, target.unsqueeze(1))
+        loss = torch.pow(1 - input_prob, self.gamma) * cross_entropy
+        if self.reduction == 'mean':
+            return torch.mean(loss)
+        elif self.reduction == 'sum':
+            return torch.sum(loss)
+        else:
+            return loss
+
+
+class SoftCrossEntropyLoss2d(nn.Module):
+    def __init__(self):
+        super(SoftCrossEntropyLoss2d, self).__init__()
+
+    def forward(self, inputs, targets):
+        loss = 0
+        inputs = -F.log_softmax(inputs, dim=1)
+        for index in range(inputs.size()[0]):
+            loss += F.conv2d(inputs[range(index, index+1)], targets[range(index, index+1)]) / (targets.size()[2] * targets.size()[3])
+        return loss
+
+class OhemCELoss(nn.Module):
+    def __init__(self, thresh, n_min=0.1, ignore_lb=255, *args, **kwargs):
+        super(OhemCELoss, self).__init__()
+        self.thresh = -torch.log(torch.tensor(thresh, dtype=torch.float)).cuda()
+        self.n_min = n_min  # fraction of pixels to keep when few losses exceed the threshold
+        self.ignore_lb = ignore_lb
+        self.criteria = nn.CrossEntropyLoss(ignore_index=ignore_lb, reduction='none')
+
+    def forward(self, logits, labels):
+        loss = self.criteria(logits, labels).view(-1)
+        loss, _ = torch.sort(loss, descending=True)
+        n_min = int(self.n_min * len(loss))
+        if loss[n_min] > self.thresh:
+            loss = loss[loss > self.thresh]
+        else:
+            loss = loss[:n_min]
+        return torch.mean(loss)
\ No newline at end of file
diff --git a/CDARTS_segmentation/train/operations.py b/CDARTS_segmentation/train/operations.py
new file mode 100644
index 0000000..13b45f1
--- /dev/null
+++ b/CDARTS_segmentation/train/operations.py
@@ -0,0 +1,948 @@
+__all__ = ['ConvNorm', 'BasicResidual1x', 'BasicResidual_downup_1x', 'BasicResidual2x', 'BasicResidual_downup_2x', 'FactorizedReduce', 'OPS', 'OPS_name', 'OPS_Class', 'Self_Attn']
+
+from pdb import set_trace as bp
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+from thop import profile
+import sys
+import os.path as osp
+from easydict import EasyDict as edict
+from torch import nn, einsum
+from einops import rearrange
+
+C = edict()
+"""Please configure ROOT_dir and user before first use."""
+# C.repo_name = 'FasterSeg'
+C.abs_dir = osp.realpath(".")
+C.root_dir = osp.realpath("..")
+C.this_dir = C.abs_dir.split(osp.sep)[-1]
+# C.root_dir = C.abs_dir[:C.abs_dir.index(C.repo_name) + len(C.repo_name)]
+"""Path Config"""
+def add_path(path):
+    if path not in sys.path:
+        sys.path.insert(0, path)
+
+add_path(osp.join(C.root_dir, 'tools'))
+try:
+    from utils.darts_utils import compute_latency_ms_tensorrt as compute_latency
+    print("use TensorRT for latency test")
+except:
+    from utils.darts_utils import compute_latency_ms_pytorch as compute_latency
+    print("use PyTorch for latency test")
+from slimmable_ops import USConv2d, USBatchNorm2d
+from layers import NaiveSyncBatchNorm
+
+
+latency_lookup_table = {}
+# `table_file_name` is referenced by the np.save calls below, so it must stay defined.
+table_file_name = "latency_lookup_table.npy"
+# if osp.isfile(table_file_name):
+#     latency_lookup_table = np.load(table_file_name).item()
+
+
+# BatchNorm2d = nn.BatchNorm2d
+BatchNorm2d = NaiveSyncBatchNorm
+
+def drop_path_(x, drop_prob, training):
+    if training and drop_prob > 0.:
+        keep_prob = 1. - drop_prob
+        # per-data-point mask; assumes x is on CUDA
+        mask = torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob)
+        x = torch.div(x, keep_prob)
+        x = torch.mul(x, mask)
+        # x.div_(keep_prob).mul_(mask)
+    return x
+
+class DropPath_(nn.Module):
+    def __init__(self, p=0.):
+        """ DropPath: zeroes a sample's whole residual path during training.
+        Args:
+            p: probability of a path to be zeroed.
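+        Example (illustrative only):
+            dp = DropPath_(p=0.2)
+            y = dp(x)  # in training, each sample's path is zeroed with
+                       # probability 0.2; survivors are scaled by 1/(1-0.2)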
+ """ + super().__init__() + self.p = p + + def extra_repr(self): + return 'p={}, inplace'.format(self.p) + + def forward(self, x): + drop_path_(x, self.p, self.training) + + return x + + def forward_latency(self, size): + c_in, h_in, w_in = size + latency = 0 + return latency, (c_in, h_in, w_in) + +class ConvNorm(nn.Module): + ''' + conv => norm => activation + use native nn.Conv2d, not slimmable + ''' + def __init__(self, C_in, C_out, kernel_size=3, stride=1, padding=None, dilation=1, groups=1, bias=False, slimmable=True, width_mult_list=[1.]): + super(ConvNorm, self).__init__() + self.C_in = C_in + self.C_out = C_out + self.kernel_size = kernel_size + assert stride in [1, 2] + self.stride = stride + if padding is None: + # assume h_out = h_in / s + self.padding = int(np.ceil((dilation * (kernel_size - 1) + 1 - stride) / 2.)) + else: + self.padding = padding + self.dilation = dilation + assert type(groups) == int + if kernel_size == 1: + self.groups = 1 + else: + self.groups = groups + self.bias = bias + self.slimmable = slimmable + self.width_mult_list = width_mult_list + self.ratio = (1., 1.) + + if slimmable: + self.conv = nn.Sequential( + USConv2d(C_in, C_out, kernel_size, stride, padding=self.padding, dilation=dilation, groups=self.groups, bias=bias, width_mult_list=width_mult_list), + USBatchNorm2d(C_out, width_mult_list), + nn.ReLU(inplace=True), + ) + else: + self.conv = nn.Sequential( + nn.Conv2d(C_in, C_out, kernel_size, stride, padding=self.padding, dilation=dilation, groups=self.groups, bias=bias), + # nn.BatchNorm2d(C_out), + BatchNorm2d(C_out), + nn.ReLU(inplace=True), + ) + + def set_ratio(self, ratio): + assert self.slimmable + assert len(ratio) == 2 + self.ratio = ratio + self.conv[0].set_ratio(ratio) + self.conv[1].set_ratio(ratio[1]) + + @staticmethod + def _flops(h, w, C_in, C_out, kernel_size=3, stride=1, padding=None, dilation=1, groups=1, bias=False): + layer = ConvNorm(C_in, C_out, kernel_size, stride, padding, dilation, groups, bias, slimmable=False) + flops, params = profile(layer, inputs=(torch.randn(1, C_in, h, w),), verbose=False) + return flops + + @staticmethod + def _latency(h, w, C_in, C_out, kernel_size=3, stride=1, padding=None, dilation=1, groups=1, bias=False): + layer = ConvNorm(C_in, C_out, kernel_size, stride, padding, dilation, groups, bias, slimmable=False) + latency = compute_latency(layer, (1, C_in, h, w)) + return latency + + def forward_latency(self, size): + c_in, h_in, w_in = size + if self.slimmable: + assert c_in == int(self.C_in * self.ratio[0]), "c_in %d, self.C_in * self.ratio[0] %d"%(c_in, self.C_in * self.ratio[0]) + c_out = int(self.C_out * self.ratio[1]) + else: + assert c_in == self.C_in, "c_in %d, self.C_in %d"%(c_in, self.C_in) + c_out = self.C_out + if self.stride == 1: + h_out = h_in; w_out = w_in + else: + h_out = h_in // 2; w_out = w_in // 2 + name = "ConvNorm_H%d_W%d_Cin%d_Cout%d_kernel%d_stride%d"%(h_in, w_in, c_in, c_out, self.kernel_size, self.stride) + if name in latency_lookup_table: + latency = latency_lookup_table[name] + else: + print("not found in latency_lookup_table:", name) + latency = ConvNorm._latency(h_in, w_in, c_in, c_out, self.kernel_size, self.stride, self.padding, self.dilation, self.groups, self.bias) + latency_lookup_table[name] = latency + np.save(table_file_name, latency_lookup_table) + return latency, (c_out, h_out, w_out) + + def forward(self, x): + assert x.size()[1] == self.C_in, "{} {}".format(x.size()[1], self.C_in) + x = self.conv(x) + return x + + +class BasicResidual1x(nn.Module): + def 
__init__(self, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1, slimmable=True, width_mult_list=[1.]): + super(BasicResidual1x, self).__init__() + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + groups = 1 + self.C_in = C_in + self.C_out = C_out + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + self.groups = groups + self.slimmable = slimmable + self.width_mult_list = width_mult_list + assert stride in [1, 2] + if self.stride == 2: self.dilation = 1 + self.ratio = (1., 1.) + + self.relu = nn.ReLU(inplace=True) + if slimmable: + self.conv1 = USConv2d(C_in, C_out, 3, stride, padding=dilation, dilation=dilation, groups=groups, bias=False, width_mult_list=width_mult_list) + self.bn1 = USBatchNorm2d(C_out, width_mult_list) + else: + self.conv1 = nn.Conv2d(C_in, C_out, 3, stride, padding=dilation, dilation=dilation, groups=groups, bias=False) + # self.bn1 = nn.BatchNorm2d(C_out) + self.bn1 = BatchNorm2d(C_out) + + def set_ratio(self, ratio): + assert len(ratio) == 2 + self.ratio = ratio + self.conv1.set_ratio(ratio) + self.bn1.set_ratio(ratio[1]) + + @staticmethod + def _flops(h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1): + layer = BasicResidual1x(C_in, C_out, kernel_size, stride, dilation, groups, slimmable=False) + flops, params = profile(layer, inputs=(torch.randn(1, C_in, h, w),), verbose=False) + return flops + + @staticmethod + def _latency(h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1): + layer = BasicResidual1x(C_in, C_out, kernel_size, stride, dilation, groups, slimmable=False) + latency = compute_latency(layer, (1, C_in, h, w)) + return latency + + def forward_latency(self, size): + c_in, h_in, w_in = size + if self.slimmable: + assert c_in == int(self.C_in * self.ratio[0]), "c_in %d, int(self.C_in * self.ratio[0]) %d"%(c_in, int(self.C_in * self.ratio[0])) + c_out = int(self.C_out * self.ratio[1]) + else: + assert c_in == self.C_in, "c_in %d, self.C_in %d"%(c_in, self.C_in) + c_out = self.C_out + if self.stride == 1: + h_out = h_in; w_out = w_in + else: + h_out = h_in // 2; w_out = w_in // 2 + name = "BasicResidual1x_H%d_W%d_Cin%d_Cout%d_stride%d_dilation%d"%(h_in, w_in, c_in, c_out, self.stride, self.dilation) + if name in latency_lookup_table: + latency = latency_lookup_table[name] + else: + print("not found in latency_lookup_table:", name) + latency = BasicResidual1x._latency(h_in, w_in, c_in, c_out, self.kernel_size, self.stride, self.dilation, self.groups) + latency_lookup_table[name] = latency + np.save(table_file_name, latency_lookup_table) + return latency, (c_out, h_out, w_out) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + return out + + +class BasicResidual_downup_1x(nn.Module): + def __init__(self, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1, slimmable=True, width_mult_list=[1.]): + super(BasicResidual_downup_1x, self).__init__() + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + groups = 1 + self.C_in = C_in + self.C_out = C_out + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + self.groups = groups + self.slimmable = slimmable + self.width_mult_list = width_mult_list + assert stride in [1, 2] + if self.stride == 2: self.dilation = 1 + self.ratio = (1., 1.) 
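+        # `ratio` follows the slimmable convention used throughout this file:
+        # an (in_ratio, out_ratio) pair scaling C_in and C_out of the op;
+        # (1., 1.) corresponds to the full-width network.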
+ + self.relu = nn.ReLU(inplace=True) + if slimmable: + self.conv1 = USConv2d(C_in, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False, width_mult_list=width_mult_list) + self.bn1 = USBatchNorm2d(C_out, width_mult_list) + if self.stride==1: + self.downsample = nn.Sequential( + USConv2d(C_in, C_out, 1, 1, padding=0, dilation=dilation, groups=groups, bias=False, width_mult_list=width_mult_list), + USBatchNorm2d(C_out, width_mult_list) + ) + else: + self.conv1 = nn.Conv2d(C_in, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False) + # self.bn1 = nn.BatchNorm2d(C_out) + self.bn1 = BatchNorm2d(C_out) + if self.stride==1: + self.downsample = nn.Sequential( + nn.Conv2d(C_in, C_out, 1, 1, padding=0, dilation=dilation, groups=groups, bias=False), + BatchNorm2d(C_out) + ) + + def set_ratio(self, ratio): + assert len(ratio) == 2 + self.ratio = ratio + self.conv1.set_ratio(ratio) + self.bn1.set_ratio(ratio[1]) + + @staticmethod + def _flops(h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1): + assert stride in [1, 2] + layer = BasicResidual_downup_1x(C_in, C_out, kernel_size, stride, dilation, groups, slimmable=False) + flops, params = profile(layer, inputs=(torch.randn(1, C_in, h, w),), verbose=False) + return flops + + @staticmethod + def _latency(h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1): + assert stride in [1, 2] + layer = BasicResidual_downup_1x(C_in, C_out, kernel_size, stride, dilation, groups, slimmable=False) + latency = compute_latency(layer, (1, C_in, h, w)) + return latency + + def forward_latency(self, size): + c_in, h_in, w_in = size + if self.slimmable: + assert c_in == int(self.C_in * self.ratio[0]), "c_in %d, int(self.C_in * self.ratio[0]) %d"%(c_in, int(self.C_in * self.ratio[0])) + c_out = int(self.C_out * self.ratio[1]) + else: + assert c_in == self.C_in, "c_in %d, self.C_in %d"%(c_in, self.C_in) + c_out = self.C_out + if self.stride == 1: + h_out = h_in; w_out = w_in + else: + h_out = h_in // 2; w_out = w_in // 2 + name = "BasicResidual_downup_1x_H%d_W%d_Cin%d_Cout%d_stride%d_dilation%d"%(h_in, w_in, c_in, c_out, self.stride, self.dilation) + if name in latency_lookup_table: + latency = latency_lookup_table[name] + else: + print("not found in latency_lookup_table:", name) + latency = BasicResidual_downup_1x._latency(h_in, w_in, c_in, c_out, self.kernel_size, self.stride, self.dilation, self.groups) + latency_lookup_table[name] = latency + np.save(table_file_name, latency_lookup_table) + return latency, (c_out, h_out, w_out) + + def forward(self, x): + out = F.interpolate(x, size=(int(x.size(2))//2, int(x.size(3))//2), mode='bilinear', align_corners=False) + out = self.conv1(out) + out = self.bn1(out) + if self.stride == 1: + out = F.interpolate(out, size=(int(x.size(2)), int(x.size(3))), mode='bilinear', align_corners=False) + out = out + self.downsample(x) + out = self.relu(out) + return out + + +class BasicResidual2x(nn.Module): + def __init__(self, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1, slimmable=True, width_mult_list=[1.]): + super(BasicResidual2x, self).__init__() + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + groups = 1 + self.C_in = C_in + self.C_out = C_out + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + self.groups = groups + self.slimmable = slimmable + self.width_mult_list = width_mult_list + assert stride in [1, 2] + if self.stride == 2: self.dilation = 1 + self.ratio = (1., 1.) 
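+        # As in the other residual ops here, dilation is reset to 1 above for
+        # stride-2 instances, presumably because downsampling already enlarges
+        # the receptive field.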
+ + self.relu = nn.ReLU(inplace=True) + if self.slimmable: + self.conv1 = USConv2d(C_in, C_out, 3, stride, padding=dilation, dilation=dilation, groups=groups, bias=False, width_mult_list=width_mult_list) + self.bn1 = USBatchNorm2d(C_out, width_mult_list) + self.conv2 = USConv2d(C_out, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False, width_mult_list=width_mult_list) + self.bn2 = USBatchNorm2d(C_out, width_mult_list) + else: + self.conv1 = nn.Conv2d(C_in, C_out, 3, stride, padding=dilation, dilation=dilation, groups=groups, bias=False) + # self.bn1 = nn.BatchNorm2d(C_out) + self.bn1 = BatchNorm2d(C_out) + self.conv2 = nn.Conv2d(C_out, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False) + # self.bn2 = nn.BatchNorm2d(C_out) + self.bn2 = BatchNorm2d(C_out) + + def set_ratio(self, ratio): + assert len(ratio) == 2 + self.ratio = ratio + self.conv1.set_ratio(ratio) + self.bn1.set_ratio(ratio[1]) + self.conv2.set_ratio((ratio[1], ratio[1])) + self.bn2.set_ratio(ratio[1]) + + @staticmethod + def _flops(h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1): + layer = BasicResidual2x(C_in, C_out, kernel_size, stride, dilation, groups, slimmable=False) + flops, params = profile(layer, inputs=(torch.randn(1, C_in, h, w),), verbose=False) + return flops + + @staticmethod + def _latency(h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1): + layer = BasicResidual2x(C_in, C_out, kernel_size, stride, dilation, groups, slimmable=False) + latency = compute_latency(layer, (1, C_in, h, w)) + return latency + + def forward_latency(self, size): + c_in, h_in, w_in = size + if self.slimmable: + assert c_in == int(self.C_in * self.ratio[0]) + c_out = int(self.C_out * self.ratio[1]) + else: + assert c_in == self.C_in, "c_in %d, self.C_in%d"%(c_in, self.C_in) + c_out = self.C_out + if self.stride == 1: + h_out = h_in; w_out = w_in + else: + h_out = h_in // 2; w_out = w_in // 2 + name = "BasicResidual2x_H%d_W%d_Cin%d_Cout%d_stride%d_dilation%d"%(h_in, w_in, c_in, c_out, self.stride, self.dilation) + if name in latency_lookup_table: + latency = latency_lookup_table[name] + else: + print("not found in latency_lookup_table:", name) + latency = BasicResidual2x._latency(h_in, w_in, c_in, c_out, self.kernel_size, self.stride, self.dilation, self.groups) + latency_lookup_table[name] = latency + np.save(table_file_name, latency_lookup_table) + return latency, (c_out, h_out, w_out) + + def forward(self, x): + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + return out + + +class BasicResidual_downup_2x(nn.Module): + def __init__(self, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1, slimmable=True, width_mult_list=[1.]): + super(BasicResidual_downup_2x, self).__init__() + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + groups = 1 + self.C_in = C_in + self.C_out = C_out + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + self.groups = groups + self.slimmable = slimmable + self.width_mult_list = width_mult_list + assert stride in [1, 2] + if self.stride == 2: self.dilation = 1 + self.ratio = (1., 1.) 
+ + self.relu = nn.ReLU(inplace=True) + if self.slimmable: + self.conv1 = USConv2d(C_in, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False, width_mult_list=width_mult_list) + self.bn1 = USBatchNorm2d(C_out, width_mult_list) + self.conv2 = USConv2d(C_out, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False, width_mult_list=width_mult_list) + self.bn2 = USBatchNorm2d(C_out, width_mult_list) + if self.stride==1: + self.downsample = nn.Sequential( + USConv2d(C_in, C_out, 1, 1, padding=0, dilation=dilation, groups=groups, bias=False, width_mult_list=width_mult_list), + USBatchNorm2d(C_out, width_mult_list) + ) + else: + self.conv1 = nn.Conv2d(C_in, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False) + # self.bn1 = nn.BatchNorm2d(C_out) + self.bn1 = BatchNorm2d(C_out) + self.conv2 = nn.Conv2d(C_out, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False) + # self.bn2 = nn.BatchNorm2d(C_out) + self.bn2 = BatchNorm2d(C_out) + if self.stride==1: + self.downsample = nn.Sequential( + nn.Conv2d(C_in, C_out, 1, 1, padding=0, dilation=dilation, groups=groups, bias=False), + BatchNorm2d(C_out) + ) + + def set_ratio(self, ratio): + assert len(ratio) == 2 + self.ratio = ratio + self.conv1.set_ratio(ratio) + self.bn1.set_ratio(ratio[1]) + self.conv2.set_ratio((ratio[1], ratio[1])) + self.bn2.set_ratio(ratio[1]) + + @staticmethod + def _flops(h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1): + assert stride in [1, 2] + layer = BasicResidual_downup_2x(C_in, C_out, kernel_size, stride, dilation, groups, slimmable=False) + flops, params = profile(layer, inputs=(torch.randn(1, C_in, h, w),), verbose=False) + return flops + + @staticmethod + def _latency(h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1): + assert stride in [1, 2] + layer = BasicResidual_downup_2x(C_in, C_out, kernel_size, stride, dilation, groups, slimmable=False) + latency = compute_latency(layer, (1, C_in, h, w)) + return latency + + def forward_latency(self, size): + c_in, h_in, w_in = size + if self.slimmable: + assert c_in == int(self.C_in * self.ratio[0]) + c_out = int(self.C_out * self.ratio[1]) + else: + assert c_in == self.C_in, "c_in %d, self.C_in%d"%(c_in, self.C_in) + c_out = self.C_out + if self.stride == 1: + h_out = h_in; w_out = w_in + else: + h_out = h_in // 2; w_out = w_in // 2 + name = "BasicResidual2x_H%d_W%d_Cin%d_Cout%d_stride%d_dilation%d"%(h_in, w_in, c_in, c_out, self.stride, self.dilation) + if name in latency_lookup_table: + latency = latency_lookup_table[name] + else: + print("not found in latency_lookup_table:", name) + latency = BasicResidual2x._latency(h_in, w_in, c_in, c_out, self.kernel_size, self.stride, self.dilation, self.groups) + latency_lookup_table[name] = latency + np.save(table_file_name, latency_lookup_table) + return latency, (c_out, h_out, w_out) + + def forward(self, x): + out = F.interpolate(x, size=(int(x.size(2))//2, int(x.size(3))//2), mode='bilinear', align_corners=False) + out = self.conv1(out) + out = self.bn1(out) + out = self.relu(out) + out = self.conv2(out) + out = self.bn2(out) + if self.stride == 1: + out = F.interpolate(out, size=(int(x.size(2)), int(x.size(3))), mode='bilinear', align_corners=False) + out = out + self.downsample(x) + out = self.relu(out) + return out + + +class FactorizedReduce(nn.Module): + def __init__(self, C_in, C_out, stride=1, slimmable=True, width_mult_list=[1.]): + super(FactorizedReduce, self).__init__() + assert stride in [1, 2] + 
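+        # The stride-2 reduction is "factorized" into two parallel stride-2
+        # 1x1 convs, the second applied to the input shifted by one pixel, so
+        # that together they cover all spatial positions; their outputs are
+        # concatenated channel-wise, hence the even-C_out check below.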
assert C_out % 2 == 0 + self.C_in = C_in + self.C_out = C_out + self.stride = stride + self.slimmable = slimmable + self.width_mult_list = width_mult_list + self.ratio = (1., 1.) + if stride == 1 and slimmable: + self.conv1 = USConv2d(C_in, C_out, 1, stride=1, padding=0, bias=False, width_mult_list=width_mult_list) + self.bn = USBatchNorm2d(C_out, width_mult_list) + self.relu = nn.ReLU(inplace=True) + elif stride == 2: + self.relu = nn.ReLU(inplace=True) + if slimmable: + self.conv1 = USConv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False, width_mult_list=width_mult_list) + self.conv2 = USConv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False, width_mult_list=width_mult_list) + self.bn = USBatchNorm2d(C_out, width_mult_list) + else: + self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.bn = BatchNorm2d(C_out) + + def set_ratio(self, ratio): + assert len(ratio) == 2 + if self.stride == 1: + self.ratio = ratio + self.conv1.set_ratio(ratio) + self.bn.set_ratio(ratio[1]) + elif self.stride == 2: + self.ratio = ratio + self.conv1.set_ratio(ratio) + self.conv2.set_ratio(ratio) + self.bn.set_ratio(ratio[1]) + + @staticmethod + def _flops(h, w, C_in, C_out, stride=1): + layer = FactorizedReduce(C_in, C_out, stride, slimmable=False) + flops, params = profile(layer, inputs=(torch.randn(1, C_in, h, w),), verbose=False) + return flops + + @staticmethod + def _latency(h, w, C_in, C_out, stride=1): + layer = FactorizedReduce(C_in, C_out, stride, slimmable=False) + latency = compute_latency(layer, (1, C_in, h, w)) + return latency + + def forward_latency(self, size): + c_in, h_in, w_in = size + if self.slimmable: + assert c_in == int(self.C_in * self.ratio[0]) + c_out = int(self.C_out * self.ratio[1]) + else: + assert c_in == self.C_in + c_out = self.C_out + if self.stride == 1: + h_out = h_in; w_out = w_in + else: + h_out = h_in // 2; w_out = w_in // 2 + name = "FactorizedReduce_H%d_W%d_Cin%d_Cout%d_stride%d"%(h_in, w_in, c_in, c_out, self.stride) + if name in latency_lookup_table: + latency = latency_lookup_table[name] + else: + print("not found in latency_lookup_table:", name) + latency = FactorizedReduce._latency(h_in, w_in, c_in, c_out, self.stride) + latency_lookup_table[name] = latency + np.save(table_file_name, latency_lookup_table) + return latency, (c_out, h_out, w_out) + + def forward(self, x): + if self.stride == 2: + out = torch.cat([self.conv1(x), self.conv2(x[:,:,1:,1:])], dim=1) + out = self.bn(out) + out = self.relu(out) + return out + else: + if self.slimmable: + out = self.conv1(x) + out = self.bn(out) + out = self.relu(out) + return out + else: + return x + + +def pair(x): + return (x, x) if not isinstance(x, tuple) else x + +def expand_dim(t, dim, k): + t = t.unsqueeze(dim = dim) + expand_shape = [-1] * len(t.shape) + expand_shape[dim] = k + return t.expand(*expand_shape) + +def rel_to_abs(x): + b, h, l, _, device, dtype = *x.shape, x.device, x.dtype + dd = {'device': device, 'dtype': dtype} + col_pad = torch.zeros((b, h, l, 1), **dd) + x = torch.cat((x, col_pad), dim = 3) + flat_x = rearrange(x, 'b h l c -> b h (l c)') + flat_pad = torch.zeros((b, h, l - 1), **dd) + flat_x_padded = torch.cat((flat_x, flat_pad), dim = 2) + final_x = flat_x_padded.reshape(b, h, l + 1, 2 * l - 1) + final_x = final_x[:, :, :l, (l-1):] + return final_x + +def relative_logits_1d(q, rel_k): + b, heads, h, w, dim = q.shape + logits = einsum('b h x y d, r d -> b h x y r', q, rel_k) + 
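+    # `logits` is (b, heads, h, w, 2w-1) here: one score per query position
+    # and relative offset. rel_to_abs below converts the relative-offset axis
+    # to absolute positions via the usual pad-and-reshape trick, after which
+    # the result is expanded back over the height dimension.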
logits = rearrange(logits, 'b h x y r -> b (h x) y r') + logits = rel_to_abs(logits) + logits = logits.reshape(b, heads, h, w, w) + logits = expand_dim(logits, dim = 3, k = h) + return logits + +# positional embeddings + +class AbsPosEmb(nn.Module): + def __init__( + self, + fmap_size, + dim_head + ): + super().__init__() + height, width = pair(fmap_size) + scale = dim_head ** -0.5 + self.height = nn.Parameter(torch.randn(height, dim_head) * scale) + self.width = nn.Parameter(torch.randn(width, dim_head) * scale) + + def forward(self, q): + emb = rearrange(self.height, 'h d -> h () d') + rearrange(self.width, 'w d -> () w d') + emb = rearrange(emb, ' h w d -> (h w) d') + logits = einsum('b h i d, j d -> b h i j', q, emb) + return logits + +class RelPosEmb(nn.Module): + def __init__( + self, + fmap_size, + dim_head + ): + super().__init__() + height, width = pair(fmap_size) + scale = dim_head ** -0.5 + self.fmap_size = fmap_size + self.rel_height = nn.Parameter(torch.randn(height * 2 - 1, dim_head) * scale) + self.rel_width = nn.Parameter(torch.randn(width * 2 - 1, dim_head) * scale) + + def forward(self, q): + h, w = self.fmap_size + + q = rearrange(q, 'b h (x y) d -> b h x y d', x = h, y = w) + rel_logits_w = relative_logits_1d(q, self.rel_width) + rel_logits_w = rearrange(rel_logits_w, 'b h x i y j-> b h (x y) (i j)') + + q = rearrange(q, 'b h x y d -> b h y x d') + rel_logits_h = relative_logits_1d(q, self.rel_height) + rel_logits_h = rearrange(rel_logits_h, 'b h x i y j -> b h (y x) (j i)') + return rel_logits_w + rel_logits_h + +# classes + +class Attention(nn.Module): + def __init__( + self, + *, + dim, + fmap_size, + heads = 4, + dim_head = 128, + rel_pos_emb = False + ): + super().__init__() + self.heads = heads + + self.scale = dim_head ** -0.5 + inner_dim = heads * dim_head + + self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias = False) + + rel_pos_class = AbsPosEmb if not rel_pos_emb else RelPosEmb + self.pos_emb = rel_pos_class(fmap_size, dim_head) + + def forward(self, fmap): + heads, b, c, h, w = self.heads, *fmap.shape + + q, k, v = self.to_qkv(fmap).chunk(3, dim = 1) + q, k, v = map(lambda t: rearrange(t, 'b (h d) x y -> b h (x y) d', h = heads), (q, k, v)) + + q *= self.scale + + sim = einsum('b h i d, b h j d -> b h i j', q, k) + sim += self.pos_emb(q) + + attn = sim.softmax(dim = -1) + + out = einsum('b h i j, b h j d -> b h i d', attn, v) + out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w) + return out + +class Self_Attn(nn.Module): + def __init__( + self, + *, + dim, + fmap_size, + dim_out, + proj_factor, + downsample, + slimmable=True, + width_mult_list=[1.], + heads = 4, + dim_head = 128, + rel_pos_emb = False, + activation = nn.ReLU(inplace=True) + ): + super().__init__() + + # shortcut + + # contraction and expansion + self.slimmable = slimmable + self.width_mult_list = width_mult_list + + + if slimmable: + kernel_size, stride, padding = (3, 2, 1) if downsample else (1, 1, 0) + self.sk = False + self.shortcut = nn.Sequential( + USConv2d(dim, dim_out, kernel_size, padding=padding, stride=stride, dilation=1, groups=1, bias=False, width_mult_list=width_mult_list), + USBatchNorm2d(dim_out, width_mult_list), + activation + ) + else: + if dim != dim_out or downsample: + self.sk = False + kernel_size, stride, padding = (3, 2, 1) if downsample else (1, 1, 0) + + self.shortcut = nn.Sequential( + nn.Conv2d(dim, dim_out, kernel_size, stride = stride, padding = padding, bias = False), + BatchNorm2d(dim_out), + activation + ) + else: + self.sk = True + 
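+                # `sk` marks a pure skip shortcut: with matching channels and
+                # no downsampling the identity suffices, and set_ratio() skips
+                # the (non-existent) projection layers.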
self.shortcut = nn.Identity() + + self.mix_bn1 = nn.ModuleList([]) + self.mix_bn2 = nn.ModuleList([]) + self.mix_bn3 = nn.ModuleList([]) + + # attn_dim_in = dim_out // proj_factor + attn_dim_in = dim_out + # attn_dim_out = heads * dim_head + attn_dim_out = attn_dim_in + + if self.slimmable: + self.mix_bn1.append(USBatchNorm2d(dim_out, width_mult_list)) + self.mix_bn2.append(USBatchNorm2d(dim_out, width_mult_list)) + self.mix_bn3.append(USBatchNorm2d(dim_out, width_mult_list)) + nn.init.zeros_(self.mix_bn3[0].weight) + else: + self.mix_bn1.append(BatchNorm2d(dim_out)) + self.mix_bn2.append(BatchNorm2d(dim_out)) + self.mix_bn3.append(BatchNorm2d(dim_out)) + nn.init.zeros_(self.mix_bn3[0].weight) + + if self.slimmable: + self.net1 = USConv2d(dim, attn_dim_in, 1, padding=0, stride=1, dilation=1, groups=1, bias=False, width_mult_list=width_mult_list) + + self.net2 = nn.Sequential( + activation, + ATT(attn_dim_in, slimmable=True, width_mult_list=width_mult_list), + nn.AvgPool2d((2, 2)) if downsample else nn.Identity() + ) + + self.net3 = nn.Sequential( + activation, + USConv2d(attn_dim_out, dim_out, 1, padding=0, stride=1, dilation=1, groups=1, bias=False, width_mult_list=width_mult_list), + ) + + else: + self.net1 = nn.Conv2d(dim, attn_dim_in, 1, bias = False) + + self.net2 = nn.Sequential( + activation, + ATT(attn_dim_in, slimmable=False), + nn.AvgPool2d((2, 2)) if downsample else nn.Identity() + ) + + self.net3 = nn.Sequential( + activation, + nn.Conv2d(attn_dim_out, dim_out, 1, bias = False), + ) + + # init last batch norm gamma to zero + + # nn.init.zeros_(self.net[-1].weight) + + # final activation + + self.activation = activation + + def set_ratio(self, ratio): + if not self.sk: + self.shortcut[0].set_ratio(ratio) + self.shortcut[1].set_ratio(ratio[1]) + + for i in range(len(self.mix_bn1)): + self.mix_bn1[i].set_ratio(ratio[1]) + self.mix_bn2[i].set_ratio(ratio[1]) + self.mix_bn3[i].set_ratio(ratio[1]) + + self.net1.set_ratio(ratio) + self.net2[1].set_ratio((ratio[1], ratio[1])) + self.net3[1].set_ratio((ratio[1], ratio[1])) + + @staticmethod + def _flops(h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1): + assert stride in [1, 2] + layer = Self_Attn(dim=C_in, fmap_size=(128, 256), dim_out=C_out, downsample=(stride==2)) + flops, params = profile(layer, inputs=(torch.randn(1, C_in, h, w),), verbose=False) + return flops + + @staticmethod + def _latency(h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1): + assert stride in [1, 2] + layer = Self_Attn(dim=C_in, fmap_size=(128, 256), dim_out=C_out, downsample=(stride==2)) + latency = compute_latency(layer, (1, C_in, h, w)) + return latency + + def forward_latency(self, size): + c_in, h_in, w_in = size + if self.slimmable: + assert c_in == int(self.C_in * self.ratio[0]) + c_out = int(self.C_out * self.ratio[1]) + else: + assert c_in == self.C_in, "c_in %d, self.C_in%d"%(c_in, self.C_in) + c_out = self.C_out + if self.stride == 1: + h_out = h_in; w_out = w_in + else: + h_out = h_in // 2; w_out = w_in // 2 + name = "Self_Attn_H%d_W%d_Cin%d_Cout%d_stride%d_dilation%d"%(h_in, w_in, c_in, c_out, self.stride, self.dilation) + if name in latency_lookup_table: + latency = latency_lookup_table[name] + else: + print("not found in latency_lookup_table:", name) + latency = Self_Attn._latency(h_in, w_in, c_in, c_out, self.kernel_size, self.stride, self.dilation, self.groups) + latency_lookup_table[name] = latency + np.save(table_file_name, latency_lookup_table) + return latency, (c_out, h_out, w_out) + + def forward(self, x): 
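+        # Bottleneck around the attention block: 1x1 conv (net1) -> BN ->
+        # self-attention with optional pooling (net2) -> BN -> 1x1 conv (net3)
+        # -> BN, added to the (possibly projected) shortcut. `branch` indexes
+        # the BN ModuleLists; only a single branch is constructed here.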
+        branch = 0
+        shortcut = self.shortcut(x)
+        x = self.net1(x)
+        x = self.mix_bn1[branch](x)
+        x = self.net2(x)
+        x = self.mix_bn2[branch](x)
+        x = self.net3(x)
+        x = self.mix_bn3[branch](x)
+        x += shortcut
+        return self.activation(x)
+
+class ATT(nn.Module):
+    """ Self-attention layer """
+    def __init__(self, in_dim, slimmable=True, width_mult_list=[1.]):
+        super(ATT, self).__init__()
+        self.channel_in = in_dim
+
+        self.slimmable = slimmable
+        self.width_mult_list = width_mult_list
+        self.ratio = (1., 1.)
+
+        if self.slimmable:
+            self.query_conv = USConv2d(in_dim, in_dim//8, 1, padding=0, stride=1, bias=False, width_mult_list=width_mult_list)
+            self.key_conv = USConv2d(in_dim, in_dim//8, 1, padding=0, stride=1, bias=False, width_mult_list=width_mult_list)
+            self.value_conv = USConv2d(in_dim, in_dim, 1, padding=0, stride=1, bias=False, width_mult_list=width_mult_list)
+        else:
+            self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
+            self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
+            self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
+        self.gamma = nn.Parameter(torch.zeros(1))
+
+        self.softmax = nn.Softmax(dim=-1)
+
+    def set_ratio(self, ratio):
+        assert len(ratio) == 2
+        self.ratio = ratio
+        self.query_conv.set_ratio(ratio)
+        self.key_conv.set_ratio(ratio)
+        self.value_conv.set_ratio(ratio)
+
+    def forward(self, x):
+        """
+        inputs:
+            x: input feature maps (B x C x H x W)
+        returns:
+            out: self-attention value + input feature
+                 (attention is B x N x N, with N = H*W)
+        """
+        m_batchsize, C, height, width = x.size()
+        proj_query = self.query_conv(x).view(m_batchsize, -1, height*width).permute(0, 2, 1)  # B x N x C'
+        proj_key = self.key_conv(x).view(m_batchsize, -1, height*width)  # B x C' x N
+        energy = torch.bmm(proj_query, proj_key)  # B x N x N
+        attention = self.softmax(energy)
+        proj_value = self.value_conv(x).view(m_batchsize, -1, height*width)  # B x C x N
+
+        out = torch.bmm(proj_value, attention.permute(0, 2, 1))
+        out = out.view(m_batchsize, C, height, width)
+
+        out = self.gamma * out + x
+        return out
+
+from collections import OrderedDict
+OPS = {
+    'skip' : lambda C_in, C_out, stride, slimmable, width_mult_list, fmap_size: FactorizedReduce(C_in, C_out, stride, slimmable, width_mult_list),
+    'conv' : lambda C_in, C_out, stride, slimmable, width_mult_list, fmap_size: BasicResidual1x(C_in, C_out, kernel_size=3, stride=stride, dilation=1, slimmable=slimmable, width_mult_list=width_mult_list),
+    'conv_downup' : lambda C_in, C_out, stride, slimmable, width_mult_list, fmap_size: BasicResidual_downup_1x(C_in, C_out, kernel_size=3, stride=stride, dilation=1, slimmable=slimmable, width_mult_list=width_mult_list),
+    'conv_2x' : lambda C_in, C_out, stride, slimmable, width_mult_list, fmap_size: BasicResidual2x(C_in, C_out, kernel_size=3, stride=stride, dilation=1, slimmable=slimmable, width_mult_list=width_mult_list),
+    'conv_2x_downup' : lambda C_in, C_out, stride, slimmable, width_mult_list, fmap_size: BasicResidual_downup_2x(C_in, C_out, kernel_size=3, stride=stride, dilation=1, slimmable=slimmable, width_mult_list=width_mult_list),
+    'sa': lambda C_in, C_out, stride, slimmable, width_mult_list, fmap_size: Self_Attn(dim=C_in, fmap_size=(128, 256), dim_out=C_out, proj_factor=1, downsample=(stride==2), slimmable=slimmable, width_mult_list=width_mult_list)
+}
+
+OPS_name = ["FactorizedReduce", "BasicResidual1x", "BasicResidual_downup_1x",
"BasicResidual2x", "BasicResidual_downup_2x", "Self_Attn"] + +OPS_Class = OrderedDict() +OPS_Class['skip'] = FactorizedReduce +OPS_Class['conv'] = BasicResidual1x +OPS_Class['conv_downup'] = BasicResidual_downup_1x +OPS_Class['conv_2x'] = BasicResidual2x +OPS_Class['conv_2x_downup'] = BasicResidual_downup_2x +OPS_Class['sa'] = Self_Attn diff --git a/CDARTS_segmentation/train/run_det2.sh b/CDARTS_segmentation/train/run_det2.sh new file mode 100644 index 0000000..fc355d8 --- /dev/null +++ b/CDARTS_segmentation/train/run_det2.sh @@ -0,0 +1,3 @@ +export DETECTRON2_DATASETS="/home/hongyuan/data/" +NGPUS=8 +python -m torch.distributed.launch --nproc_per_node=$NGPUS train_autos4_det2.py --world_size $NGPUS --seed 12367 \ No newline at end of file diff --git a/CDARTS_segmentation/train/seg_metrics.py b/CDARTS_segmentation/train/seg_metrics.py new file mode 100644 index 0000000..79afbaa --- /dev/null +++ b/CDARTS_segmentation/train/seg_metrics.py @@ -0,0 +1,98 @@ +import numpy as np +import torch + + +class Seg_Metrics(object): + def __init__(self, n_classes=19): + self.n_classes = n_classes + self.total_inter = np.zeros(n_classes) + self.total_union = np.zeros(n_classes) + + def update(self, inter, union, N): + self.total_inter += inter * N + self.total_union += union * N + + def get_scores(self): + idx = self.total_union > 0 + IoU = 1.0 * self.total_inter[idx] / (np.spacing(1) + self.total_union[idx]) + mIoU = IoU.mean() + return mIoU + + def reset(self): + self.total_inter = np.zeros(n_classes) + self.total_union = np.zeros(n_classes) + + +def batch_pix_accuracy(predict, target): + """Batch Pixel Accuracy + Args: + predict: input 4D tensor + target: label 3D tensor + """ + _, predict = torch.max(predict, 1) + predict = predict.cpu().numpy() + 1 + target = target.cpu().numpy() + 1 + pixel_labeled = np.sum(target > 0) + pixel_correct = np.sum((predict == target)*(target > 0)) + assert pixel_correct <= pixel_labeled, \ + "Correct area should be smaller than Labeled" + return pixel_correct, pixel_labeled + + +def batch_intersection_union(predict, target, nclass): + """Batch Intersection of Union + Args: + predict: input 4D tensor + target: label 3D tensor + nclass: number of categories (int) + """ + _, predict = torch.max(predict, 1) + mini = 1 + maxi = nclass + nbins = nclass + predict = predict.cpu().numpy() + 1 + target = target.cpu().numpy() + 1 + + k = (target >= 1) & (target <= nclass) + # predict = predict * (target > 0).astype(predict.dtype) + predict = predict * k.astype(predict.dtype) + intersection = predict * (predict == target) + # areas of intersection and union + area_inter, _ = np.histogram(intersection, bins=nbins, range=(mini, maxi)) + area_pred, _ = np.histogram(predict, bins=nbins, range=(mini, maxi)) + area_lab, _ = np.histogram(target, bins=nbins, range=(mini, maxi)) + area_union = area_pred + area_lab - area_inter + assert (area_inter <= area_union).all(), \ + "Intersection area should be smaller than Union area" + return area_inter, area_union + + +# ref https://github.com/CSAILVision/sceneparsing/blob/master/evaluationCode/utils_eval.py +def pixel_accuracy(im_pred, im_lab): + im_pred = np.asarray(im_pred) + im_lab = np.asarray(im_lab) + + # Remove classes from unlabeled pixels in gt image. + # We should not penalize detections in unlabeled portions of the image. 
+ pixel_labeled = np.sum(im_lab > 0) + pixel_correct = np.sum((im_pred == im_lab) * (im_lab > 0)) + #pixel_accuracy = 1.0 * pixel_correct / pixel_labeled + return pixel_correct, pixel_labeled + + +def intersection_and_union(im_pred, im_lab, num_class): + im_pred = np.asarray(im_pred) + im_lab = np.asarray(im_lab) + # Remove classes from unlabeled pixels in gt image. + im_pred = im_pred * (im_lab > 0) + # Compute area intersection: + intersection = im_pred * (im_pred == im_lab) + area_inter, _ = np.histogram(intersection, bins=num_class-1, + range=(1, num_class - 1)) + # Compute area union: + area_pred, _ = np.histogram(im_pred, bins=num_class-1, + range=(1, num_class - 1)) + area_lab, _ = np.histogram(im_lab, bins=num_class-1, + range=(1, num_class - 1)) + area_union = area_pred + area_lab - area_inter + return area_inter, area_union diff --git a/CDARTS_segmentation/train/seg_oprs.py b/CDARTS_segmentation/train/seg_oprs.py new file mode 100644 index 0000000..7000f01 --- /dev/null +++ b/CDARTS_segmentation/train/seg_oprs.py @@ -0,0 +1,558 @@ +import numpy as np +try: + from utils.darts_utils import compute_latency_ms_tensorrt as compute_latency + print("use TensorRT for latency test") +except: + from utils.darts_utils import compute_latency_ms_pytorch as compute_latency + print("use PyTorch for latency test") +import torch +import torch.nn as nn + +import os.path as osp +latency_lookup_table = {} +# table_file_name = "latency_lookup_table.npy" +# if osp.isfile(table_file_name): +# latency_lookup_table = np.load(table_file_name).item() + +import torch.nn.functional as F +from collections import OrderedDict +from layers import NaiveSyncBatchNorm +from operations import ConvNorm +from att_sa import Self_Attn +BatchNorm2d = NaiveSyncBatchNorm + +class ConvBnRelu(nn.Module): + def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1, + groups=1, has_bn=True, norm_layer=nn.BatchNorm2d, bn_eps=1e-5, + has_relu=True, inplace=True, has_bias=False): + super(ConvBnRelu, self).__init__() + groups = 1 + self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize, + stride=stride, padding=pad, + dilation=dilation, groups=groups, bias=has_bias) + self.has_bn = has_bn + if self.has_bn: + self.bn = norm_layer(out_planes, eps=bn_eps) + self.has_relu = has_relu + if self.has_relu: + self.relu = nn.ReLU(inplace=inplace) + + def forward(self, x): + x = self.conv(x) + if self.has_bn: + x = self.bn(x) + if self.has_relu: + x = self.relu(x) + + return x + + +class SeparableConvBnRelu(nn.Module): + def __init__(self, in_channels, out_channels, + kernel_size=1, stride=1, padding=0, dilation=1, + has_relu=True, norm_layer=nn.BatchNorm2d): + super(SeparableConvBnRelu, self).__init__() + + self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, + padding, dilation, groups=in_channels, + bias=False) + self.bn = norm_layer(in_channels) + self.point_wise_cbr = ConvBnRelu(in_channels, out_channels, 1, 1, 0, + has_bn=True, norm_layer=norm_layer, + has_relu=has_relu, has_bias=False) + + def forward(self, x): + x = self.conv1(x) + x = self.bn(x) + x = self.point_wise_cbr(x) + return x + + +class GlobalAvgPool2d(nn.Module): + def __init__(self): + """Global average pooling over the input's spatial dimensions""" + super(GlobalAvgPool2d, self).__init__() + + def forward(self, inputs): + in_size = inputs.size() + inputs = inputs.view((in_size[0], in_size[1], -1)).mean(dim=2) + inputs = inputs.view(in_size[0], in_size[1], 1, 1) + + return inputs + + +class SELayer(nn.Module): + def __init__(self, 
in_planes, out_planes, reduction=16): + super(SELayer, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(in_planes, out_planes // reduction), + nn.ReLU(inplace=True), + nn.Linear(out_planes // reduction, out_planes), + nn.Sigmoid() + ) + self.out_planes = out_planes + + def forward(self, x): + b, c, _, _ = x.size() + y = self.avg_pool(x).view(b, c) + y = self.fc(y).view(b, self.out_planes, 1, 1) + return y + + +# For DFN +class ChannelAttention(nn.Module): + def __init__(self, in_planes, out_planes, reduction): + super(ChannelAttention, self).__init__() + self.channel_attention = SELayer(in_planes, out_planes, reduction) + + def forward(self, x1, x2): + fm = torch.cat([x1, x2], 1) + channel_attetion = self.channel_attention(fm) + fm = x1 * channel_attetion + x2 + + return fm + + +class BNRefine(nn.Module): + def __init__(self, in_planes, out_planes, ksize, has_bias=False, + has_relu=False, norm_layer=nn.BatchNorm2d, bn_eps=1e-5): + super(BNRefine, self).__init__() + self.conv_bn_relu = ConvBnRelu(in_planes, out_planes, ksize, 1, + ksize // 2, has_bias=has_bias, + norm_layer=norm_layer, bn_eps=bn_eps) + self.conv_refine = nn.Conv2d(out_planes, out_planes, kernel_size=ksize, + stride=1, padding=ksize // 2, dilation=1, + bias=has_bias) + self.has_relu = has_relu + if self.has_relu: + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + t = self.conv_bn_relu(x) + t = self.conv_refine(t) + if self.has_relu: + return self.relu(t + x) + return t + x + + +class RefineResidual(nn.Module): + def __init__(self, in_planes, out_planes, ksize, has_bias=False, + has_relu=False, norm_layer=nn.BatchNorm2d, bn_eps=1e-5): + super(RefineResidual, self).__init__() + self.conv_1x1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, + stride=1, padding=0, dilation=1, + bias=has_bias) + self.cbr = ConvBnRelu(out_planes, out_planes, ksize, 1, + ksize // 2, has_bias=has_bias, + norm_layer=norm_layer, bn_eps=bn_eps) + self.conv_refine = nn.Conv2d(out_planes, out_planes, kernel_size=ksize, + stride=1, padding=ksize // 2, dilation=1, + bias=has_bias) + self.has_relu = has_relu + if self.has_relu: + self.relu = nn.ReLU(inplace=False) + + def forward(self, x): + x = self.conv_1x1(x) + t = self.cbr(x) + t = self.conv_refine(t) + if self.has_relu: + return self.relu(t + x) + return t + x + + +# For BiSeNet +class AttentionRefinement(nn.Module): + def __init__(self, in_planes, out_planes, + norm_layer=nn.BatchNorm2d): + super(AttentionRefinement, self).__init__() + self.conv_3x3 = ConvBnRelu(in_planes, out_planes, 3, 1, 1, + has_bn=True, norm_layer=norm_layer, + has_relu=True, has_bias=False) + self.channel_attention = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + ConvBnRelu(out_planes, out_planes, 1, 1, 0, + has_bn=True, norm_layer=norm_layer, + has_relu=False, has_bias=False), + nn.Sigmoid() + ) + + def forward(self, x): + fm = self.conv_3x3(x) + fm_se = self.channel_attention(fm) + fm = fm * fm_se + + return fm + + +class FeatureFusion(nn.Module): + def __init__(self, in_planes, out_planes, reduction=1, Fch=16, scale=4, branch=2, norm_layer=nn.BatchNorm2d): + super(FeatureFusion, self).__init__() + self.conv_1x1 = ConvBnRelu(in_planes, out_planes, 1, 1, 0, + has_bn=True, norm_layer=norm_layer, + has_relu=True, has_bias=False) + # self.channel_attention = nn.Sequential( + # nn.AdaptiveAvgPool2d(1), + # ConvBnRelu(out_planes, out_planes // reduction, 1, 1, 0, + # has_bn=False, norm_layer=norm_layer, + # has_relu=True, has_bias=False), + # ConvBnRelu(out_planes // 
reduction, out_planes, 1, 1, 0, + # has_bn=False, norm_layer=norm_layer, + # has_relu=False, has_bias=False), + # nn.Sigmoid() + # ) + self._Fch = Fch + self._scale = scale + self._branch = branch + + @staticmethod + def _latency(h, w, C_in, C_out): + layer = FeatureFusion(C_in, C_out) + latency = compute_latency(layer, (1, C_in, h, w)) + return latency + + def forward_latency(self, size): + name = "ff_H%d_W%d_C%d"%(size[1], size[2], size[0]) + if name in latency_lookup_table: + latency = latency_lookup_table[name] + return latency, size + else: + print("not found in latency_lookup_table:", name) + latency = FeatureFusion._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._scale*self._Fch*self._branch) + latency_lookup_table[name] = latency + np.save("latency_lookup_table.npy", latency_lookup_table) + return latency, size + + def forward(self, fm): + # fm is already a concatenation of multiple scales + fm = self.conv_1x1(fm) + return fm + # fm_se = self.channel_attention(fm) + # output = fm + fm * fm_se + # return output + + +class Head(nn.Module): + def __init__(self, in_planes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)): + super(Head, self).__init__() + if in_planes <= 64: + mid_planes = in_planes + elif in_planes <= 256: + if is_aux: + mid_planes = in_planes + else: + mid_planes = in_planes + else: + # in_planes > 256: + if is_aux: + mid_planes = in_planes // 2 + else: + mid_planes = in_planes // 2 + + self.att_sa = Self_Attn(dim=in_planes, fmap_size=fmap_size, dim_out=mid_planes, proj_factor=4, downsample=False) + # self.conv_3x3 = ConvBnRelu(in_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False) + self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0) + self._in_planes = in_planes + self._out_planes = out_planes + self._Fch = Fch + self._scale = scale + self._branch = branch + + @staticmethod + def _latency(h, w, C_in, C_out=19): + layer = Head(C_in, C_out) + latency = compute_latency(layer, (1, C_in, h, w)) + return latency + + def forward_latency(self, size): + assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes) + name = "head_H%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes) + if name in latency_lookup_table: + latency = latency_lookup_table[name] + return latency, (self._out_planes, size[1], size[2]) + else: + print("not found in latency_lookup_table:", name) + latency = Head._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes) + latency_lookup_table[name] = latency + np.save("latency_lookup_table.npy", latency_lookup_table) + return latency, (self._out_planes, size[1], size[2]) + + def forward(self, x): + # fm = self.conv_3x3(x) + fm = self.att_sa(x) + output = self.conv_1x1(fm) + return output + +class Decoder(nn.Module): + def __init__(self, in_planes, low_level_inplanes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)): + super(Decoder, self).__init__() + C_low = 48 + self.feature_projection = ConvNorm(low_level_inplanes, C_low, kernel_size=1, stride=1, padding=0, bias=False, groups=1, slimmable=False) + + # in_planes = in_planes + C_low + if in_planes <= 64: + mid_planes = in_planes + elif in_planes <= 256: + if is_aux: + mid_planes = in_planes + else: + mid_planes = in_planes + else: + # in_planes > 256: + if is_aux: + mid_planes = in_planes // 2 + else: + mid_planes = in_planes // 
2 + + + self.att_sa = Self_Attn(dim=in_planes, fmap_size=fmap_size, dim_out=mid_planes, proj_factor=4, downsample=False) + self.conv_3x3 = ConvBnRelu(mid_planes + C_low, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False) + self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0) + self._in_planes = in_planes + self._out_planes = out_planes + self._Fch = Fch + self._scale = scale + self._branch = branch + + @staticmethod + def _latency(h, w, C_in, C_out=19): + layer = Head(C_in, C_out) + latency = compute_latency(layer, (1, C_in, h, w)) + return latency + + def forward_latency(self, size): + assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes) + name = "head_H%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes) + if name in latency_lookup_table: + latency = latency_lookup_table[name] + return latency, (self._out_planes, size[1], size[2]) + else: + print("not found in latency_lookup_table:", name) + latency = Head._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes) + latency_lookup_table[name] = latency + np.save("latency_lookup_table.npy", latency_lookup_table) + return latency, (self._out_planes, size[1], size[2]) + + def forward(self, x, low_level_feat): + low_level_feat = self.feature_projection(low_level_feat) + x = self.att_sa(x) + x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=False) + x = torch.cat((x, low_level_feat), dim=1) + # x = self.att_sa(x) + x = self.conv_3x3(x) + output = self.conv_1x1(x) + return output + +class BasicResidual_downup_2x(nn.Module): + def __init__(self, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1): + super(BasicResidual_downup_2x, self).__init__() + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + groups = 1 + self.C_in = C_in + self.C_out = C_out + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + self.groups = groups + assert stride in [1, 2] + if self.stride == 2: self.dilation = 1 + + self.relu = nn.ReLU(inplace=True) + self.conv1 = nn.Conv2d(C_in, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False) + # self.bn1 = nn.BatchNorm2d(C_out) + self.bn1 = BatchNorm2d(C_out) + self.conv2 = nn.Conv2d(C_out, C_out, 3, 1, padding=dilation, dilation=dilation, groups=groups, bias=False) + # self.bn2 = nn.BatchNorm2d(C_out) + self.bn2 = BatchNorm2d(C_out) + if self.stride==1: + self.downsample = nn.Sequential( + nn.Conv2d(C_in, C_out, 1, 1, padding=0, dilation=dilation, groups=groups, bias=False), + BatchNorm2d(C_out) + ) + + def forward(self, x): + out = F.interpolate(x, size=(int(x.size(2))//2, int(x.size(3))//2), mode='bilinear', align_corners=False) + out = self.conv1(out) + out = self.bn1(out) + out = self.relu(out) + out = self.conv2(out) + out = self.bn2(out) + if self.stride == 1: + out = F.interpolate(out, size=(int(x.size(2)), int(x.size(3))), mode='bilinear', align_corners=False) + out = out + self.downsample(x) + out = self.relu(out) + return out + +class PanopticHead(nn.Module): + def __init__(self, in_planes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)): + super(PanopticHead, self).__init__() + if in_planes <= 64: + mid_planes = in_planes + elif in_planes <= 256: + if is_aux: + mid_planes = in_planes + else: + mid_planes = in_planes + else: + # in_planes > 256: + if is_aux: + mid_planes = in_planes // 2 
+ else: + mid_planes = in_planes // 2 + + decoder2_planes = mid_planes // 2 + + self.att_sa = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=in_planes, proj_factor=4, downsample=False) + self.decoder1 = BasicResidual_downup_2x(in_planes, mid_planes, 3, 1, 1) + self.conv_3x3 = ConvBnRelu(mid_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False) + self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0) + self._in_planes = in_planes + self._out_planes = out_planes + self._Fch = Fch + self._scale = scale + self._branch = branch + + # self.att_sa2 = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=mid_planes, proj_factor=4, downsample=False) + self.decoder2 = BasicResidual_downup_2x(in_planes, decoder2_planes, 3, 1, 1) + self.center_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False) + self.center_conv_1x1 = nn.Conv2d(mid_planes, 1, kernel_size=1, stride=1, padding=0) + + self.offset_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False) + self.offset_conv_1x1 = nn.Conv2d(mid_planes, 2, kernel_size=1, stride=1, padding=0) + + @staticmethod + def _latency(h, w, C_in, C_out=19): + layer = PanopticHead(C_in, C_out) + latency = compute_latency(layer, (1, C_in, h, w)) + return latency + + def forward_latency(self, size): + assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes) + name = "panoptichead%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes) + if name in latency_lookup_table: + latency = latency_lookup_table[name] + return latency, (self._out_planes, size[1], size[2]) + else: + print("not found in latency_lookup_table:", name) + latency = Head._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes) + latency_lookup_table[name] = latency + np.save("latency_lookup_table.npy", latency_lookup_table) + return latency, (self._out_planes, size[1], size[2]) + + def forward(self, x): + output_dict = OrderedDict() + xs = self.att_sa(x) + + # semantic = self.att_sa1(x) + semantic = self.decoder1(xs) + semantic = self.conv_3x3(semantic) + semantic = self.conv_1x1(semantic) + + # other = self.att_sa2(x) + other = self.decoder2(x) + center = self.center_conv_3x3(other) + center = self.center_conv_1x1(center) + + offset = self.offset_conv_3x3(other) + offset = self.offset_conv_1x1(offset) + + output_dict['semantic'] = semantic + output_dict['center'] = center + output_dict['offset'] = offset + + return output_dict + +class PanopticHeadDecoder(nn.Module): + def __init__(self, in_planes, low_level_inplanes, out_planes=19, Fch=16, scale=4, branch=2, is_aux=False, norm_layer=nn.BatchNorm2d, fmap_size=(128, 256)): + super(PanopticHeadDecoder, self).__init__() + + C_low = 48 + self.feature_projection = ConvNorm(low_level_inplanes, C_low, kernel_size=1, stride=1, padding=0, bias=False, groups=1, slimmable=False) + self.feature_projection_sem = ConvNorm(low_level_inplanes, C_low, kernel_size=1, stride=1, padding=0, bias=False, groups=1, slimmable=False) + # in_planes = in_planes + C_low + + if in_planes <= 64: + mid_planes = in_planes + elif in_planes <= 256: + if is_aux: + mid_planes = in_planes + else: + mid_planes = in_planes + else: + # in_planes > 256: + if is_aux: + mid_planes = in_planes // 2 + else: + mid_planes = in_planes // 2 + + decoder2_planes = mid_planes // 2 + + self.att_sa = 
+
+        # self.att_sa1 = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=mid_planes, proj_factor=4, downsample=False)
+        self.decoder1 = BasicResidual_downup_2x(in_planes+C_low, mid_planes, 3, 1, 1)
+        self.conv_3x3 = ConvBnRelu(mid_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
+        self.conv_1x1 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, stride=1, padding=0)
+        self._in_planes = in_planes
+        self._out_planes = out_planes
+        self._Fch = Fch
+        self._scale = scale
+        self._branch = branch
+
+        # self.att_sa2 = Self_Attn(dim=in_planes, fmap_size=(128, 256), dim_out=mid_planes, proj_factor=4, downsample=False)
+        self.decoder2 = BasicResidual_downup_2x(in_planes+C_low, decoder2_planes, 3, 1, 1)
+        self.center_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
+        self.center_conv_1x1 = nn.Conv2d(mid_planes, 1, kernel_size=1, stride=1, padding=0)
+
+        self.offset_conv_3x3 = ConvBnRelu(decoder2_planes, mid_planes, 3, 1, 1, has_bn=True, norm_layer=norm_layer, has_relu=True, has_bias=False)
+        self.offset_conv_1x1 = nn.Conv2d(mid_planes, 2, kernel_size=1, stride=1, padding=0)
+
+    @staticmethod
+    def _latency(h, w, C_in, C_out=19):
+        # approximated with a plain PanopticHead; building the decoder here would
+        # also require the low-level feature size
+        layer = PanopticHead(C_in, C_out)
+        latency = compute_latency(layer, (1, C_in, h, w))
+        return latency
+
+    def forward_latency(self, size):
+        assert size[0] == self._in_planes, "size[0] %d, self._in_planes %d"%(size[0], self._in_planes)
+        name = "panopticheaddecoder_H%d_W%d_Cin%d_Cout%d"%(size[1], size[2], size[0], self._out_planes)
+        if name in latency_lookup_table:
+            latency = latency_lookup_table[name]
+            return latency, (self._out_planes, size[1], size[2])
+        else:
+            print("not found in latency_lookup_table:", name)
+            latency = PanopticHeadDecoder._latency(size[1], size[2], self._scale*self._Fch*self._branch, self._out_planes)
+            latency_lookup_table[name] = latency
+            np.save("latency_lookup_table.npy", latency_lookup_table)
+            return latency, (self._out_planes, size[1], size[2])
+
+    def forward(self, x, low_level_feat):
+        output_dict = OrderedDict()
+
+        xs = self.att_sa(x)
+        low_level_feat_sem = self.feature_projection_sem(low_level_feat)
+        xs = F.interpolate(xs, size=low_level_feat_sem.size()[2:], mode='bilinear', align_corners=False)
+        xs = torch.cat((xs, low_level_feat_sem), dim=1)
+
+        semantic = self.decoder1(xs)
+        semantic = self.conv_3x3(semantic)
+        semantic = self.conv_1x1(semantic)
+
+        low_level_feat = self.feature_projection(low_level_feat)
+        x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=False)
+        x = torch.cat((x, low_level_feat), dim=1)
+
+        other = self.decoder2(x)
+        center = self.center_conv_3x3(other)
+        center = self.center_conv_1x1(center)
+
+        offset = self.offset_conv_3x3(other)
+        offset = self.offset_conv_1x1(offset)
+
+        output_dict['semantic'] = semantic
+        output_dict['center'] = center
+        output_dict['offset'] = offset
+
+        return output_dict
diff --git a/CDARTS_segmentation/train/slimmable_ops.py b/CDARTS_segmentation/train/slimmable_ops.py
new file mode 100644
index 0000000..b79f66f
--- /dev/null
+++ b/CDARTS_segmentation/train/slimmable_ops.py
@@ -0,0 +1,72 @@
+import torch.nn as nn
+from pdb import set_trace as bp
+from layers import NaiveSyncBatchNorm
+
+BatchNorm2d = NaiveSyncBatchNorm
+
+def make_divisible(v, divisor=8, min_value=1):
+    """
+    forked from slim:
+    https://github.com/tensorflow/models/blob/\
0344c5503ee55e24f0de7f37336a6e08f10976fd/\ + research/slim/nets/mobilenet/mobilenet.py#L62-L69 + """ + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class USConv2d(nn.Conv2d): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, depthwise=False, bias=True, width_mult_list=[1.]): + super(USConv2d, self).__init__( + in_channels, out_channels, + kernel_size, stride=stride, padding=padding, dilation=dilation, + groups=groups, bias=bias) + self.depthwise = depthwise + self.in_channels_max = in_channels + self.out_channels_max = out_channels + self.width_mult_list = width_mult_list + self.ratio = (1., 1.) + + def set_ratio(self, ratio): + self.ratio = ratio + + def forward(self, input): + assert self.ratio[0] in self.width_mult_list, str(self.ratio[0]) + " in? " + str(self.width_mult_list) + self.in_channels = make_divisible(self.in_channels_max * self.ratio[0]) + assert self.ratio[1] in self.width_mult_list, str(self.ratio[1]) + " in? " + str(self.width_mult_list) + self.out_channels = make_divisible(self.out_channels_max * self.ratio[1]) + self.groups = self.in_channels if self.depthwise else 1 + weight = self.weight[:self.out_channels, :self.in_channels, :, :] + if self.bias is not None: + bias = self.bias[:self.out_channels] + else: + bias = self.bias + y = nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups) + return y + + +class USBatchNorm2d(BatchNorm2d): + def __init__(self, num_features, width_mult_list=[1.]): + super(USBatchNorm2d, self).__init__( + num_features, affine=True, track_running_stats=False) + self.num_features_max = num_features + self.width_mult_list = width_mult_list + # for tracking performance during training + self.bn = nn.ModuleList( + [ BatchNorm2d(i, affine=True) for i in [ make_divisible(self.num_features_max * width_mult) for width_mult in width_mult_list ] ] + ) + self.ratio = 1. 
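+
+    # A private BatchNorm2d is kept for every width in width_mult_list so that
+    # running statistics stay separate per sliced channel count. Hypothetical
+    # usage sketch (channel counts assume make_divisible keeps them unchanged):
+    #   bn = USBatchNorm2d(64, width_mult_list=[0.5, 1.])
+    #   bn.set_ratio(0.5)                  # route through the BN built for 32 channels
+    #   y = bn(torch.randn(2, 32, 8, 8))   # input already sliced to 32 channels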
+ + def set_ratio(self, ratio): + self.ratio = ratio + + def forward(self, input): + assert self.ratio in self.width_mult_list + idx = self.width_mult_list.index(self.ratio) + y = self.bn[idx](input) + return y diff --git a/CDARTS_segmentation/train/test.py b/CDARTS_segmentation/train/test.py new file mode 100644 index 0000000..b2b21e5 --- /dev/null +++ b/CDARTS_segmentation/train/test.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +import os +import time +import cv2 +cv2.setNumThreads(0) +import torchvision +from PIL import Image +import argparse +import numpy as np + +import torch +import torch.multiprocessing as mp + +from utils.pyt_utils import ensure_dir, link_file, load_model, parse_devices +from utils.visualize import print_iou, show_prediction +from engine.tester import Tester +from engine.logger import get_logger +from seg_opr.metric import hist_info, compute_score +from datasets.cityscapes import Cityscapes + +logger = get_logger() + + +cityscapes_trainID2id = { + 0: 7, + 1: 8, + 2: 11, + 3: 12, + 4: 13, + 5: 17, + 6: 19, + 7: 20, + 8: 21, + 9: 22, + 10: 23, + 11: 24, + 12: 25, + 13: 26, + 14: 27, + 15: 28, + 16: 31, + 17: 32, + 18: 33, + 19: 0 +} + +class SegTester(Tester): + def func_per_iteration(self, data, device, iter=None): + if self.config is not None: config = self.config + img = data['data'] + label = data['label'] + name = data['fn'] + + if len(config.eval_scale_array) == 1: + pred = self.whole_eval(img, None, device) + else: + pred = self.sliding_eval(img, config.eval_crop_size, config.eval_stride_rate, device) + + if self.show_prediction: + colors = self.dataset.get_class_colors() + image = img + comp_img = show_prediction(colors, config.background, image, pred) + cv2.imwrite(os.path.join(os.path.realpath('.'), self.config.save, "test", name+".viz.png"), comp_img[:,:,::-1]) + + for x in range(pred.shape[0]): + for y in range(pred.shape[1]): + pred[x, y] = cityscapes_trainID2id[pred[x, y]] + cv2.imwrite(os.path.join(os.path.realpath('.'), self.config.save, "test", name+".png"), pred) + + def compute_metric(self, results): + hist = np.zeros((self.config.num_classes, self.config.num_classes)) + correct = 0 + labeled = 0 + count = 0 + for d in results: + hist += d['hist'] + correct += d['correct'] + labeled += d['labeled'] + count += 1 + + iu, mean_IU, mean_IU_no_back, mean_pixel_acc = compute_score(hist, correct, labeled) + result_line = print_iou(iu, mean_pixel_acc, self.dataset.get_class_names(), True) + return result_line, mean_IU diff --git a/CDARTS_segmentation/train/test_seg.py b/CDARTS_segmentation/train/test_seg.py new file mode 100644 index 0000000..27b5c9e --- /dev/null +++ b/CDARTS_segmentation/train/test_seg.py @@ -0,0 +1,191 @@ +from __future__ import division +import os +import sys +import time +import glob +import json +import yaml +import logging +import argparse +from tqdm import tqdm + +import torch +import torch.nn as nn +import torch.utils +import torch.nn.functional as F +import torch.optim as optim +from tensorboardX import SummaryWriter + +import numpy as np +from thop import profile + +from config_test import config +if config.is_eval: + config.save = 'eval-{}-{}'.format(config.save, time.strftime("%Y%m%d-%H%M%S")) +else: + config.save = 'train-{}-{}'.format(config.save, time.strftime("%Y%m%d-%H%M%S")) +from dataloader import get_train_loader, CyclicIterator +from datasets import Cityscapes + +import dataloaders +from utils.init_func import init_weight +from eval import SegEvaluator +from test import SegTester + +from 
utils.darts_utils import create_exp_dir, save, plot_op, plot_path_width, objective_acc_lat +from model_seg import Network_Multi_Path_Infer_SPOS as Network +import seg_metrics + +from utils.pyt_utils import load_pretrain + +## dist train +try: + import apex + from apex import amp + from apex.parallel import DistributedDataParallel as DDP + from apex.parallel import convert_syncbn_model + has_apex = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + has_apex = False + +# The first arg parser parses out only the --config argument, this argument is used to +# load a yaml file containing key-values that override the defaults for the main parser below +config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False) +parser.add_argument('-c', '--config', default='../configs/auto2/sz512drop0.2.yaml', type=str, metavar='FILE', + help='YAML config file specifying default arguments') + +parser = argparse.ArgumentParser(description='PyTorch Training') +parser.add_argument("--local_rank", default=0, type=int) +parser.add_argument("--world_size", default=1, type=int) +parser.add_argument("--seed", default=12345, type=int) + +def _parse_args(): + # Do we have a config file to parse? + args_config, remaining = config_parser.parse_known_args() + if args_config.config: + with open(args_config.config, 'r') as f: + cfg = yaml.safe_load(f) + parser.set_defaults(**cfg) + + # The main arg parser parses the rest of the args, the usual + # defaults will have been overridden if config file specified. + args = parser.parse_args(remaining) + + # Cache the args as a text string to save them in the output dir later + args_text = yaml.safe_dump(args.__dict__, default_flow_style=False) + return args, args_text + +def main(): + args, args_text = _parse_args() + + # dist init + torch.distributed.init_process_group(backend='nccl', init_method='tcp://127.0.0.1:26442', world_size=1, rank=0) + config.device = 'cuda:%d' % args.local_rank + torch.cuda.set_device(args.local_rank) + args.world_size = torch.distributed.get_world_size() + args.local_rank = torch.distributed.get_rank() + logging.info("rank: {} world_size: {}".format(args.local_rank, args.world_size)) + + if args.local_rank == 0: + create_exp_dir(config.save, scripts_to_save=glob.glob('*.py')+glob.glob('*.sh')) + logger = SummaryWriter(config.save) + log_format = '%(asctime)s %(message)s' + logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p') + fh = logging.FileHandler(os.path.join(config.save, 'log.txt')) + fh.setFormatter(logging.Formatter(log_format)) + logging.getLogger().addHandler(fh) + logging.info("args = %s", str(config)) + else: + logger = None + + # preparation ################ + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + # data loader ########################### + if config.is_test: + data_setting = {'img_root': config.img_root_folder, + 'gt_root': config.gt_root_folder, + 'train_source': config.train_eval_source, + 'eval_source': config.eval_source, + 'test_source': config.test_source, + 'down_sampling': config.down_sampling} + else: + data_setting = {'img_root': config.img_root_folder, + 'gt_root': config.gt_root_folder, + 'train_source': config.train_source, + 'eval_source': config.eval_source, + 'test_source': config.test_source, + 'down_sampling': config.down_sampling} + + with 
open(config.json_file, 'r') as f: + model_dict = json.loads(f.read()) + + model = Network( + model_dict["ops"], model_dict["paths"], model_dict["downs"], model_dict["widths"], model_dict["lasts"], + num_classes=config.num_classes, layers=config.layers, Fch=config.Fch, width_mult_list=config.width_mult_list, stem_head_width=config.stem_head_width) + + if args.local_rank == 0: + logging.info("net: " + str(model)) + flops, params = profile(model, inputs=(torch.randn(1, 3, 1024, 2048),), verbose=False) + logging.info("params = %fMB, FLOPs = %fGB", params / 1e6, flops / 1e9) + logging.info("ops:" + str(model.ops)) + logging.info("path:" + str(model.paths)) + logging.info("last:" + str(model.lasts)) + with open(os.path.join(config.save, 'args.yaml'), 'w') as f: + f.write(args_text) + + model = model.cuda() + init_weight(model, nn.init.kaiming_normal_, torch.nn.BatchNorm2d, config.bn_eps, config.bn_momentum, mode='fan_in', nonlinearity='relu') + + model = load_pretrain(model, config.model_path) + + # partial = torch.load(config.model_path) + # state = model.state_dict() + # pretrained_dict = {k: v for k, v in partial.items() if k in state} + # state.update(pretrained_dict) + # model.load_state_dict(state) + + eval_model = model + evaluator = SegEvaluator(Cityscapes(data_setting, 'val', None), config.num_classes, config.image_mean, + config.image_std, eval_model, config.eval_scale_array, config.eval_flip, 0, out_idx=0, config=config, + verbose=False, save_path=None, show_image=False, show_prediction=False) + tester = SegTester(Cityscapes(data_setting, 'test', None), config.num_classes, config.image_mean, + config.image_std, eval_model, config.eval_scale_array, config.eval_flip, 0, out_idx=0, config=config, + verbose=False, save_path=None, show_prediction=False) + + # Cityscapes ########################################### + logging.info(config.model_path) + logging.info(config.save) + with torch.no_grad(): + if config.is_test: + # test + print("[test...]") + with torch.no_grad(): + test(0, model, tester, logger) + else: + # validation + print("[validation...]") + valid_mIoU = infer(model, evaluator, logger) + logger.add_scalar("mIoU/val", valid_mIoU, 0) + logging.info("Model valid_mIoU %.3f"%(valid_mIoU)) + +def infer(model, evaluator, logger): + model.eval() + # _, mIoU = evaluator.run_online() + _, mIoU = evaluator.run_online_multiprocess() + return mIoU + +def test(epoch, model, tester, logger): + output_path = os.path.realpath('.') + os.system("mkdir %s"%os.path.join(output_path, config.save, "test")) + model.eval() + tester.run_online_multiprocess() + os.system("mv %s %s"%(os.path.join(output_path, config.save, "test"), os.path.join(output_path, config.save, "test_%d_%d"%(0, epoch)))) + +if __name__ == '__main__': + main() diff --git a/CDARTS_segmentation/train/train_ade20k_cydas.py b/CDARTS_segmentation/train/train_ade20k_cydas.py new file mode 100644 index 0000000..021a148 --- /dev/null +++ b/CDARTS_segmentation/train/train_ade20k_cydas.py @@ -0,0 +1,569 @@ +from __future__ import division +import os +import sys +import time +import glob +import json +import logging +import argparse +from tqdm import tqdm + +import torch +import torch.nn as nn +import torch.utils +import torch.nn.functional as F +import torch.optim as optim +import torch.distributed as dist +from tensorboardX import SummaryWriter + +import numpy as np +import _init_paths +from ptflops import get_model_complexity_info +from dataloader import get_train_loader, CyclicIterator +from datasets import Cityscapes + +import 
dataloaders +from utils.init_func import init_weight +from utils.lr_scheduler import Iter_LR_Scheduler +from seg_opr.loss_opr import ProbOhemCrossEntropy2d +from eval import SegEvaluator +from test import SegTester + +from utils.darts_utils import create_exp_dir, save, plot_op, plot_path_width, objective_acc_lat +from utils.dist_utils import reduce_tensor, ModelEma +from cydas import CyDASseg as Network +import seg_metrics + +import yaml +import timm +from timm.optim import create_optimizer +from utils.pyt_utils import AverageMeter, to_cuda, get_loss_info_str, compute_hist, compute_hist_np, load_pretrain + +from detectron2.config import get_cfg +from detectron2.engine import launch, default_setup, default_argument_parser +import detectron2.data.transforms as T +from detectron2.structures import BitMasks, ImageList, Instances +from detectron2.data import MetadataCatalog, DatasetMapper, build_detection_train_loader, build_detection_test_loader + +from detectron2.config import configurable +from detectron2.data.build import _test_loader_from_config, trivial_batch_collator +from detectron2.data.samplers import InferenceSampler, RepeatFactorTrainingSampler, TrainingSampler +from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, MapDataset + +from detectron2.projects.panoptic_deeplab import ( + PanopticDeeplabDatasetMapper, + add_panoptic_deeplab_config, +) + +## dist train +try: + import apex + from apex import amp + from apex.parallel import DistributedDataParallel as DDP + from apex.parallel import convert_syncbn_model + has_apex = True +except ImportError: + from torch.nn.parallel import DistributedDataParallel as DDP + has_apex = False + +def adjust_learning_rate(base_lr, power, optimizer, epoch, total_epoch): + for param_group in optimizer.param_groups: + param_group['lr'] = param_group['lr'] * power + + +# The first arg parser parses out only the --config argument, this argument is used to +# load a yaml file containing key-values that override the defaults for the main parser below +config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False) +parser.add_argument('-c', '--config', default='../configs/ade/cydas.yaml', type=str, metavar='FILE', + help='YAML config file specifying default arguments') + +parser = argparse.ArgumentParser(description='PyTorch Training') +parser.add_argument('--det2_cfg', type=str, default='configs/ADE20K/base.yaml', help='') +parser.add_argument('--save', type=str, default='../OUTPUT/train_', help='') +parser.add_argument('--exp_name', type=str, default='ade20k', help='') +parser.add_argument('--pretrain', type=str, default=None, help='resume path') +parser.add_argument('--resume', type=str, default='../OUTPUT/train/', help='resume path') +parser.add_argument("--local_rank", default=0, type=int) +parser.add_argument("--num_classes", default=150, type=int) +parser.add_argument("--max_iteration", default=160000, type=int) +parser.add_argument("--world_size", default=1, type=int) +parser.add_argument("--eval_height", default=1025, type=int, help='train height') +parser.add_argument("--eval_width", default=2049, type=int, help='train width') +parser.add_argument("--test_epoch", default=250, type=int, help='Epochs for test') +parser.add_argument("--batch_size", default=12, type=int, help='batch size') +parser.add_argument("--Fch", default=12, type=int, help='Fch') +parser.add_argument('--stem_head_width', type=float, default=1.0, help='base learning rate') + +## new retrain ### 
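+# NOTE: every default below can be overridden by the YAML file passed via
+# -c/--config: _parse_args() feeds the parsed YAML into parser.set_defaults(),
+# so e.g. a config line "epochs: 500" would replace the default of 4000.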
+parser.add_argument('--sched', default='step', type=str, metavar='SCHEDULER', + help='LR scheduler (default: "step"') +parser.add_argument('--epochs', type=int, default=4000, help='num of training epochs') +parser.add_argument('--dataset', type=str, default='cityscapes', help='pascal or cityscapes') +parser.add_argument('--base_lr', type=float, default=0.05, help='base learning rate') +parser.add_argument('--warmup_start_lr', type=float, default=5e-6, help='warm up learning rate') +parser.add_argument('--lr-step', type=float, default=None) +parser.add_argument('--warmup-iters', type=int, default=1000) +parser.add_argument('--min-lr', type=float, default=None) +parser.add_argument('--layers', type=int, default=20, help='layers') +parser.add_argument('--size_divisibility', type=int, default=32, help='size_divisibility') +parser.add_argument('--crop_size', type=int, default=769, help='image crop size') +parser.add_argument('--resize', type=int, default=769, help='image crop size') +parser.add_argument("--image_height", default=513, type=int, help='train height') +parser.add_argument("--image_width", default=1025, type=int, help='train width') +parser.add_argument('--workers', type=int, default=4, help='number of data loading workers') +parser.add_argument('--dist', type=bool, default=True) +parser.add_argument('--autodeeplab', type=str, default='train_seg') +parser.add_argument('--max-iteration', default=1000000, type=bool) +parser.add_argument('--mode', default='poly', type=str, help='how lr decline') +parser.add_argument('--train_mode', type=str, default='iter', choices=['iter', 'epoch']) + +parser.add_argument("--data_path", default='/home/hongyuan/data/cityscapes', type=str, help='If specified, replace config.load_path') +parser.add_argument("--load_path", default='', type=str, help='If specified, replace config.load_path') +parser.add_argument("--json_file", default='jsons/0.json', type=str, help='model_arch') +parser.add_argument("--seed", default=12345, type=int, help="random seed") +parser.add_argument('--sync_bn', action='store_false', + help='Enable NVIDIA Apex or Torch synchronized BatchNorm.') +parser.add_argument('--random_sample', action='store_true', + help='Random sample path.') +parser.add_argument('--drop_path_prob', type=float, default=0.0, help='drop path prob') + +# Optimizer parameters +parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "sgd"') +parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: 1e-8)') +parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') +parser.add_argument('--weight-decay', type=float, default=0.0001, + help='weight decay (default: 0.0001)') + +# Model Exponential Moving Average +parser.add_argument('--model-ema', action='store_true', default=False, + help='Enable tracking moving average of model weights') +parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, + help='Force ema to be tracked on CPU, rank=0 node only. 
Disables EMA validation.') +parser.add_argument('--model-ema-decay', type=float, default=0.9998, + help='decay factor for model weights moving average (default: 0.9998)') + +# train val +parser.add_argument('--bn_eps', type=float, default=1e-5, help='bn eps') +parser.add_argument('--bn_momentum', type=float, default=0.01, help='bn momentum') +parser.add_argument('--ignore', type=int, default=255, help='semantic ignore') +parser.add_argument('--eval_flip', action='store_true', default=False, + help='semantic eval flip') + + +def _parse_args(): + # Do we have a config file to parse? + args_config, remaining = config_parser.parse_known_args() + if args_config.config: + with open(args_config.config, 'r') as f: + cfg = yaml.safe_load(f) + parser.set_defaults(**cfg) + + # The main arg parser parses the rest of the args, the usual + # defaults will have been overridden if config file specified. + args = parser.parse_args(remaining) + + # Cache the args as a text string to save them in the output dir later + args_text = yaml.safe_dump(args.__dict__, default_flow_style=False) + return args, args_text + +def build_sem_seg_train_aug(cfg): + augs = [ + T.ResizeShortestEdge( + cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING + ) + ] + if cfg.INPUT.CROP.ENABLED: + augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) + augs.append(T.RandomFlip()) + return augs + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + add_panoptic_deeplab_config(cfg) + cfg.merge_from_file(args.config_file) + # cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + +@configurable(from_config=_test_loader_from_config) +def build_batch_test_loader(dataset, *, mapper, sampler=None, num_workers=0): + """ + Similar to `build_detection_train_loader`, but uses a batch size of 1, + and :class:`InferenceSampler`. This sampler coordinates all workers to + produce the exact set of all samples. + This interface is experimental. + Args: + dataset (list or torch.utils.data.Dataset): a list of dataset dicts, + or a map-style pytorch dataset. They can be obtained by using + :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`. + mapper (callable): a callable which takes a sample (dict) from dataset + and returns the format to be consumed by the model. + When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``. + sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces + indices to be applied on ``dataset``. Default to :class:`InferenceSampler`, + which splits the dataset across all workers. + num_workers (int): number of parallel data loading workers + Returns: + DataLoader: a torch DataLoader, that loads the given detection + dataset, with test-time transformation and batching. + Examples: + :: + data_loader = build_detection_test_loader( + DatasetRegistry.get("my_test"), + mapper=DatasetMapper(...)) + # or, instantiate with a CfgNode: + data_loader = build_detection_test_loader(cfg, "my_test") + """ + if isinstance(dataset, list): + dataset = DatasetFromList(dataset, copy=False) + if mapper is not None: + dataset = MapDataset(dataset, mapper) + if sampler is None: + sampler = InferenceSampler(len(dataset)) + # Always use 1 image per worker during inference since this is the + # standard when reporting inference time in papers. 
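+    # NOTE: despite the single-image convention noted above, this loader batches
+    # 4 samples per step; change the BatchSampler size below to 1 to match it.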
+ batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 4, drop_last=False) + data_loader = torch.utils.data.DataLoader( + dataset, + num_workers=num_workers, + batch_sampler=batch_sampler, + collate_fn=trivial_batch_collator, + ) + return data_loader + + +def main(): + args, args_text = _parse_args() + + # dist init + torch.distributed.init_process_group(backend='nccl', init_method='env://') + torch.cuda.set_device(args.local_rank) + args.world_size = torch.distributed.get_world_size() + args.local_rank = torch.distributed.get_rank() + args.save = args.save + args.exp_name + + # detectron2 data loader ########################### + # det2_args = default_argument_parser().parse_args() + det2_args = args + det2_args.config_file = args.det2_cfg + cfg = setup(det2_args) + mapper = DatasetMapper(cfg, augmentations=build_sem_seg_train_aug(cfg)) + det2_dataset = iter(build_detection_train_loader(cfg, mapper=mapper)) + det2_val = build_batch_test_loader(cfg, cfg.DATASETS.TEST[0]) + len_det2_train = 20210 // cfg.SOLVER.IMS_PER_BATCH + + if args.local_rank == 0: + create_exp_dir(args.save, scripts_to_save=glob.glob('*.py')+glob.glob('*.sh')) + logger = SummaryWriter(args.save) + log_format = '%(asctime)s %(message)s' + logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p') + fh = logging.FileHandler(os.path.join(args.save, 'log.txt')) + fh.setFormatter(logging.Formatter(log_format)) + logging.getLogger().addHandler(fh) + logging.info("args = %s", str(args)) + else: + logger = None + + # preparation ################ + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + # config network and criterion ################ + gt_down_sampling = 1 + min_kept = int(args.batch_size * args.image_height * args.image_width // (16 * gt_down_sampling ** 2)) + ohem_criterion = ProbOhemCrossEntropy2d(ignore_label=255, thresh=0.7, min_kept=min_kept, use_weight=False) + + # data loader ########################### + + num_classes = args.num_classes + + with open(args.json_file, 'r') as f: + # dict_a = json.loads(f, cls=NpEncoder) + model_dict = json.loads(f.read()) + + width_mult_list = [4./12, 6./12, 8./12, 10./12, 1.,] + model = Network(Fch=args.Fch, num_classes=num_classes, stem_head_width=(args.stem_head_width, args.stem_head_width)) + last = model_dict["lasts"] + + if args.local_rank == 0: + with torch.cuda.device(0): + macs, params = get_model_complexity_info(model, (3, args.eval_height, args.eval_width), as_strings=True, + print_per_layer_stat=True, verbose=True) + logging.info('{:<30} {:<8}'.format('Computational complexity: ', macs)) + logging.info('{:<30} {:<8}'.format('Number of parameters: ', params)) + + with open(os.path.join(args.save, 'args.yaml'), 'w') as f: + f.write(args_text) + + init_weight(model, nn.init.kaiming_normal_, torch.nn.BatchNorm2d, args.bn_eps, args.bn_momentum, mode='fan_in', nonlinearity='relu') + + if args.pretrain: + model.backbone = load_pretrain(model.backbone, args.pretrain) + model = model.cuda() + + # if args.sync_bn: + # if has_apex: + # model = apex.parallel.convert_syncbn_model(model) + # else: + # model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + + # Optimizer ################################### + base_lr = args.base_lr + + if args.opt == "sgd": + optimizer = torch.optim.SGD(model.parameters(), lr=base_lr, momentum=args.momentum, weight_decay=args.weight_decay) + elif 
args.opt == "adam": + optimizer = torch.optim.Adam(model.parameters(), lr=base_lr, betas=(0.9, 0.999), eps=1e-08) + elif args.opt == "adamw": + optimizer = torch.optim.AdamW(model.parameters(), lr=base_lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay) + else: + optimizer = create_optimizer(args, model) + + if args.sched == "raw": + lr_scheduler =None + else: + max_iteration = args.epochs * len_det2_train + lr_scheduler = Iter_LR_Scheduler(args, max_iteration, len_det2_train) + + start_epoch = 0 + if os.path.exists(os.path.join(args.save, 'last.pth.tar')): + args.resume = os.path.join(args.save, 'last.pth.tar') + + if args.resume: + model_state_file = args.resume + if os.path.isfile(model_state_file): + checkpoint = torch.load(model_state_file, map_location=torch.device('cpu')) + start_epoch = checkpoint['start_epoch'] + model.load_state_dict(checkpoint['state_dict']) + optimizer.load_state_dict(checkpoint['optimizer']) + logging.info('Loaded checkpoint (starting from iter {})'.format(checkpoint['start_epoch'])) + + model_ema = None + if args.model_ema: + # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper + model_ema = ModelEma( + model, + decay=args.model_ema_decay, + device='cpu' if args.model_ema_force_cpu else '', + resume=None) + + if model_ema: + eval_model = model_ema.ema + else: + eval_model = model + + if has_apex: + model = DDP(model, delay_allreduce=True) + else: + model = DDP(model, device_ids=[args.local_rank]) + + best_valid_iou = 0. + best_epoch = 0 + temp_iou = 0. + avg_loss = -1 + + logging.info("rank: {} world_size: {}".format(args.local_rank, args.world_size)) + for epoch in range(start_epoch, args.epochs): + if args.local_rank == 0: + logging.info(args.load_path) + logging.info(args.save) + logging.info("lr: " + str(optimizer.param_groups[0]['lr'])) + + # training + drop_prob = args.drop_path_prob * epoch / args.epochs + # model.module.drop_path_prob(drop_prob) + + train_mIoU = train(len_det2_train, det2_dataset, model, model_ema, ohem_criterion, num_classes, lr_scheduler, optimizer, logger, epoch, args, cfg) + + # torch.cuda.empty_cache() + + # if epoch > args.epochs // 3: + if epoch >= 0: + temp_iou, avg_loss = validation(det2_val, eval_model, ohem_criterion, num_classes, args, cfg) + + torch.cuda.empty_cache() + if args.local_rank == 0: + logging.info("Epoch: {} train miou: {:.2f}".format(epoch+1, 100*train_mIoU)) + if temp_iou > best_valid_iou: + best_valid_iou = temp_iou + best_epoch = epoch + + if model_ema is not None: + torch.save({ + 'start_epoch': epoch + 1, + 'state_dict': model_ema.ema.state_dict(), + 'optimizer': optimizer.state_dict(), + # 'lr_scheduler': lr_scheduler.state_dict(), + }, os.path.join(args.save, 'best_checkpoint.pth.tar')) + else: + torch.save({ + 'start_epoch': epoch + 1, + 'state_dict': model.module.state_dict(), + 'optimizer': optimizer.state_dict(), + # 'lr_scheduler': lr_scheduler.state_dict(), + }, os.path.join(args.save, 'best_checkpoint.pth.tar')) + + logger.add_scalar("mIoU/val", temp_iou, epoch) + logging.info("[Epoch %d/%d] valid mIoU %.4f eval loss %.4f"%(epoch + 1, args.epochs, temp_iou, avg_loss)) + logging.info("Best valid mIoU %.4f Epoch %d"%(best_valid_iou, best_epoch)) + + if model_ema is not None: + torch.save({ + 'start_epoch': epoch + 1, + 'state_dict': model_ema.ema.state_dict(), + 'optimizer': optimizer.state_dict(), + # 'lr_scheduler': lr_scheduler.state_dict(), + }, os.path.join(args.save, 'last.pth.tar')) + else: + torch.save({ + 'start_epoch': epoch + 
1, + 'state_dict': model.module.state_dict(), + 'optimizer': optimizer.state_dict(), + # 'lr_scheduler': lr_scheduler.state_dict(), + }, os.path.join(args.save, 'last.pth.tar')) + + +def train(len_det2_train, det2_dataset, model, model_ema, criterion, num_classes, lr_scheduler, optimizer, logger, epoch, args, cfg): + + model.train() + pixel_mean = cfg.MODEL.PIXEL_MEAN + pixel_std = cfg.MODEL.PIXEL_STD + pixel_mean = torch.Tensor(pixel_mean).view(-1, 1, 1).cuda() + pixel_std = torch.Tensor(pixel_std).view(-1, 1, 1).cuda() + + metric = seg_metrics.Seg_Metrics(n_classes=num_classes) + lamb = 0.2 + # for i, sample in enumerate(train_loader): + for i in range(len_det2_train): + cur_iter = epoch * len_det2_train + i + lr_scheduler(optimizer, cur_iter) + + det2_data = next(det2_dataset) + det2_inputs = [x["image"].cuda(non_blocking=True) for x in det2_data] + det2_inputs = [(x - pixel_mean) / pixel_std for x in det2_inputs] + det2_inputs = ImageList.from_tensors(det2_inputs, args.size_divisibility).tensor + + b, c, h, w = det2_inputs.shape + if h % 32 != 0 or w % 32 != 0: + logging.info("pass bad data!") + continue + + det2_targets = [x["sem_seg"].cuda(non_blocking=True) for x in det2_data] + det2_targets = ImageList.from_tensors(det2_targets, args.size_divisibility, args.ignore).tensor + + N = det2_inputs.size(0) + + loss = 0 + description = "" + + logits8, logits16, logits32 = model(det2_inputs) + loss = loss + criterion(logits8, det2_targets) + if logits16 is not None: + loss = loss + lamb * criterion(logits16, det2_targets) + if logits32 is not None: + loss = loss + lamb * criterion(logits32, det2_targets) + + inter, union = seg_metrics.batch_intersection_union(logits8.data, det2_targets, num_classes) + inter = reduce_tensor(torch.FloatTensor(inter).cuda(), args.world_size) + union = reduce_tensor(torch.FloatTensor(union).cuda(), args.world_size) + metric.update(inter.cpu().numpy(), union.cpu().numpy(), N) + + if args.local_rank == 0: + description += "[mIoU%d: %.3f]"%(0, metric.get_scores()) + + torch.cuda.synchronize() + + reduced_loss = loss + reduced_loss = reduce_tensor(reduced_loss.data, args.world_size) + if args.local_rank == 0 and i % 20 == 0: + logger.add_scalar('loss/train', reduced_loss, epoch*len_det2_train+i) + logging.info('epoch: {0}\t''iter: {1}/{2}\t''lr: {3:.6f}\t''loss: {4:.4f}'.format( + epoch + 1, i + 1, len_det2_train, lr_scheduler.get_lr(optimizer), reduced_loss)) + + loss.backward() + optimizer.step() + optimizer.zero_grad() + torch.cuda.synchronize() + + if model_ema is not None: + model_ema.update(model) + + return metric.get_scores() + + +def validation(val_loader, model, criterion, n_classes, args, cfg): + device = torch.device('cuda:{}'.format(args.local_rank)) + + pixel_mean = cfg.MODEL.PIXEL_MEAN + pixel_std = cfg.MODEL.PIXEL_STD + pixel_mean = torch.Tensor(pixel_mean).view(-1, 1, 1).cuda() + pixel_std = torch.Tensor(pixel_std).view(-1, 1, 1).cuda() + + model.eval() + test_loss = 0.0 + hist_size = (n_classes, n_classes) + hist = torch.zeros(hist_size, dtype=torch.float32).cuda() + + for i, sample in enumerate(val_loader): + image = [x["image"].cuda(non_blocking=True) for x in sample] + image = [(x - pixel_mean) / pixel_std for x in image] + image = ImageList.from_tensors(image, args.size_divisibility).tensor + + target = [x["sem_seg"].cuda(non_blocking=True) for x in sample] + target = ImageList.from_tensors(target, args.size_divisibility, args.ignore).tensor + + N, H, W = target.shape + probs = torch.zeros((N, n_classes, H, W)).cuda() + probs.requires_grad = 
False
+
+        torch.cuda.synchronize()
+        if args.local_rank == 0:
+            logging.info("Evaluation [{}/{}]".format(i+1, len(val_loader)))
+        with torch.no_grad():
+            output = model(image)
+            prob = F.softmax(output, 1)
+            probs += prob
+            loss = criterion(output, target).detach().data
+            dist.all_reduce(loss, dist.ReduceOp.SUM)
+            test_loss += loss
+
+            if args.eval_flip:
+                output = model(torch.flip(image, dims=(3,)))
+                output = torch.flip(output, dims=(3,))
+                prob = F.softmax(output, 1)
+                probs += prob
+                loss = criterion(output, target).detach().data
+                dist.all_reduce(loss, dist.ReduceOp.SUM)
+                test_loss += loss
+
+        preds = torch.argmax(probs, dim=1)
+        hist_once = compute_hist(preds, target, n_classes, args.ignore)
+        hist = hist + hist_once
+        torch.cuda.synchronize()
+
+    if args.eval_flip:
+        # flipping doubles the number of accumulated loss terms
+        avg_loss = test_loss / (2 * len(val_loader))
+    else:
+        avg_loss = test_loss / len(val_loader)
+
+    dist.all_reduce(hist, dist.ReduceOp.SUM)
+    hist = hist.cpu().numpy().astype(np.float32)
+    IOUs = np.diag(hist) / (np.sum(hist, axis=0) + np.sum(hist, axis=1) - np.diag(hist))
+    mIOU = np.mean(IOUs)
+
+    return mIOU*100, avg_loss
+
+if __name__ == '__main__':
+    main()
diff --git a/CDARTS_segmentation/train/train_cydas.py b/CDARTS_segmentation/train/train_cydas.py
new file mode 100644
index 0000000..b7b1c86
--- /dev/null
+++ b/CDARTS_segmentation/train/train_cydas.py
@@ -0,0 +1,516 @@
+from __future__ import division
+import os
+import sys
+import time
+import glob
+import json
+import logging
+import argparse
+from tqdm import tqdm
+
+import torch
+import torch.nn as nn
+import torch.utils
+import torch.nn.functional as F
+import torch.optim as optim
+import torch.distributed as dist
+from tensorboardX import SummaryWriter
+
+import numpy as np
+import _init_paths
+from ptflops import get_model_complexity_info
+from dataloader import get_train_loader, CyclicIterator
+from datasets import Cityscapes
+
+import dataloaders
+from utils.init_func import init_weight
+from utils.lr_scheduler import Iter_LR_Scheduler
+from seg_opr.loss_opr import ProbOhemCrossEntropy2d
+from eval import SegEvaluator
+from test import SegTester
+
+from utils.darts_utils import create_exp_dir, save, plot_op, plot_path_width, objective_acc_lat
+from utils.dist_utils import reduce_tensor, ModelEma
+from cydas import CyDASseg as Network
+import seg_metrics
+
+import yaml
+import timm
+from timm.optim import create_optimizer
+from utils.pyt_utils import AverageMeter, to_cuda, get_loss_info_str, compute_hist, compute_hist_np, load_pretrain
+
+from detectron2.config import get_cfg
+from detectron2.engine import launch, default_setup, default_argument_parser
+import detectron2.data.transforms as T
+from detectron2.structures import BitMasks, ImageList, Instances
+from detectron2.data import MetadataCatalog, build_detection_train_loader
+from detectron2.projects.panoptic_deeplab import (
+    PanopticDeeplabDatasetMapper,
+    add_panoptic_deeplab_config,
+)
+
+## dist train
+try:
+    import apex
+    from apex import amp
+    from apex.parallel import DistributedDataParallel as DDP
+    from apex.parallel import convert_syncbn_model
+    has_apex = True
+except ImportError:
+    from torch.nn.parallel import DistributedDataParallel as DDP
+    has_apex = False
+
+def adjust_learning_rate(base_lr, power, optimizer, epoch, total_epoch):
+    for param_group in optimizer.param_groups:
+        param_group['lr'] = param_group['lr'] * power
+
+
+# The first arg parser parses out only the --config argument, this argument is used to
+# load a yaml file containing key-values that override the defaults
for the main parser below +config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False) +parser.add_argument('-c', '--config', default='../configs/auto2/cydas.yaml', type=str, metavar='FILE', + help='YAML config file specifying default arguments') + +parser = argparse.ArgumentParser(description='PyTorch Training') +parser.add_argument('--det2_cfg', type=str, default='configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml', help='') +parser.add_argument('--save', type=str, default='../OUTPUT/train_', help='') +parser.add_argument('--exp_name', type=str, default='cydas', help='') +parser.add_argument('--pretrain', type=str, default=None, help='resume path') +parser.add_argument('--size_divisibility', type=int, default=32, help='size_divisibility') +parser.add_argument('--resume', type=str, default='../OUTPUT/train/', help='resume path') +parser.add_argument("--local_rank", default=0, type=int) +parser.add_argument("--world_size", default=1, type=int) +parser.add_argument("--eval_height", default=1025, type=int, help='train height') +parser.add_argument("--eval_width", default=2049, type=int, help='train width') +parser.add_argument("--test_epoch", default=250, type=int, help='Epochs for test') +parser.add_argument("--batch_size", default=12, type=int, help='batch size') +parser.add_argument("--Fch", default=12, type=int, help='Fch') +parser.add_argument('--stem_head_width', type=float, default=1.0, help='base learning rate') + +## new retrain ### +parser.add_argument('--sched', default='step', type=str, metavar='SCHEDULER', + help='LR scheduler (default: "step"') +parser.add_argument('--epochs', type=int, default=4000, help='num of training epochs') +parser.add_argument('--dataset', type=str, default='cityscapes', help='pascal or cityscapes') +parser.add_argument('--base_lr', type=float, default=0.05, help='base learning rate') +parser.add_argument('--warmup_start_lr', type=float, default=5e-6, help='warm up learning rate') +parser.add_argument('--lr-step', type=float, default=None) +parser.add_argument('--warmup-iters', type=int, default=1000) +parser.add_argument('--min-lr', type=float, default=None) +parser.add_argument('--layers', type=int, default=20, help='layers') +parser.add_argument('--crop_size', type=int, default=769, help='image crop size') +parser.add_argument('--resize', type=int, default=769, help='image crop size') +parser.add_argument("--image_height", default=513, type=int, help='train height') +parser.add_argument("--image_width", default=1025, type=int, help='train width') +parser.add_argument('--workers', type=int, default=4, help='number of data loading workers') +parser.add_argument('--dist', type=bool, default=True) +parser.add_argument('--autodeeplab', type=str, default='train_seg') +parser.add_argument('--max-iteration', default=1000000, type=bool) +parser.add_argument('--mode', default='poly', type=str, help='how lr decline') +parser.add_argument('--train_mode', type=str, default='iter', choices=['iter', 'epoch']) + +parser.add_argument("--data_path", default='/home/hongyuan/data/cityscapes', type=str, help='If specified, replace config.load_path') +parser.add_argument("--load_path", default='', type=str, help='If specified, replace config.load_path') +parser.add_argument("--json_file", default='jsons/0.json', type=str, help='model_arch') +parser.add_argument("--seed", default=12345, type=int, help="random seed") +parser.add_argument('--sync_bn', action='store_false', + 
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.') +parser.add_argument('--random_sample', action='store_true', + help='Random sample path.') +parser.add_argument('--drop_path_prob', type=float, default=0.0, help='drop path prob') + +# Optimizer parameters +parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', + help='Optimizer (default: "sgd"') +parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', + help='Optimizer Epsilon (default: 1e-8)') +parser.add_argument('--momentum', type=float, default=0.9, metavar='M', + help='SGD momentum (default: 0.9)') +parser.add_argument('--weight-decay', type=float, default=0.0001, + help='weight decay (default: 0.0001)') + +# Model Exponential Moving Average +parser.add_argument('--model-ema', action='store_true', default=False, + help='Enable tracking moving average of model weights') +parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, + help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.') +parser.add_argument('--model-ema-decay', type=float, default=0.9998, + help='decay factor for model weights moving average (default: 0.9998)') + +# train val +parser.add_argument('--bn_eps', type=float, default=1e-5, help='bn eps') +parser.add_argument('--bn_momentum', type=float, default=0.01, help='bn momentum') +parser.add_argument('--ignore', type=int, default=255, help='semantic ignore') +parser.add_argument('--eval_flip', action='store_true', default=False, + help='semantic eval flip') + + +def _parse_args(): + # Do we have a config file to parse? + args_config, remaining = config_parser.parse_known_args() + if args_config.config: + with open(args_config.config, 'r') as f: + cfg = yaml.safe_load(f) + parser.set_defaults(**cfg) + + # The main arg parser parses the rest of the args, the usual + # defaults will have been overridden if config file specified. + args = parser.parse_args(remaining) + + # Cache the args as a text string to save them in the output dir later + args_text = yaml.safe_dump(args.__dict__, default_flow_style=False) + return args, args_text + +def build_sem_seg_train_aug(cfg): + augs = [ + T.ResizeShortestEdge( + cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING + ) + ] + if cfg.INPUT.CROP.ENABLED: + augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) + augs.append(T.RandomFlip()) + return augs + +def setup(args): + """ + Create configs and perform basic setups. 
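+    Merges the panoptic-deeplab defaults into a detectron2 config, applies
+    args.config_file, freezes the result, and runs default_setup.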
+ """ + cfg = get_cfg() + add_panoptic_deeplab_config(cfg) + cfg.merge_from_file(args.config_file) + # cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + +def main(): + args, args_text = _parse_args() + + # detectron2 data loader ########################### + # det2_args = default_argument_parser().parse_args() + det2_args = args + det2_args.config_file = args.det2_cfg + cfg = setup(det2_args) + mapper = PanopticDeeplabDatasetMapper(cfg, augmentations=build_sem_seg_train_aug(cfg)) + det2_dataset = iter(build_detection_train_loader(cfg, mapper=mapper)) + + # dist init + torch.distributed.init_process_group(backend='nccl', init_method='env://') + torch.cuda.set_device(args.local_rank) + args.world_size = torch.distributed.get_world_size() + args.local_rank = torch.distributed.get_rank() + + args.save = args.save + args.exp_name + + if args.local_rank == 0: + create_exp_dir(args.save, scripts_to_save=glob.glob('*.py')+glob.glob('*.sh')) + logger = SummaryWriter(args.save) + log_format = '%(asctime)s %(message)s' + logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p') + fh = logging.FileHandler(os.path.join(args.save, 'log.txt')) + fh.setFormatter(logging.Formatter(log_format)) + logging.getLogger().addHandler(fh) + logging.info("args = %s", str(args)) + else: + logger = None + + # preparation ################ + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + # config network and criterion ################ + gt_down_sampling = 1 + min_kept = int(args.batch_size * args.image_height * args.image_width // (16 * gt_down_sampling ** 2)) + ohem_criterion = ProbOhemCrossEntropy2d(ignore_label=255, thresh=0.7, min_kept=min_kept, use_weight=False) + + # data loader ########################### + + kwargs = {'num_workers': args.workers, 'pin_memory': True, 'drop_last': True} + train_loader, train_sampler, val_loader, val_sampler, num_classes = dataloaders.make_data_loader(args, **kwargs) + + with open(args.json_file, 'r') as f: + # dict_a = json.loads(f, cls=NpEncoder) + model_dict = json.loads(f.read()) + + width_mult_list = [4./12, 6./12, 8./12, 10./12, 1.,] + model = Network(Fch=args.Fch, num_classes=num_classes, stem_head_width=(args.stem_head_width, args.stem_head_width)) + + last = model_dict["lasts"] + + if args.local_rank == 0: + logging.info("net: " + str(model)) + with torch.cuda.device(0): + macs, params = get_model_complexity_info(model, (3, 1024, 2048), as_strings=True, + print_per_layer_stat=True, verbose=True) + logging.info('{:<30} {:<8}'.format('Computational complexity: ', macs)) + logging.info('{:<30} {:<8}'.format('Number of parameters: ', params)) + + with open(os.path.join(args.save, 'args.yaml'), 'w') as f: + f.write(args_text) + + init_weight(model, nn.init.kaiming_normal_, torch.nn.BatchNorm2d, args.bn_eps, args.bn_momentum, mode='fan_in', nonlinearity='relu') + + if args.pretrain: + model.backbone = load_pretrain(model.backbone, args.pretrain) + model = model.cuda() + + # if args.sync_bn: + # if has_apex: + # model = apex.parallel.convert_syncbn_model(model) + # else: + # model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + + # Optimizer ################################### + base_lr = args.base_lr + + if args.opt == "sgd": + optimizer = torch.optim.SGD(model.parameters(), lr=base_lr, momentum=args.momentum, weight_decay=args.weight_decay) + elif 
args.opt == "adam": + optimizer = torch.optim.Adam(model.parameters(), lr=base_lr, betas=(0.9, 0.999), eps=1e-08) + elif args.opt == "adamw": + optimizer = torch.optim.AdamW(model.parameters(), lr=base_lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay) + else: + optimizer = create_optimizer(args, model) + + if args.sched == "raw": + lr_scheduler =None + else: + max_iteration = len(train_loader) * args.epochs + lr_scheduler = Iter_LR_Scheduler(args, max_iteration, len(train_loader)) + + start_epoch = 0 + if os.path.exists(os.path.join(args.save, 'last.pth.tar')): + args.resume = os.path.join(args.save, 'last.pth.tar') + + if args.resume: + model_state_file = args.resume + if os.path.isfile(model_state_file): + checkpoint = torch.load(model_state_file, map_location=torch.device('cpu')) + start_epoch = checkpoint['start_epoch'] + model.load_state_dict(checkpoint['state_dict']) + optimizer.load_state_dict(checkpoint['optimizer']) + logging.info('Loaded checkpoint (starting from iter {})'.format(checkpoint['start_epoch'])) + + model_ema = None + if args.model_ema: + # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper + model_ema = ModelEma( + model, + decay=args.model_ema_decay, + device='cpu' if args.model_ema_force_cpu else '', + resume=None) + + if model_ema: + eval_model = model_ema.ema + else: + eval_model = model + + if has_apex: + model = DDP(model, delay_allreduce=True) + else: + model = DDP(model, device_ids=[args.local_rank]) + + best_valid_iou = 0. + best_epoch = 0 + + logging.info("rank: {} world_size: {}".format(args.local_rank, args.world_size)) + for epoch in range(start_epoch, args.epochs): + train_sampler.set_epoch(epoch) + val_sampler.set_epoch(epoch) + if args.local_rank == 0: + logging.info(args.load_path) + logging.info(args.save) + logging.info("lr: " + str(optimizer.param_groups[0]['lr'])) + + # training + drop_prob = args.drop_path_prob * epoch / args.epochs + # model.module.drop_path_prob(drop_prob) + + train_mIoU = train(train_loader, det2_dataset, model, model_ema, ohem_criterion, num_classes, lr_scheduler, optimizer, logger, epoch, args, cfg) + + torch.cuda.empty_cache() + + if epoch > args.epochs // 3: + # if epoch >= 10: + temp_iou, avg_loss = validation(val_loader, eval_model, ohem_criterion, num_classes, args, cal_miou=True) + else: + temp_iou = 0. 
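+            # validation is skipped for the first third of training, so
+            # placeholder scores are reported until then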
+ avg_loss = -1 + + torch.cuda.empty_cache() + if args.local_rank == 0: + logging.info("Epoch: {} train miou: {:.2f}".format(epoch+1, 100*train_mIoU)) + if temp_iou > best_valid_iou: + best_valid_iou = temp_iou + best_epoch = epoch + + if model_ema is not None: + torch.save({ + 'start_epoch': epoch + 1, + 'state_dict': model_ema.ema.state_dict(), + 'optimizer': optimizer.state_dict(), + # 'lr_scheduler': lr_scheduler.state_dict(), + }, os.path.join(args.save, 'best_checkpoint.pth.tar')) + else: + torch.save({ + 'start_epoch': epoch + 1, + 'state_dict': model.module.state_dict(), + 'optimizer': optimizer.state_dict(), + # 'lr_scheduler': lr_scheduler.state_dict(), + }, os.path.join(args.save, 'best_checkpoint.pth.tar')) + + logger.add_scalar("mIoU/val", temp_iou, epoch) + logging.info("[Epoch %d/%d] valid mIoU %.4f eval loss %.4f"%(epoch + 1, args.epochs, temp_iou, avg_loss)) + logging.info("Best valid mIoU %.4f Epoch %d"%(best_valid_iou, best_epoch)) + + if model_ema is not None: + torch.save({ + 'start_epoch': epoch + 1, + 'state_dict': model_ema.ema.state_dict(), + 'optimizer': optimizer.state_dict(), + # 'lr_scheduler': lr_scheduler.state_dict(), + }, os.path.join(args.save, 'last.pth.tar')) + else: + torch.save({ + 'start_epoch': epoch + 1, + 'state_dict': model.module.state_dict(), + 'optimizer': optimizer.state_dict(), + # 'lr_scheduler': lr_scheduler.state_dict(), + }, os.path.join(args.save, 'last.pth.tar')) + + +def train(train_loader, det2_dataset, model, model_ema, criterion, num_classes, lr_scheduler, optimizer, logger, epoch, args, cfg): + + model.train() + pixel_mean = cfg.MODEL.PIXEL_MEAN + pixel_std = cfg.MODEL.PIXEL_STD + # pixel_mean = [123.675, 116.28, 103.53] + # pixel_std = [58.395, 57.12, 57.375] + pixel_mean = torch.Tensor(pixel_mean).view(-1, 1, 1).cuda() + pixel_std = torch.Tensor(pixel_std).view(-1, 1, 1).cuda() + + metric = seg_metrics.Seg_Metrics(n_classes=num_classes) + lamb = 0.2 + # for i, sample in enumerate(train_loader): + for i in range(len(train_loader)): + cur_iter = epoch * len(train_loader) + i + lr_scheduler(optimizer, cur_iter) + # inputs = sample['image'].cuda(non_blocking=True) + # target = sample['semantic'].cuda(non_blocking=True) + + det2_data = next(det2_dataset) + det2_inputs = [x["image"].cuda(non_blocking=True) for x in det2_data] + det2_inputs = [(x - pixel_mean) / pixel_std for x in det2_inputs] + det2_inputs = ImageList.from_tensors(det2_inputs, args.size_divisibility).tensor + + det2_targets = [x["sem_seg"].cuda(non_blocking=True) for x in det2_data] + det2_targets = ImageList.from_tensors(det2_targets, args.size_divisibility, args.ignore).tensor + + N = det2_inputs.size(0) + + loss = 0 + description = "" + + logits8, logits16, logits32 = model(det2_inputs) + loss = loss + criterion(logits8, det2_targets) + if logits16 is not None: + loss = loss + lamb * criterion(logits16, det2_targets) + if logits32 is not None: + loss = loss + lamb * criterion(logits32, det2_targets) + + inter, union = seg_metrics.batch_intersection_union(logits8.data, det2_targets, num_classes) + inter = reduce_tensor(torch.FloatTensor(inter).cuda(), args.world_size) + union = reduce_tensor(torch.FloatTensor(union).cuda(), args.world_size) + metric.update(inter.cpu().numpy(), union.cpu().numpy(), N) + + if args.local_rank == 0: + description += "[mIoU%d: %.3f]"%(0, metric.get_scores()) + + torch.cuda.synchronize() + + reduced_loss = loss + reduced_loss = reduce_tensor(reduced_loss.data, args.world_size) + if args.local_rank == 0 and i % 20 == 0: + 
logger.add_scalar('loss/train', reduced_loss, epoch*len(train_loader)+i)
+            logging.info('epoch: {0}\t''iter: {1}/{2}\t''lr: {3:.6f}\t''loss: {4:.4f}'.format(
+                epoch + 1, i + 1, len(train_loader), lr_scheduler.get_lr(optimizer), reduced_loss))
+
+        loss.backward()
+        optimizer.step()
+        optimizer.zero_grad()
+        torch.cuda.synchronize()
+
+        if model_ema is not None:
+            model_ema.update(model)
+
+    return metric.get_scores()
+
+
+def validation(val_loader, model, criterion, n_classes, args, cal_miou=True):
+    device = torch.device('cuda:{}'.format(args.local_rank))
+    model.eval()
+    test_loss = 0.0
+
+    hist_size = (n_classes, n_classes)
+    hist = torch.zeros(hist_size, dtype=torch.float32).cuda()
+
+    for i, sample in enumerate(val_loader):
+        sample = to_cuda(sample, device)
+        image = sample['image']
+        target = sample['semantic']
+
+        N, H, W = target.shape
+        probs = torch.zeros((N, n_classes, H, W)).cuda()
+        probs.requires_grad = False
+
+        torch.cuda.synchronize()
+        if args.local_rank == 0:
+            logging.info("Evaluation [{}/{}]".format(i+1, len(val_loader)))
+        with torch.no_grad():
+            output = model(image)
+            prob = F.softmax(output, 1)
+            probs += prob
+            loss = criterion(output, target).detach().data
+            dist.all_reduce(loss, dist.ReduceOp.SUM)
+            test_loss += loss
+
+            if args.eval_flip:
+                output = model(torch.flip(image, dims=(3,)))
+                output = torch.flip(output, dims=(3,))
+                prob = F.softmax(output, 1)
+                probs += prob
+                loss = criterion(output, target).detach().data
+                dist.all_reduce(loss, dist.ReduceOp.SUM)
+                test_loss += loss
+
+        if cal_miou:
+            # probs = probs.data.numpy()
+            preds = torch.argmax(probs, dim=1)
+            hist_once = compute_hist(preds, target, n_classes, args.ignore)
+            hist = hist + hist_once
+
+        torch.cuda.synchronize()
+
+    if args.eval_flip:
+        # flipping doubles the number of accumulated loss terms
+        avg_loss = test_loss / (2 * len(val_loader))
+    else:
+        avg_loss = test_loss / len(val_loader)
+
+    if cal_miou:
+        # hist = torch.tensor(hist).cuda()
+        dist.all_reduce(hist, dist.ReduceOp.SUM)
+        hist = hist.cpu().numpy().astype(np.float32)
+        IOUs = np.diag(hist) / (np.sum(hist, axis=0) + np.sum(hist, axis=1) - np.diag(hist))
+        mIOU = np.mean(IOUs)
+    else:
+        mIOU = -avg_loss
+
+    return mIOU*100, avg_loss
+
+if __name__ == '__main__':
+    main()
diff --git a/CDARTS_segmentation/train/vis_arch.py b/CDARTS_segmentation/train/vis_arch.py
new file mode 100644
index 0000000..b4f0ecf
--- /dev/null
+++ b/CDARTS_segmentation/train/vis_arch.py
@@ -0,0 +1,44 @@
+from __future__ import division
+import os
+import shutil
+import sys
+import time
+import glob
+import json
+import logging
+import argparse
+import _init_paths
+from utils.darts_utils import create_exp_dir, save, plot_op, plot_path_width, objective_acc_lat
+
+parser = argparse.ArgumentParser(description='parameters for sampling')
+parser.add_argument('--arch_loc', default='./jsons', type=str, help='directory of architecture json files')
+parser.add_argument('--save_dir', default='./archs', type=str, help='output directory for the plots')
+parser.add_argument("--Fch", default=12, type=int, help='Fch')
+parser.add_argument('--stem_head_width', type=float, default=0.6666666666666666, help='stem/head width ratio')
+args = parser.parse_args()
+
+def main():
+    width_mult_list = [4./12, 6./12, 8./12, 10./12, 1.,]
+    json_files = glob.glob(os.path.join(args.arch_loc, "*.json"))
+    for json_file in json_files:
+        with open(json_file, 'r') as f:
+            model_dict = json.loads(f.read())
+
+        last = model_dict["lasts"]
+        save_dir = os.path.join(args.save_dir, os.path.splitext(os.path.basename(json_file))[0])
+        os.makedirs(save_dir, exist_ok=True)
+
+        try:
+            for b in range(len(last)):
+                if len(width_mult_list) > 1:
diff --git a/CDARTS_segmentation/train/vis_arch.py b/CDARTS_segmentation/train/vis_arch.py new file mode 100644 index 0000000..b4f0ecf --- /dev/null +++ b/CDARTS_segmentation/train/vis_arch.py @@ -0,0 +1,44 @@ +from __future__ import division +import os +import shutil +import sys +import time +import glob +import json +import logging +import argparse +import _init_paths +from utils.darts_utils import create_exp_dir, save, plot_op, plot_path_width, objective_acc_lat + +parser = argparse.ArgumentParser(description='parameters for sampling') +parser.add_argument('--arch_loc', default='./jsons', type=str, help='directory of architecture json files') +parser.add_argument('--save_dir', default='./archs', type=str, help='directory to save the plots') +parser.add_argument("--Fch", default=12, type=int, help='base channel number') +parser.add_argument('--stem_head_width', type=float, default=0.6666666666666666, help='stem and head width ratio') +args = parser.parse_args() + +def main(): + width_mult_list = [4./12, 6./12, 8./12, 10./12, 1.,] + json_files = glob.glob(os.path.join(args.arch_loc, "*.json")) + for json_file in json_files: + with open(json_file, 'r') as f: + model_dict = json.loads(f.read()) + + last = model_dict["lasts"] + save_dir = os.path.join(args.save_dir, os.path.splitext(os.path.basename(json_file))[0]) + os.makedirs(save_dir, exist_ok=True) + + try: + for b in range(len(last)): + if len(width_mult_list) > 1: + plot_op(model_dict["ops"][b], model_dict["paths"][b], width=model_dict["widths"][b], head_width=args.stem_head_width, F_base=args.Fch).savefig(os.path.join(save_dir, "ops_%d_%d.png"%(0,b)), bbox_inches="tight") + else: + plot_op(model_dict["ops"][b], model_dict["paths"][b], F_base=args.Fch).savefig(os.path.join(save_dir, "ops_%d_%d.png"%(0,b)), bbox_inches="tight") + plot_path_width(model_dict["lasts"], model_dict["paths"], model_dict["widths"]).savefig(os.path.join(save_dir, "path_width%d.png"%0)) + except Exception: + print("Arch: {} is invalid".format(json_file)) + shutil.rmtree(save_dir) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..5ddf12a --- /dev/null +++ b/LICENSE @@ -0,0 +1,23 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + + diff --git a/README.md b/README.md new file mode 100644 index 0000000..eaf7feb --- /dev/null +++ b/README.md @@ -0,0 +1,209 @@ +## Cyclic Differentiable Architecture Search + +
+ +### We are hiring talented interns for NAS research: houwen.peng@microsoft.com + +### News +- Our preprint paper is available on [ArXiv](https://arxiv.org/pdf/2006.10724.pdf). +- :star2: We achieved the best or comparable performance on CIFAR10 (97.60%), CIFAR100 (84.31%) and ImageNet (76.3%) under the DARTS search space! +- :star2: Our big model achieved impressive performance on CIFAR10 (98.32%), CIFAR100 (87.01%) and ImageNet (81.12%)! +- :star2::star2: Our codebase supports **distributed searching and training**, e.g., 4-GPU CIFAR or 8-GPU ImageNet search. It runs much **faster** than the previous non-distributed [DARTS](https://github.com/quark0/darts) and [PDARTS](https://github.com/chenxin061/pdarts). + +### Results +#### Discovered Cells +
+<!-- figure: discovered cells --> +
+ +#### Results on NATS-Bench benchmark +
+<!-- figure: NATS-Bench results --> +
+ +#### Results on CIFAR +
+<!-- figure: CIFAR results --> +
+ +#### Results on ImageNet +
+<!-- figure: ImageNet results --> +
+ +### Environments +Tesla V100, CUDA 10.0, Ubuntu 16.04, PyTorch >= 1.2, Python 3, [apex](https://github.com/NVIDIA/apex) + +### Data Preparation +* [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) +* [CIFAR-100](https://www.cs.toronto.edu/~kriz/cifar.html) +* [ImageNet-2012](http://www.image-net.org/) + +Create a soft link in the main dir. +``` +ln -s $DataLocation experiments/data +``` +${ROOT}/experiments/data should then look like this. +``` +experiments/data/imagenet/train +experiments/data/imagenet/val +... +``` + +### Installation +* First, install graphviz. + ``` + apt-get install graphviz + ``` +* Install the python requirements. + ```buildoutcfg + pip install -r requirements + ``` +* Then install apex. + ```buildoutcfg + git clone https://github.com/NVIDIA/apex + cd apex + python setup.py install --cpp_ext --cuda_ext + ``` + +### Search, Retrain and Test +We provide all the shell scripts with their default parameters in the scripts folder. +* For example: + ```buildoutcfg + cd ${CODE_ROOT} + + bash CyDAS/scripts/run_search_cifar_1gpu.sh + bash CyDAS/scripts/run_retrain_cifar_1gpu.sh + ... + ``` + +#### Search +* The main python file is + ```buildoutcfg + ${ROOT}/CyDAS/search.py + ``` +* The following options are available during search. + ```buildoutcfg + --regular # whether to use the regularization term + --regular_ratio # if regular is used, the regular ratio + --regular_coeff # if regular is used, the regular coefficient + --ensemble_param # whether to ensemble features of different layers + --loss_alpha # the loss coefficient + --w_lr # the learning rate of the search network + --alpha_lr # the learning rate of the architecture parameters + --nasnet_lr # the learning rate of the evaluation network + --w_weight_decay # the weight decay of the search and evaluation networks + --alpha_weight_decay # the weight decay of the architecture parameters + --fix_head # whether to fix the parameters of the auxiliary heads + --interactive_type # the KD function: 0 kl, 1 cosine, 2 mse, 3 sl1 + --pretrain_epochs # the pretrain epochs of the search network + --search_iter # the number of search iterations + --search_iter_epochs # the epochs in each search iteration + --nasnet_warmup # the epochs used to train a new evaluation network + ``` + A minimal sketch of the interactive (KD) loss selected by `--interactive_type` is given after this section. +* Here we present our search scripts on CIFAR and ImageNet. + ```buildoutcfg + bash CyDAS/scripts/run_search_cifar_1gpu.sh + bash CyDAS/scripts/run_search_cifar_4gpus.sh + bash CyDAS/scripts/run_search_imagenet.sh + ``` +* Modify the following settings in `run_search_cifar_1gpu.sh` and `run_search_cifar_4gpus.sh` to search on CIFAR100. + ``` + --dataset cifar100 + --n_classes 100 + ```
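The `--interactive_type` option above selects how the search and evaluation networks exchange knowledge; the repository implements this as `Loss_interactive` in `models/loss.py` (used by `core/search_function.py` later in this patch). The snippet below is only an editor's sketch of the KL variant (`--interactive_type 0`) under the standard temperature-scaled KD formulation with temperature `--loss_T`, not the repository's exact code.

```python
# Editor's sketch of a temperature-scaled KL interaction loss (assumption:
# standard knowledge-distillation form; Loss_interactive may differ).
import torch.nn.functional as F

def interactive_kl(logits_search, logits_eval, T=2.0):
    p_eval = F.softmax(logits_eval / T, dim=1)              # soft targets
    log_p_search = F.log_softmax(logits_search / T, dim=1)
    # 'batchmean' gives the mathematically correct KL; T*T rescales gradients
    return F.kl_div(log_p_search, p_eval, reduction='batchmean') * (T * T)
```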
+#### Retrain +* The main python file is + ```buildoutcfg + ${ROOT}/CyDAS/retrain.py + ``` +* We provide all cell genotypes of CIFAR and ImageNet in + ```buildoutcfg + ${ROOT}/CyDAS/cells/cifar_genotypes.json + ... + ``` +* The following options are available during retraining. + ```buildoutcfg + --cell_file # path of the cell genotype + --weight_decay # decay of W in the Retrain-Phase + --lr # learning rate of W in the Retrain-Phase + --warmup_epochs # warmup epochs + --epochs # total retrain epochs + --cutout_length # cutout length for cifar + --aux_weight # weight of the auxiliary loss, 0.4 is the best option + --drop_path_prob # drop-path probability used in NAS + --label_smooth # label smoothing ratio + ``` +* Here we present our train scripts on CIFAR and ImageNet. + ```buildoutcfg + bash CyDAS/scripts/run_retrain_cifar_1gpu.sh + bash CyDAS/scripts/run_retrain_cifar_4gpus.sh + bash CyDAS/scripts/run_retrain_imagenet.sh + ``` +* Modify the following settings in `run_retrain_cifar_1gpu.sh` and `run_retrain_cifar_4gpus.sh` to train on CIFAR100. + ``` + --dataset cifar100 + --n_classes 100 + ``` + +#### Test +* The main python file is + ```buildoutcfg + ${ROOT}/CyDAS/test.py + ``` +* The following options are available during testing. + ```buildoutcfg + --resume # whether to load a checkpoint + --resume_name # checkpoint name + ``` +* Here we present our test scripts on CIFAR and ImageNet. + ```buildoutcfg + bash CyDAS/scripts/run_test_cifar.sh + bash CyDAS/scripts/run_test_imagenet.sh + ``` +* Modify the following settings in `run_test_cifar.sh` to test on CIFAR100. + ``` + --dataset cifar100 + --n_classes 100 + ``` + +### NAS-Bench-201 +* The main python file is + ```buildoutcfg + ${ROOT}/benchmark201/search.py + ``` +* Here we present our search script on NAS-Bench-201. + ```buildoutcfg + cd benchmark201 + bash run_search_cifar_1gpu.sh + ``` + +### Object Detection +#### Results on COCO +We provide trained models and logs (DET-A and DET-B), which can be downloaded from [Google Drive](https://drive.google.com/drive/folders/1CkFp24bEDq0wUp504BQ68jn5Vs069qox?usp=sharing). +
+<!-- figure: COCO detection results --> +
+ +### Semantic Segmentation +We provide trained models and logs (Cityscapes and ADE20K), which can be downloaded from [Google Drive](https://drive.google.com/drive/folders/1CkFp24bEDq0wUp504BQ68jn5Vs069qox?usp=sharing). +#### Results on Cityscapes +
+<!-- figure: Cityscapes results --> +
+ +#### Results on ADE20K +
+<!-- figure: ADE20K results --> +
\ No newline at end of file diff --git a/benchmark201/configs/config.py b/benchmark201/configs/config.py new file mode 100644 index 0000000..b28e13c --- /dev/null +++ b/benchmark201/configs/config.py @@ -0,0 +1,228 @@ +""" Config class for search/augment """ +import argparse +import os +from functools import partial +import torch + + +def get_parser(name): + """ make default formatted parser """ + parser = argparse.ArgumentParser(name, formatter_class=argparse.ArgumentDefaultsHelpFormatter) + # print default value always + parser.add_argument = partial(parser.add_argument, help=' ') + return parser + + +def parse_gpus(gpus): + if gpus == 'all': + return list(range(torch.cuda.device_count())) + else: + return [int(s) for s in gpus.split(',')] + + +class BaseConfig(argparse.Namespace): + def print_params(self, prtf=print): + prtf("") + prtf("Parameters:") + for attr, value in sorted(vars(self).items()): + prtf("{}={}".format(attr.upper(), value)) + prtf("") + + def as_markdown(self): + """ Return configs as markdown format """ + text = "|name|value| \n|-|-| \n" + for attr, value in sorted(vars(self).items()): + text += "|{}|{}| \n".format(attr, value) + + return text + + +class SearchConfig(BaseConfig): + def build_parser(self): + parser = get_parser("Search config") + parser.add_argument('--name', required=True) + ########### basic settings ############ + parser.add_argument('--dataset', default='imagenet', help='CIFAR10 / MNIST / FashionMNIST / imagenet') + parser.add_argument('--model_type', type=str, default='cifar', help='cifar or imagenet') + parser.add_argument('--data_dir', type=str, default='data/cifar', help='cifar dataset') + parser.add_argument('--train_dir', type=str, default='data/imagenet/train', help='') + parser.add_argument('--val_dir', type=str, default='data/imagenet/train', help='') + parser.add_argument('--test_dir', type=str, default='data/imagenet/val', help='') + parser.add_argument('--param_pool_path', type=str, default=None, help='') + parser.add_argument('--input_channels', type=int, default=3) + parser.add_argument('--init_channels', type=int, default=16) + parser.add_argument('--stem_multiplier', type=int, default=3) + parser.add_argument('--n_classes', type=int, default=10) + parser.add_argument('--batch_size', type=int, default=128, help='batch size') + parser.add_argument('--print_freq', type=int, default=50, help='print frequency') + parser.add_argument('--seed', type=int, default=0, help='random seed') + parser.add_argument('--workers', type=int, default=4, help='# of workers') + parser.add_argument('--gpus', default='0', help='gpu device ids separated by comma. 
' + '`all` indicates use all gpus.') + parser.add_argument('--sample_ratio', type=float, default=0.2, help='imagenet sample ratio') + parser.add_argument('--resume', action='store_true', default=False, help='resnet stem(pretrain)') + + ########### learning rate ############ + parser.add_argument('--w_lr', type=float, default=0.05, help='lr for weights') + parser.add_argument('--lr_ratio', type=float, default=0.5, help='lr for trained layers') + parser.add_argument('--w_lr_min', type=float, default=0.001, help='minimum lr for weights') + parser.add_argument('--w_momentum', type=float, default=0.9, help='momentum for weights') + parser.add_argument('--w_weight_decay', type=float, default=3e-4, + help='weight decay for weights') + parser.add_argument('--w_grad_clip', type=float, default=5., + help='gradient clipping for weights') + parser.add_argument('--alpha_lr', type=float, default=6e-4, help='lr for alpha') + parser.add_argument('--alpha_weight_decay', type=float, default=1e-3, + help='weight decay for alpha') + + ########### alternate training ############ + parser.add_argument('--res_stem', action='store_true', default=False, help='resnet stem(pretrain)') + parser.add_argument('--layer_num', type=int, default=3, help='layer need to be replaced') + parser.add_argument('--cells_num', type=int, default=3, help='cells num of one layer') + parser.add_argument('--pretrain_epochs', type=int, default=5, help='# of training epochs') + parser.add_argument('--pretrain_decay', type=int, default=5, help='pretrain epochs') + parser.add_argument('--random_times', type=int, default=10, help='# of training epochs') + parser.add_argument('--random_epochs', type=int, default=3, help='# of training epochs') + parser.add_argument('--search_iter', type=int, default=5, help='times of search') + parser.add_argument('--search_iter_epochs', type=int, default=5, help='# of training epochs') + parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss') + parser.add_argument('--one_stage', action='store_true', default=False, help='one_stage search') + parser.add_argument('--same_structure', action='store_true', default=False, help='same_structure search and retrain') + parser.add_argument('--clean_arch', action='store_true', default=False, help='clean archs each epoch') + parser.add_argument('--sync_param', action='store_true', default=False, help='whether to sync param') + parser.add_argument('--ensemble_sum', action='store_true', default=False, help='ensemble sum or concat') + parser.add_argument('--ensemble_param', action='store_true', default=False, help='whether to learn ensemble params') + parser.add_argument('--use_beta', action='store_true', default=False, help='whether to use beta arch param') + parser.add_argument('--bn_affine', action='store_true', default=False, help='main bn affine') + parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to sync bn') + parser.add_argument('--use_apex', action='store_true', default=False, help='whether to apex') + parser.add_argument('--regular', action='store_true', default=False, help='resnet stem(pretrain)') + parser.add_argument('--regular_ratio', type=float, default=0.5, help='regular ratio') + parser.add_argument('--regular_coeff', type=float, default=5, help='regular coefficient') + parser.add_argument('--repeat_cell', action='store_true', default=False, help='use repeat cell') + parser.add_argument('--fix_head', action='store_true', default=False, help='whether to fix head') + 
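+        # The options below wire up the cyclic interaction between the search and evaluation networks: --loss_alpha/--loss_T scale and temper the interactive (KD) loss, and --interactive_type selects it (0: KL, 1: cosine, 2: MSE).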
parser.add_argument('--share_fc', action='store_true', default=False, help='whether to share fc') + parser.add_argument('--nasnet_lr', type=float, default=0.1, help='lr of nasnet') + parser.add_argument('--nasnet_warmup', type=int, default=5, help='warm up of nasnet') + parser.add_argument('--loss_alpha', type=float, default=1, help='loss alpha') + parser.add_argument('--loss_T', type=float, default=2, help='loss T') + parser.add_argument('--interactive_type', type=int, default=0, help='0 kl 1 cosine 2 mse') + parser.add_argument('--gumbel_sample', action='store_true', default=False, help='whether to use gumbel sample') + parser.add_argument('--sample_pretrain', action='store_true', default=False, help='sample_pretrain') + + + ########### data augument ############ + parser.add_argument('--aux_weight', type=float, default=0.4, help='auxiliary loss weight') + parser.add_argument('--cutout_length', type=int, default=16, help='cutout length') + parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path prob') + parser.add_argument('--use_aa', action='store_true', default=False, help='whether to use aa') + parser.add_argument('--mixup_alpha', default=0., type=float, + help='mixup interpolation coefficient (default: 1)') + + ########### distributed ############ + parser.add_argument("--local_rank", default=0, type=int) + parser.add_argument("--world_size", default=1, type=int) + parser.add_argument('--dist_url', default='tcp://127.0.0.1:23456', type=str, help='url used to set up distributed training') + parser.add_argument('--distributed', action='store_true', help='Run model distributed mode.') + + + return parser + + def __init__(self): + parser = self.build_parser() + args = parser.parse_args() + super().__init__(**vars(args)) + + self.data_path = './data/' + self.path = os.path.join('search', self.name) + self.resume_path = os.path.join(self.path, 'search_resume.pth.tar') + self.plot_path = os.path.join(self.path, 'plots') + self.gpus = parse_gpus(self.gpus) + + +class AugmentConfig(BaseConfig): + def build_parser(self): + parser = get_parser("Augment config") + parser.add_argument('--name', required=True) + parser.add_argument('--dataset', required=True, help='cifar10 / cifar100 / imagenet') + parser.add_argument('--model_type', type=str, default='cifar', help='cifar or imagenet') + + parser.add_argument('--data_dir', type=str, default='data/cifar', help='cifar dataset') + parser.add_argument('--train_dir', type=str, default='data/imagenet/train', help='') + parser.add_argument('--test_dir', type=str, default='data/imagenet/val', help='') + parser.add_argument('--cell_file', type=str, default='cells/cifar_genotype.json', help='') + parser.add_argument('--resume', action='store_true', default=False, help='resnet stem(pretrain)') + + parser.add_argument('--n_classes', type=int, default=10) + parser.add_argument('--input_channels', type=int, default=3) + parser.add_argument('--stem_multiplier', type=int, default=3) + + ########### alternate training ############ + parser.add_argument('--res_stem', action='store_true', default=False, help='resnet stem(pretrain)') + parser.add_argument('--layer_num', type=int, default=3, help='layer need to be replaced') + parser.add_argument('--cells_num', type=int, default=3, help='cells num of one layer') + parser.add_argument('--same_structure', action='store_true', default=False, help='same_structure search and retrain') + parser.add_argument('--ensemble_sum', action='store_true', default=False, help='whether to ensemble') + 
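+        # --ensemble_param (below) learns the weights used to combine the two networks' outputs; --ensemble_sum (above) sums them instead of concatenating.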
parser.add_argument('--ensemble_param', action='store_true', default=False, help='whether to learn ensemble params') + parser.add_argument('--use_beta', action='store_true', default=False, help='whether to use beta arch param') + parser.add_argument('--bn_affine', action='store_true', default=False, help='main bn affine') + parser.add_argument('--repeat_cell', action='store_true', default=False, help='use repeat cell') + parser.add_argument('--fix_head', action='store_true', default=False, help='whether to fix head') + parser.add_argument('--share_fc', action='store_true', default=False, help='whether to share fc') + parser.add_argument('--sample_pretrain', action='store_true', default=False, help='sample_pretrain') + + parser.add_argument('--use_aa', action='store_true', default=False, help='whether to use aa') + parser.add_argument('--mixup_alpha', default=0., type=float, + help='mixup interpolation coefficient (default: 1)') + parser.add_argument('--resume_name', type=str, default='retrain_resume.pth.tar') + + parser.add_argument('--batch_size', type=int, default=128, help='batch size') + parser.add_argument('--lr', type=float, default=0.025, help='lr for weights') + parser.add_argument('--momentum', type=float, default=0.9, help='momentum') + parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay') + parser.add_argument('--grad_clip', type=float, default=5., + help='gradient clipping for weights') + parser.add_argument('--print_freq', type=int, default=200, help='print frequency') + parser.add_argument('--gpus', default='0', help='gpu device ids separated by comma. ' + '`all` indicates use all gpus.') + parser.add_argument('--epochs', type=int, default=600, help='# of training epochs') + parser.add_argument('--warmup_epochs', type=int, default=5, help='# warmup') + parser.add_argument('--init_channels', type=int, default=36) + parser.add_argument('--layers', type=int, default=20, help='# of layers') + parser.add_argument('--seed', type=int, default=0, help='random seed') + parser.add_argument('--workers', type=int, default=4, help='# of workers') + parser.add_argument('--aux_weight', type=float, default=0.4, help='auxiliary loss weight') + parser.add_argument('--cutout_length', type=int, default=16, help='cutout length') + parser.add_argument('--sample_archs', type=int, default=1, help='sample arch num') + parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing') + parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path prob') + + ########### distributed ############ + parser.add_argument("--local_rank", default=0, type=int) + parser.add_argument("--world_size", default=1, type=int) + parser.add_argument('--use_amp', action='store_true', default=False, help='whether to use amp') + parser.add_argument('--opt-level', type=str, default='O1') + + parser.add_argument('--dist_url', default='tcp://127.0.0.1:23456', type=str, help='url used to set up distributed training') + parser.add_argument('--fp16', action='store_true', + help='Run model fp16 mode.') + parser.add_argument('--distributed', action='store_true', + help='Run model distributed mode.') + + parser.add_argument('--static-loss-scale', type=float, default=1, + help='Static loss scale, positive power of 2 values can improve fp16 convergence.') + parser.add_argument('--dynamic-loss-scale', action='store_true', + help='Use dynamic loss scaling. 
If supplied, this argument supersedes ' + + '--static-loss-scale.') + return parser + + def __init__(self): + parser = self.build_parser() + args = parser.parse_args() + super().__init__(**vars(args)) + + self.data_path = './data/' + self.path = os.path.join('augments', self.name) + self.gpus = parse_gpus(self.gpus) + self.resume_path = os.path.join(self.path, self.resume_name) + diff --git a/benchmark201/core/augment_function.py b/benchmark201/core/augment_function.py new file mode 100644 index 0000000..e4c08fd --- /dev/null +++ b/benchmark201/core/augment_function.py @@ -0,0 +1,130 @@ +import torch +import torch.nn as nn +from utils import utils +from datasets import data_utils +from models.loss import CrossEntropyLabelSmooth + +def train(train_loader, model, optimizer, epoch, writer, logger, config): + device = torch.device("cuda") + if config.label_smooth > 0: + criterion = CrossEntropyLabelSmooth(config.n_classes, config.label_smooth).to(device) + else: + criterion = nn.CrossEntropyLoss().to(device) + + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + step_num = len(train_loader) + cur_step = epoch*step_num + cur_lr = optimizer.param_groups[0]['lr'] + if config.local_rank == 0: + logger.info("Train Epoch {} LR {}".format(epoch, cur_lr)) + writer.add_scalar('train/lr', cur_lr, cur_step) + + model.train() + + for step, (X, y) in enumerate(train_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + X, target_a, target_b, lam = data_utils.mixup_data(X, y, config.mixup_alpha, use_cuda=True) + + optimizer.zero_grad() + logits, logits_aux = model(X) + # loss = criterion(logits, y) + loss = data_utils.mixup_criterion(criterion, logits, target_a, target_b, lam) + if config.aux_weight > 0: + # loss_aux = criterion(logits_aux, y) + loss_aux = data_utils.mixup_criterion(criterion, logits_aux, target_a, target_b, lam) + loss = loss + config.aux_weight * loss_aux + + if config.use_amp: + from apex import amp + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + # gradient clipping + nn.utils.clip_grad_norm_(model.module.parameters(), config.grad_clip) + optimizer.step() + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = loss.data + + losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num): + logger.info( + "Train: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.epochs, step, + step_num, losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('train/loss', reduced_loss.item(), cur_step) + writer.add_scalar('train/top1', prec1.item(), cur_step) + writer.add_scalar('train/top5', prec5.item(), cur_step) + cur_step += 1 + + if config.local_rank == 0: + logger.info("Train: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.epochs, top1.avg)) + +def validate(valid_loader, model, epoch, cur_step, writer, logger, config): + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + model.eval() + device = 
torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + + with torch.no_grad(): + for step, (X, y) in enumerate(valid_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + logits, _ = model(X) + loss = criterion(logits, y) + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = loss.data + + losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + step_num = len(valid_loader) + + if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0: + logger.info( + "Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.epochs, step, step_num, + losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('val/loss', losses.avg, cur_step) + writer.add_scalar('val/top1', top1.avg, cur_step) + writer.add_scalar('val/top5', top5.avg, cur_step) + + logger.info("Valid: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.epochs, top1.avg)) + + return top1.avg, top5.avg diff --git a/benchmark201/core/pretrain_function.py b/benchmark201/core/pretrain_function.py new file mode 100644 index 0000000..b55c7c1 --- /dev/null +++ b/benchmark201/core/pretrain_function.py @@ -0,0 +1,342 @@ +import torch +import torch.nn as nn +from utils import utils +from datasets import data_utils +from models.loss import CrossEntropyLabelSmooth + +def train(train_loader, model, optimizer, epoch, writer, logger, config): + device = torch.device("cuda") + if config.label_smooth > 0: + criterion = CrossEntropyLabelSmooth(config.n_classes, config.label_smooth).to(device) + else: + criterion = nn.CrossEntropyLoss().to(device) + + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + step_num = len(train_loader) + cur_step = epoch*step_num + cur_lr = optimizer.param_groups[0]['lr'] + if config.local_rank == 0: + logger.info("Train Epoch {} LR {}".format(epoch, cur_lr)) + writer.add_scalar('train/lr', cur_lr, cur_step) + + model.train() + + for step, (X, y) in enumerate(train_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + X, target_a, target_b, lam = data_utils.mixup_data(X, y, config.mixup_alpha, use_cuda=True) + + optimizer.zero_grad() + logits, logits_aux = model(X, layer_idx=0, super_flag=True, pretrain_flag=True) + loss = data_utils.mixup_criterion(criterion, logits, target_a, target_b, lam) + if config.aux_weight > 0: + # loss_aux = criterion(logits_aux, y) + loss_aux = data_utils.mixup_criterion(criterion, logits_aux, target_a, target_b, lam) + loss = loss + config.aux_weight * loss_aux + + if config.use_amp: + from apex import amp + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + # gradient clipping + nn.utils.clip_grad_norm_(model.module.parameters(), config.grad_clip) + optimizer.step() + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = 
loss.data + + losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num): + logger.info( + "Train: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.epochs, step, + step_num, losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('train/loss', reduced_loss.item(), cur_step) + writer.add_scalar('train/top1', prec1.item(), cur_step) + writer.add_scalar('train/top5', prec5.item(), cur_step) + cur_step += 1 + + if config.local_rank == 0: + logger.info("Train: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.epochs, top1.avg)) + +def validate(valid_loader, model, epoch, cur_step, writer, logger, config): + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + model.eval() + device = torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + + with torch.no_grad(): + for step, (X, y) in enumerate(valid_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True) + loss = criterion(logits, y) + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = loss.data + + losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + step_num = len(valid_loader) + + if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0: + logger.info( + "Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.epochs, step, step_num, + losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('val/loss', losses.avg, cur_step) + writer.add_scalar('val/top1', top1.avg, cur_step) + writer.add_scalar('val/top5', top5.avg, cur_step) + + logger.info("Valid: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.epochs, top1.avg)) + + return top1.avg, top5.avg + + +def sample_train(train_loader, model, optimizer, epoch, writer, logger, config): + device = torch.device("cuda") + if config.label_smooth > 0: + criterion = CrossEntropyLabelSmooth(config.n_classes, config.label_smooth).to(device) + else: + criterion = nn.CrossEntropyLoss().to(device) + + step_num = len(train_loader) + cur_step = epoch*step_num + cur_lr = optimizer.param_groups[0]['lr'] + if config.local_rank == 0: + logger.info("Train Epoch {} LR {}".format(epoch, cur_lr)) + writer.add_scalar('train/lr', cur_lr, cur_step) + + model.train() + + for step, (X, y) in enumerate(train_loader): + + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + X, target_a, target_b, lam = data_utils.mixup_data(X, y, config.mixup_alpha, use_cuda=True) + + optimizer.zero_grad() + + all_losses = [] + all_logits = [] + for i in range(config.sample_archs): + ### sample new arch ### + model.module.init_arch_params(layer_idx=0) + genotypes = [] + for i in range(config.layer_num): + genotype, connect = model.module.generate_genotype(i) + genotypes.append(genotype) + + 
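+                # register the sampled genotype/connections for layer i (note: this inner loop reuses `i`, shadowing the outer sample-architecture index)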
model.module.genotypes[i] = genotype + model.module.connects[i] = connect + + logits, logits_aux = model(X, layer_idx=0, super_flag=True, pretrain_flag=True, is_slim=True) + all_logits.append(logits) + loss = data_utils.mixup_criterion(criterion, logits, target_a, target_b, lam) + if config.aux_weight > 0: + # loss_aux = criterion(logits_aux, y) + loss_aux = data_utils.mixup_criterion(criterion, logits_aux, target_a, target_b, lam) + loss = loss + config.aux_weight * loss_aux + + all_losses.append(loss) + + ''' + for j, genotype in enumerate(genotypes): + if config.local_rank == 0: + logger.info("Random stage: {} layer: {} genotype = {}".format(i, j, genotype)) + ''' + + loss = torch.sum(torch.stack(all_losses)) + + if config.use_amp: + from apex import amp + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + + # tricks + for p in model.module.parameters(): + if p.grad is not None and p.grad.sum() == 0: + p.grad = None + + # gradient clipping + nn.utils.clip_grad_norm_(model.module.parameters(), config.grad_clip) + optimizer.step() + + for i, logits in enumerate(all_logits): + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + if config.distributed: + reduced_loss = utils.reduce_tensor(all_losses[i].data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = all_losses[i].data + + + torch.cuda.synchronize() + if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num): + logger.info( + "Train: Epoch {:2d}/{} Step {:03d}/{:03d} Sample idx {} Loss {:.3f} " + "Prec@(1,5) ({:.1%}, {:.1%})".format( + epoch+1, config.epochs, step, step_num, i, + reduced_loss.item(), prec1.item(), prec5.item())) + + if config.local_rank == 0: + writer.add_scalar('train/loss', reduced_loss.item(), cur_step) + writer.add_scalar('train/top1', prec1.item(), cur_step) + writer.add_scalar('train/top5', prec5.item(), cur_step) + cur_step += 1 + + + +def sample_validate(valid_loader, model, epoch, cur_step, writer, logger, config): + + model.eval() + device = torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + + with torch.no_grad(): + for step, (X, y) in enumerate(valid_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + for i in range(config.sample_archs): + ### sample new arch ### + model.module.init_arch_params(layer_idx=0) + genotypes = [] + for i in range(config.layer_num): + genotype, connect = model.module.generate_genotype(i) + genotypes.append(genotype) + + model.module.genotypes[i] = genotype + model.module.connects[i] = connect + + logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True, is_slim=True) + loss = criterion(logits, y) + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = loss.data + + torch.cuda.synchronize() + step_num = len(valid_loader) + + if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0: + logger.info( + "Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Sample_index {} Loss {:.3f} " + "Prec@(1,5) ({:.1%}, {:.1%})".format( + epoch+1, config.epochs, step, step_num, i, + reduced_loss.item(), prec1.item(), prec5.item())) + + if config.local_rank == 0: + 
writer.add_scalar('val/loss', reduced_loss.item(), cur_step) + writer.add_scalar('val/top1', prec1.item(), cur_step) + writer.add_scalar('val/top5', prec5.item(), cur_step) + + return prec1.item(), prec5.item() + + +def test_sample(valid_loader, model, epoch, cur_step, writer, logger, config): + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + model.eval() + device = torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + + + model.module.init_arch_params(layer_idx=0) + genotypes = [] + + for i in range(config.layer_num): + genotype, connect = model.module.generate_genotype(i) + genotypes.append(genotype) + + model.module.genotypes[i] = genotype + model.module.connects[i] = connect + + with torch.no_grad(): + for step, (X, y) in enumerate(valid_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + # logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True) + logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True, is_slim=True) + loss = criterion(logits, y) + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = loss.data + + losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + step_num = len(valid_loader) + + if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0: + logger.info( + "Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.epochs, step, step_num, + losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('val/loss', losses.avg, cur_step) + writer.add_scalar('val/top1', top1.avg, cur_step) + writer.add_scalar('val/top5', top5.avg, cur_step) + + logger.info("Valid: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.epochs, top1.avg)) + + return top1.avg, top5.avg \ No newline at end of file diff --git a/benchmark201/core/search_function.py b/benchmark201/core/search_function.py new file mode 100644 index 0000000..29c60e3 --- /dev/null +++ b/benchmark201/core/search_function.py @@ -0,0 +1,241 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from utils import utils +from models.loss import Loss_interactive + +def search(train_loader, valid_loader, model, optimizer, w_optim, alpha_optim, epoch, writer, logger, config): + # interactive retrain and kl + + device = torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + losses_interactive = utils.AverageMeter() + losses_cls = utils.AverageMeter() + losses_reg = utils.AverageMeter() + + step_num = len(train_loader) + step_num = int(step_num * config.sample_ratio) + + cur_step = epoch*step_num + cur_lr_search = w_optim.param_groups[0]['lr'] + cur_lr_main = optimizer.param_groups[0]['lr'] + if config.local_rank == 0: + logger.info("Train Epoch {} Search LR {}".format(epoch, cur_lr_search)) + logger.info("Train Epoch {} Main LR {}".format(epoch, cur_lr_main)) + writer.add_scalar('retrain/lr', cur_lr_search, cur_step) + + model.train() + + for step, ((trn_X, trn_y), (val_X, val_y)) in enumerate(zip(train_loader, 
valid_loader)): + if step > step_num: + break + + trn_X, trn_y = trn_X.to(device, non_blocking=True), trn_y.to(device, non_blocking=True) + val_X, val_y = val_X.to(device, non_blocking=True), val_y.to(device, non_blocking=True) + N = trn_X.size(0) + + #use valid data + alpha_optim.zero_grad() + optimizer.zero_grad() + + logits_search, emsemble_logits_search = model(val_X, super_flag=True) + logits_main, emsemble_logits_main= model(val_X, super_flag=False) + + loss_cls = (criterion(logits_search, val_y) + criterion(logits_main, val_y)) / config.loss_alpha + loss_interactive = Loss_interactive(emsemble_logits_search, emsemble_logits_main, config.loss_T, config.interactive_type) * config.loss_alpha + loss_regular = 0 * loss_cls + + if config.regular: + coeff = max(config.regular_coeff * (1 - float(epoch-config.pretrain_epochs)/(( + config.search_iter-config.pretrain_epochs)*config.search_iter_epochs*config.regular_ratio)), 0) + # loss_regular += coeff * torch.sum(abs(model.module._arch_parameters[:, 0])) + loss_regular += coeff * model.module.l1_loss() + + loss = loss_cls + loss_interactive + loss_regular + loss.backward() + nn.utils.clip_grad_norm_(model.module.parameters(), config.w_grad_clip) + optimizer.step() + alpha_optim.step() + + prec1, prec5 = utils.accuracy(logits_search, val_y, topk=(1, 5)) + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + reduced_loss_interactive = utils.reduce_tensor(loss_interactive.data, config.world_size) + reduced_loss_cls = utils.reduce_tensor(loss_cls.data, config.world_size) + reduced_loss_reg = utils.reduce_tensor(loss_regular.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + + else: + reduced_loss = loss.data + reduced_loss_interactive = loss_interactive.data + reduced_loss_cls = loss_cls.data + reduced_loss_reg = loss_regular.data + + losses.update(reduced_loss.item(), N) + losses_interactive.update(reduced_loss_interactive.item(), N) + losses_cls.update(reduced_loss_cls.item(), N) + losses_reg.update(reduced_loss_reg.item(), N) + + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num): + logger.info( + "Train_2: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Loss_interactive {losses_interactive.avg:.3f} Losses_cls {losses_cls.avg:.3f} Losses_reg {losses_reg.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.search_iter*config.search_iter_epochs, step, + step_num, losses=losses, losses_interactive=losses_interactive, losses_cls=losses_cls, + losses_reg=losses_reg, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('retrain/loss', reduced_loss.item(), cur_step) + writer.add_scalar('retrain/top1', prec1.item(), cur_step) + writer.add_scalar('retrain/top5', prec5.item(), cur_step) + cur_step += 1 + + + w_optim.zero_grad() + logits_search_train, _ = model(trn_X, super_flag=True) + loss_cls_train = criterion(logits_search_train, trn_y) + loss_train = loss_cls_train + loss_train.backward() + # gradient clipping + nn.utils.clip_grad_norm_(model.module.parameters(), config.w_grad_clip) + # only update w + w_optim.step() + + # alpha_optim.step() + if config.distributed: + reduced_loss_cls_train = utils.reduce_tensor(loss_cls_train.data, config.world_size) + reduced_loss_train = utils.reduce_tensor(loss_train.data, config.world_size) + else: + 
reduced_loss_cls_train = loss_cls_train.data + reduced_loss_train = loss_train.data + + if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num-1): + logger.info( + "Train_1: Loss_cls: {:.3f} Loss: {:.3f}".format( + reduced_loss_cls_train.item(), reduced_loss_train.item()) + ) + + + if config.local_rank == 0: + logger.info("Train_2: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.search_iter*config.search_iter_epochs, top1.avg)) + + +def retrain_warmup(valid_loader, model, optimizer, epoch, writer, logger, super_flag, retrain_epochs, config): + + device = torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + step_num = len(valid_loader) + step_num = int(step_num * config.sample_ratio) + + cur_step = epoch*step_num + cur_lr = optimizer.param_groups[0]['lr'] + if config.local_rank == 0: + logger.info("Warmup Epoch {} LR {:.3f}".format(epoch+1, cur_lr)) + writer.add_scalar('warmup/lr', cur_lr, cur_step) + + model.train() + + for step, (val_X, val_y) in enumerate(valid_loader): + if step > step_num: + break + + val_X, val_y = val_X.to(device, non_blocking=True), val_y.to(device, non_blocking=True) + N = val_X.size(0) + + optimizer.zero_grad() + logits_main, _ = model(val_X, super_flag=super_flag) + loss = criterion(logits_main, val_y) + loss.backward() + + nn.utils.clip_grad_norm_(model.module.parameters(), config.w_grad_clip) + optimizer.step() + + prec1, prec5 = utils.accuracy(logits_main, val_y, topk=(1, 5)) + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + + else: + reduced_loss = loss.data + + losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num): + logger.info( + "Warmup: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, retrain_epochs, step, + step_num, losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('retrain/loss', reduced_loss.item(), cur_step) + writer.add_scalar('retrain/top1', prec1.item(), cur_step) + writer.add_scalar('retrain/top5', prec5.item(), cur_step) + cur_step += 1 + + if config.local_rank == 0: + logger.info("Warmup: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, retrain_epochs, top1.avg)) + +def validate(valid_loader, model, epoch, cur_step, writer, logger, super_flag, config): + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + model.eval() + device = torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + + with torch.no_grad(): + for step, (X, y) in enumerate(valid_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + logits, _ = model(X, super_flag=False) + loss = criterion(logits, y) + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + + reduced_loss = loss.data + + losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + step_num = len(valid_loader) + + if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0: + logger.info( + "Valid: Epoch {:2d}/{} Step 
{:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.search_iter*config.search_iter_epochs, step, step_num, + losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('val/loss', losses.avg, cur_step) + writer.add_scalar('val/top1', top1.avg, cur_step) + writer.add_scalar('val/top5', top5.avg, cur_step) + + logger.info("Valid: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.search_iter*config.search_iter_epochs, top1.avg)) + + return top1.avg \ No newline at end of file diff --git a/benchmark201/datasets/cifar.py b/benchmark201/datasets/cifar.py new file mode 100644 index 0000000..5531be6 --- /dev/null +++ b/benchmark201/datasets/cifar.py @@ -0,0 +1,103 @@ +import torch +import numpy as np +import torchvision.datasets as dset +import torchvision.transforms as transforms +from datasets.data_utils import SubsetDistributedSampler +from datasets.data_utils import CIFAR10Policy, Cutout + + +def data_transforms_cifar(config, cutout=False): + CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124] + CIFAR_STD = [0.24703233, 0.24348505, 0.26158768] + + if config.use_aa: + train_transform = transforms.Compose([ + transforms.RandomCrop(32, padding=4, fill=128), + transforms.RandomHorizontalFlip(), CIFAR10Policy(), + transforms.ToTensor(), + transforms.Normalize(CIFAR_MEAN, CIFAR_STD), + ]) + else: + train_transform = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(CIFAR_MEAN, CIFAR_STD), + ]) + + + if cutout: + train_transform.transforms.append(Cutout(config.cutout_length)) + + valid_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(CIFAR_MEAN, CIFAR_STD), + ]) + return train_transform, valid_transform + +def get_search_datasets(config): + + dataset = config.dataset.lower() + if dataset == 'cifar10': + dset_cls = dset.CIFAR10 + n_classes = 10 + elif dataset == 'cifar100': + dset_cls = dset.CIFAR100 + n_classes = 100 + else: + raise Exception("Not support dataset!") + + train_transform, valid_transform = data_transforms_cifar(config, cutout=False) + train_data = dset_cls(root=config.data_dir, train=True, download=True, transform=train_transform) + test_data = dset_cls(root=config.data_dir, train=False, download=True, transform=valid_transform) + + num_train = len(train_data) + # num_train = 512 + indices = list(range(num_train)) + split_mid = int(np.floor(0.5 * num_train)) + + train_sampler = SubsetDistributedSampler(train_data, indices[:split_mid]) + valid_sampler = SubsetDistributedSampler(train_data, indices[split_mid:num_train]) + + train_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=train_sampler, + pin_memory=True, num_workers=config.workers) + + valid_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=valid_sampler, + pin_memory=True, num_workers=config.workers) + + return [train_loader, valid_loader], [train_sampler, valid_sampler] + +def get_augment_datasets(config): + + dataset = config.dataset.lower() + if dataset == 'cifar10': + dset_cls = dset.CIFAR10 + elif dataset == 'cifar100': + dset_cls = dset.CIFAR100 + else: + raise Exception("Not support dataset!") + + train_transform, valid_transform = data_transforms_cifar(config, cutout=True) + train_data = dset_cls(root=config.data_dir, train=True, download=True, transform=train_transform) + test_data = dset_cls(root=config.data_dir, 
train=False, download=True, transform=valid_transform) + + + train_sampler = torch.utils.data.distributed.DistributedSampler(train_data) + test_sampler = torch.utils.data.distributed.DistributedSampler(test_data) + + train_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=train_sampler, + pin_memory=True, num_workers=config.workers) + + test_loader = torch.utils.data.DataLoader( + test_data, batch_size=config.batch_size, + sampler=test_sampler, + pin_memory=True, num_workers=config.workers) + + return [train_loader, test_loader], [train_sampler, test_sampler] + diff --git a/benchmark201/datasets/data_utils.py b/benchmark201/datasets/data_utils.py new file mode 100644 index 0000000..eef2575 --- /dev/null +++ b/benchmark201/datasets/data_utils.py @@ -0,0 +1,393 @@ +import math +import torch +import random +import numpy as np +import torch.distributed as dist +from torch.utils.data import Sampler +from PIL import Image, ImageEnhance, ImageOps + +class SubsetDistributedSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. + + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + + .. note:: + Dataset is assumed to be of constant size. + + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. + shuffle (optional): If true (default), sampler will shuffle the indices + """ + + def __init__(self, dataset, indices, num_replicas=None, rank=None, shuffle=True): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.indices = indices + self.num_samples = int(math.ceil(len(self.indices) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + if self.shuffle: + # indices = torch.randperm(len(self.dataset), generator=g).tolist() + indices = list(self.indices[i] for i in torch.randperm(len(self.indices))) + else: + # indices = list(range(len(self.dataset))) + indices = self.indices + + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch + + +class data_prefetcher(): + def __init__(self, loader): + self.loader = iter(loader) + self.stream = torch.cuda.Stream() + self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1) + self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1) + # With Amp, it isn't necessary to manually convert data to half. 
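+        # note: the prefetcher stages the next batch on a side CUDA stream (non_blocking copies) and normalizes it on the GPU, overlapping data transfer with compute.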
+ # if args.fp16: + # self.mean = self.mean.half() + # self.std = self.std.half() + self.preload() + + def preload(self): + try: + self.next_input, self.next_target = next(self.loader) + except StopIteration: + self.next_input = None + self.next_target = None + return + with torch.cuda.stream(self.stream): + self.next_input = self.next_input.cuda(non_blocking=True) + self.next_target = self.next_target.cuda(non_blocking=True) + # With Amp, it isn't necessary to manually convert data to half. + # if args.fp16: + # self.next_input = self.next_input.half() + # else: + self.next_input = self.next_input.float() + self.next_input = self.next_input.sub_(self.mean).div_(self.std) + + def next(self): + torch.cuda.current_stream().wait_stream(self.stream) + input = self.next_input + target = self.next_target + self.preload() + return input, target + +class Cutout(object): + def __init__(self, length): + self.length = length + + def __call__(self, img): + h, w = img.size(1), img.size(2) + mask = np.ones((h, w), np.float32) + y = np.random.randint(h) + x = np.random.randint(w) + + y1 = np.clip(y - self.length // 2, 0, h) + y2 = np.clip(y + self.length // 2, 0, h) + x1 = np.clip(x - self.length // 2, 0, w) + x2 = np.clip(x + self.length // 2, 0, w) + + mask[y1: y2, x1: x2] = 0. + mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img *= mask + + return img + +class ImageNetPolicy(object): + """ Randomly choose one of the best 24 Sub-policies on ImageNet. + Example: + >>> policy = ImageNetPolicy() + >>> transformed = policy(image) + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> ImageNetPolicy(), + >>> transforms.ToTensor()]) + """ + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.4, "posterize", 8, 0.6, "rotate", 9, fillcolor), + SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor), + SubPolicy(0.6, "posterize", 7, 0.6, "posterize", 6, fillcolor), + SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor), + + SubPolicy(0.4, "equalize", 4, 0.8, "rotate", 8, fillcolor), + SubPolicy(0.6, "solarize", 3, 0.6, "equalize", 7, fillcolor), + SubPolicy(0.8, "posterize", 5, 1.0, "equalize", 2, fillcolor), + SubPolicy(0.2, "rotate", 3, 0.6, "solarize", 8, fillcolor), + SubPolicy(0.6, "equalize", 8, 0.4, "posterize", 6, fillcolor), + + SubPolicy(0.8, "rotate", 8, 0.4, "color", 0, fillcolor), + SubPolicy(0.4, "rotate", 9, 0.6, "equalize", 2, fillcolor), + SubPolicy(0.0, "equalize", 7, 0.8, "equalize", 8, fillcolor), + SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), + SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor), + + SubPolicy(0.8, "rotate", 8, 1.0, "color", 2, fillcolor), + SubPolicy(0.8, "color", 8, 0.8, "solarize", 7, fillcolor), + SubPolicy(0.4, "sharpness", 7, 0.6, "invert", 8, fillcolor), + SubPolicy(0.6, "shearX", 5, 1.0, "equalize", 9, fillcolor), + SubPolicy(0.4, "color", 0, 0.6, "equalize", 3, fillcolor), + + SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor), + SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), + SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), + SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor) + ] + + + def __call__(self, img): + policy_idx = random.randint(0, len(self.policies) - 1) + return self.policies[policy_idx](img) + + def __repr__(self): + return "AutoAugment ImageNet 
Policy" + + +class CIFAR10Policy(object): + """ Randomly choose one of the best 25 Sub-policies on CIFAR10. + Example: + >>> policy = CIFAR10Policy() + >>> transformed = policy(image) + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> CIFAR10Policy(), + >>> transforms.ToTensor()]) + """ + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.1, "invert", 7, 0.2, "contrast", 6, fillcolor), + SubPolicy(0.7, "rotate", 2, 0.3, "translateX", 9, fillcolor), + SubPolicy(0.8, "sharpness", 1, 0.9, "sharpness", 3, fillcolor), + SubPolicy(0.5, "shearY", 8, 0.7, "translateY", 9, fillcolor), + SubPolicy(0.5, "autocontrast", 8, 0.9, "equalize", 2, fillcolor), + + SubPolicy(0.2, "shearY", 7, 0.3, "posterize", 7, fillcolor), + SubPolicy(0.4, "color", 3, 0.6, "brightness", 7, fillcolor), + SubPolicy(0.3, "sharpness", 9, 0.7, "brightness", 9, fillcolor), + SubPolicy(0.6, "equalize", 5, 0.5, "equalize", 1, fillcolor), + SubPolicy(0.6, "contrast", 7, 0.6, "sharpness", 5, fillcolor), + + SubPolicy(0.7, "color", 7, 0.5, "translateX", 8, fillcolor), + SubPolicy(0.3, "equalize", 7, 0.4, "autocontrast", 8, fillcolor), + SubPolicy(0.4, "translateY", 3, 0.2, "sharpness", 6, fillcolor), + SubPolicy(0.9, "brightness", 6, 0.2, "color", 8, fillcolor), + SubPolicy(0.5, "solarize", 2, 0.0, "invert", 3, fillcolor), + + SubPolicy(0.2, "equalize", 0, 0.6, "autocontrast", 0, fillcolor), + SubPolicy(0.2, "equalize", 8, 0.6, "equalize", 4, fillcolor), + SubPolicy(0.9, "color", 9, 0.6, "equalize", 6, fillcolor), + SubPolicy(0.8, "autocontrast", 4, 0.2, "solarize", 8, fillcolor), + SubPolicy(0.1, "brightness", 3, 0.7, "color", 0, fillcolor), + + SubPolicy(0.4, "solarize", 5, 0.9, "autocontrast", 3, fillcolor), + SubPolicy(0.9, "translateY", 9, 0.7, "translateY", 9, fillcolor), + SubPolicy(0.9, "autocontrast", 2, 0.8, "solarize", 3, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.1, "invert", 3, fillcolor), + SubPolicy(0.7, "translateY", 9, 0.9, "autocontrast", 1, fillcolor) + ] + + + def __call__(self, img): + policy_idx = random.randint(0, len(self.policies) - 1) + return self.policies[policy_idx](img) + + def __repr__(self): + return "AutoAugment CIFAR10 Policy" + + +class SVHNPolicy(object): + """ Randomly choose one of the best 25 Sub-policies on SVHN. 
+    Example:
+    >>> policy = SVHNPolicy()
+    >>> transformed = policy(image)
+    Example as a PyTorch Transform:
+    >>> transform=transforms.Compose([
+    >>>     transforms.Resize(256),
+    >>>     SVHNPolicy(),
+    >>>     transforms.ToTensor()])
+    """
+    def __init__(self, fillcolor=(128, 128, 128)):
+        self.policies = [
+            SubPolicy(0.9, "shearX", 4, 0.2, "invert", 3, fillcolor),
+            SubPolicy(0.9, "shearY", 8, 0.7, "invert", 5, fillcolor),
+            SubPolicy(0.6, "equalize", 5, 0.6, "solarize", 6, fillcolor),
+            SubPolicy(0.9, "invert", 3, 0.6, "equalize", 3, fillcolor),
+            SubPolicy(0.6, "equalize", 1, 0.9, "rotate", 3, fillcolor),
+
+            SubPolicy(0.9, "shearX", 4, 0.8, "autocontrast", 3, fillcolor),
+            SubPolicy(0.9, "shearY", 8, 0.4, "invert", 5, fillcolor),
+            SubPolicy(0.9, "shearY", 5, 0.2, "solarize", 6, fillcolor),
+            SubPolicy(0.9, "invert", 6, 0.8, "autocontrast", 1, fillcolor),
+            SubPolicy(0.6, "equalize", 3, 0.9, "rotate", 3, fillcolor),
+
+            SubPolicy(0.9, "shearX", 4, 0.3, "solarize", 3, fillcolor),
+            SubPolicy(0.8, "shearY", 8, 0.7, "invert", 4, fillcolor),
+            SubPolicy(0.9, "equalize", 5, 0.6, "translateY", 6, fillcolor),
+            SubPolicy(0.9, "invert", 4, 0.6, "equalize", 7, fillcolor),
+            SubPolicy(0.3, "contrast", 3, 0.8, "rotate", 4, fillcolor),
+
+            SubPolicy(0.8, "invert", 5, 0.0, "translateY", 2, fillcolor),
+            SubPolicy(0.7, "shearY", 6, 0.4, "solarize", 8, fillcolor),
+            SubPolicy(0.6, "invert", 4, 0.8, "rotate", 4, fillcolor),
+            SubPolicy(0.3, "shearY", 7, 0.9, "translateX", 3, fillcolor),
+            SubPolicy(0.1, "shearX", 6, 0.6, "invert", 5, fillcolor),
+
+            SubPolicy(0.7, "solarize", 2, 0.6, "translateY", 7, fillcolor),
+            SubPolicy(0.8, "shearY", 4, 0.8, "invert", 8, fillcolor),
+            SubPolicy(0.7, "shearX", 9, 0.8, "translateY", 3, fillcolor),
+            SubPolicy(0.8, "shearY", 5, 0.7, "autocontrast", 3, fillcolor),
+            SubPolicy(0.7, "shearX", 2, 0.1, "invert", 5, fillcolor)
+        ]
+
+    def __call__(self, img):
+        policy_idx = random.randint(0, len(self.policies) - 1)
+        return self.policies[policy_idx](img)
+
+    def __repr__(self):
+        return "AutoAugment SVHN Policy"
+
+
+class SubPolicy(object):
+    def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
+        ranges = {
+            "shearX": np.linspace(0, 0.3, 10),
+            "shearY": np.linspace(0, 0.3, 10),
+            "translateX": np.linspace(0, 150 / 331, 10),
+            "translateY": np.linspace(0, 150 / 331, 10),
+            "rotate": np.linspace(0, 30, 10),
+            "color": np.linspace(0.0, 0.9, 10),
+            # np.int is deprecated (removed in NumPy >= 1.24); the builtin int is equivalent here
+            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(int),
+            "solarize": np.linspace(256, 0, 10),
+            "contrast": np.linspace(0.0, 0.9, 10),
+            "sharpness": np.linspace(0.0, 0.9, 10),
+            "brightness": np.linspace(0.0, 0.9, 10),
+            "autocontrast": [0] * 10,
+            "equalize": [0] * 10,
+            "invert": [0] * 10
+        }
+
+        # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
+        def rotate_with_fill(img, magnitude):
+            rot = img.convert("RGBA").rotate(magnitude)
+            return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode)
+
+        func = {
+            "shearX": lambda img, magnitude: img.transform(
+                img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
+                Image.BICUBIC, fillcolor=fillcolor),
+            "shearY": lambda img, magnitude: img.transform(
+                img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
+                Image.BICUBIC, fillcolor=fillcolor),
+            "translateX": lambda img, magnitude: img.transform(
+                img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] *
random.choice([-1, 1]), 0, 1, 0), + fillcolor=fillcolor), + "translateY": lambda img, magnitude: img.transform( + img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])), + fillcolor=fillcolor), + "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude), + "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])), + "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude), + "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude), + "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance( + 1 + magnitude * random.choice([-1, 1])), + "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance( + 1 + magnitude * random.choice([-1, 1])), + "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance( + 1 + magnitude * random.choice([-1, 1])), + "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img), + "equalize": lambda img, magnitude: ImageOps.equalize(img), + "invert": lambda img, magnitude: ImageOps.invert(img) + } + + self.p1 = p1 + self.operation1 = func[operation1] + self.magnitude1 = ranges[operation1][magnitude_idx1] + self.p2 = p2 + self.operation2 = func[operation2] + self.magnitude2 = ranges[operation2][magnitude_idx2] + + + def __call__(self, img): + if random.random() < self.p1: img = self.operation1(img, self.magnitude1) + if random.random() < self.p2: img = self.operation2(img, self.magnitude2) + return img + +def fast_collate(batch): + imgs = [img[0] for img in batch] + targets = torch.tensor([target[1] for target in batch], dtype=torch.int64) + w = imgs[0].size[0] + h = imgs[0].size[1] + tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8 ) + for i, img in enumerate(imgs): + nump_array = np.asarray(img, dtype=np.uint8) + if(nump_array.ndim < 3): + nump_array = np.expand_dims(nump_array, axis=-1) + nump_array = np.rollaxis(nump_array, 2) + + tensor[i] += torch.from_numpy(nump_array) + + return tensor, targets + +def mixup_data(x, y, alpha=1.0, use_cuda=True): + '''Returns mixed inputs, pairs of targets, and lambda''' + if alpha > 0: + lam = np.random.beta(alpha, alpha) + else: + lam = 1 + + batch_size = x.size()[0] + if use_cuda: + index = torch.randperm(batch_size).cuda() + else: + index = torch.randperm(batch_size) + + mixed_x = lam * x + (1 - lam) * x[index, :] + y_a, y_b = y, y[index] + return mixed_x, y_a, y_b, lam + + +def mixup_criterion(criterion, pred, y_a, y_b, lam): + return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b) + diff --git a/benchmark201/datasets/imagenet.py b/benchmark201/datasets/imagenet.py new file mode 100644 index 0000000..25e9891 --- /dev/null +++ b/benchmark201/datasets/imagenet.py @@ -0,0 +1,102 @@ +import torch +import numpy as np +import torchvision.datasets as dset +import torchvision.transforms as transforms +from datasets.data_utils import SubsetDistributedSampler +from datasets.data_utils import ImageNetPolicy + +def get_search_datasets(config): + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + train_data = dset.ImageFolder( + config.train_dir, + transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ColorJitter( + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.2), + transforms.ToTensor(), + normalize, + ])) + + test_data = dset.ImageFolder( + config.test_dir, + transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + 
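# Sketch of one training step with the mixup helpers defined above; `model`,
# `criterion`, `optimizer` and the batch tensors `inputs`/`targets` are
# assumed to exist in the surrounding training loop.
mixed_x, y_a, y_b, lam = mixup_data(inputs, targets, alpha=1.0)
outputs = model(mixed_x)
# the mixed loss interpolates the criterion between the two label sets
loss = mixup_criterion(criterion, outputs, y_a, y_b, lam)
loss.backward()
optimizer.step()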
transforms.ToTensor(), + normalize, + ])) + + num_train = len(train_data) + indices = list(range(num_train)) + split_mid = int(np.floor(0.5 * num_train)) + + train_sampler = SubsetDistributedSampler(train_data, indices[:split_mid]) + valid_sampler = SubsetDistributedSampler(train_data, indices[split_mid:num_train]) + + train_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=train_sampler, + pin_memory=True, num_workers=config.workers) + + valid_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=valid_sampler, + pin_memory=True, num_workers=config.workers) + + return [train_loader, valid_loader], [train_sampler, valid_sampler] + +def get_augment_datasets(config): + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + if config.use_aa: + train_data = dset.ImageFolder( + config.train_dir, + transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + ImageNetPolicy(), + transforms.ToTensor(), + normalize, + ])) + else: + train_data = dset.ImageFolder( + config.train_dir, + transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ColorJitter( + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.2), + transforms.ToTensor(), + normalize, + ])) + + test_data = dset.ImageFolder( + config.test_dir, + transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + normalize, + ])) + + train_sampler = torch.utils.data.distributed.DistributedSampler(train_data) + test_sampler = torch.utils.data.distributed.DistributedSampler(test_data) + + train_loader = torch.utils.data.DataLoader( + train_data, batch_size=config.batch_size, + sampler=train_sampler, + pin_memory=True, num_workers=config.workers) + + test_loader = torch.utils.data.DataLoader( + test_data, batch_size=config.batch_size, + sampler=test_sampler, + pin_memory=True, num_workers=config.workers) + + return [train_loader, test_loader], [train_sampler, test_sampler] + diff --git a/benchmark201/models/augment_cells.py b/benchmark201/models/augment_cells.py new file mode 100644 index 0000000..8db1e3d --- /dev/null +++ b/benchmark201/models/augment_cells.py @@ -0,0 +1,49 @@ +""" CNN cell for network augmentation """ +import torch.nn as nn +from copy import deepcopy +from models.ops import OPS + + +# Cell for NAS-Bench-201 +class InferCell(nn.Module): + + def __init__(self, genotype, C_in, C_out, stride): + super(InferCell, self).__init__() + + self.layers = nn.ModuleList() + self.node_IN = [] + self.node_IX = [] + self.genotype = deepcopy(genotype) + for i in range(1, len(genotype)): + node_info = genotype[i-1] + cur_index = [] + cur_innod = [] + for (op_name, op_in) in node_info: + if op_in == 0: + layer = OPS[op_name](C_in , C_out, stride, True, True) + else: + layer = OPS[op_name](C_out, C_out, 1, True, True) + cur_index.append( len(self.layers) ) + cur_innod.append( op_in ) + self.layers.append( layer ) + self.node_IX.append( cur_index ) + self.node_IN.append( cur_innod ) + self.nodes = len(genotype) + self.in_dim = C_in + self.out_dim = C_out + + def extra_repr(self): + string = 'info :: nodes={nodes}, inC={in_dim}, outC={out_dim}'.format(**self.__dict__) + laystr = [] + for i, (node_layers, node_innods) in enumerate(zip(self.node_IX,self.node_IN)): + y = ['I{:}-L{:}'.format(_ii, _il) for _il, _ii in zip(node_layers, node_innods)] + x = '{:}<-({:})'.format(i+1, ','.join(y)) + laystr.append( x 
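# Sketch: the genotype consumed by InferCell is a NAS-Bench-201 Structure,
# one tuple of (op_name, input_node) pairs per intermediate node. The
# concrete op choices below are arbitrary examples, not a searched cell.
from utils.genotypes import Structure
geno = Structure([
    (('nor_conv_3x3', 0),),
    (('nor_conv_3x3', 0), ('skip_connect', 1)),
    (('nor_conv_1x1', 0), ('skip_connect', 1), ('nor_conv_3x3', 2)),
])
cell = InferCell(geno, C_in=16, C_out=16, stride=1)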
) + return string + ', [{:}]'.format( ' | '.join(laystr) ) + ', {:}'.format(self.genotype.tostr()) + + def forward(self, inputs): + nodes = [inputs] + for i, (node_layers, node_innods) in enumerate(zip(self.node_IX,self.node_IN)): + node_feature = sum( self.layers[_il](nodes[_ii]) for _il, _ii in zip(node_layers, node_innods) ) + nodes.append( node_feature ) + return nodes[-1] diff --git a/benchmark201/models/aux_head.py b/benchmark201/models/aux_head.py new file mode 100644 index 0000000..88750f2 --- /dev/null +++ b/benchmark201/models/aux_head.py @@ -0,0 +1,99 @@ +import torch +import torch.nn as nn + + +class DistillHeadCIFAR(nn.Module): + + def __init__(self, C, size, num_classes, bn_affine=True): + """assuming input size 8x8 or 16x16""" + super(DistillHeadCIFAR, self).__init__() + self.features = nn.Sequential( + nn.ReLU(), + nn.AvgPool2d(size, stride=2, padding=0, count_include_pad=False), # image size = 2 x 2 / 6 x 6 + nn.Conv2d(C, 128, 1, bias=False), + # nn.BatchNorm2d(128, affine=bn_affine, track_running_stats=False), + nn.BatchNorm2d(128, affine=bn_affine), + nn.ReLU(), + nn.Conv2d(128, 768, 2, bias=False), + nn.BatchNorm2d(768, affine=bn_affine), + nn.ReLU() + ) + self.classifier = nn.Linear(768, num_classes) + self.gap = nn.AdaptiveAvgPool2d(1) + + def forward(self, x): + x = self.features(x) + x = self.gap(x) + x = self.classifier(x.view(x.size(0),-1)) + return x + +class DistillHeadImagenet(nn.Module): + + def __init__(self, C, size, num_classes, bn_affine=True): + """assuming input size 7x7 or 14x14""" + super(DistillHeadImagenet, self).__init__() + self.features = nn.Sequential( + nn.ReLU(), + nn.AvgPool2d(size, stride=2, padding=0, count_include_pad=False), # image size = 2 x 2 / 6 x 6 + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128, affine=bn_affine), + nn.ReLU(), + nn.Conv2d(128, 768, 2, bias=False), + nn.BatchNorm2d(768, affine=bn_affine), + nn.ReLU() + ) + self.classifier = nn.Linear(768, num_classes) + self.gap = nn.AdaptiveAvgPool2d(1) + + def forward(self, x): + x = self.features(x) + x = self.gap(x) + x = self.classifier(x.view(x.size(0),-1)) + return x + +class AuxiliaryHeadCIFAR(nn.Module): + + def __init__(self, C, size=5, num_classes=10): + """assuming input size 8x8""" + super(AuxiliaryHeadCIFAR, self).__init__() + self.features = nn.Sequential( + nn.ReLU(inplace=True), + nn.AvgPool2d(size, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2 + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True), + nn.Conv2d(128, 768, 2, bias=False), + nn.BatchNorm2d(768), + nn.ReLU(inplace=True) + ) + self.classifier = nn.Linear(768, num_classes) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0),-1)) + return x + + +class AuxiliaryHeadImageNet(nn.Module): + + def __init__(self, C, size=5, num_classes=1000): + """assuming input size 7x7""" + super(AuxiliaryHeadImageNet, self).__init__() + self.features = nn.Sequential( + nn.ReLU(inplace=True), + nn.AvgPool2d(size, stride=2, padding=0, count_include_pad=False), + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True), + nn.Conv2d(128, 768, 2, bias=False), + # NOTE: This batchnorm was omitted in my earlier implementation due to a typo. + # Commenting it out for consistency with the experiments in the paper. 
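# Shape walk-through for DistillHeadCIFAR(C, size=6, ...) above on a
# C x 16 x 16 feature map (an 8 x 8 input gives the 2 x 2 case noted inline):
#   AvgPool2d(6, stride=2)   : (16 - 6) // 2 + 1 = 6  -> C x 6 x 6
#   Conv2d(C, 128, 1)        :                        -> 128 x 6 x 6
#   Conv2d(128, 768, 2)      : 6 - 2 + 1 = 5          -> 768 x 5 x 5
#   AdaptiveAvgPool2d(1)     :                        -> 768 x 1 x 1
#   Linear(768, num_classes) on the flattened vector  -> logits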
+ # nn.BatchNorm2d(768), + nn.ReLU(inplace=True) + ) + self.classifier = nn.Linear(768, num_classes) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0),-1)) + return x diff --git a/benchmark201/models/cdarts_controller.py b/benchmark201/models/cdarts_controller.py new file mode 100644 index 0000000..59e6db9 --- /dev/null +++ b/benchmark201/models/cdarts_controller.py @@ -0,0 +1,374 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import logging +import copy + +from models.search_cells import SearchCell +from models.augment_cells import InferCell +from models.aux_head import DistillHeadCIFAR +from models.ops import ResNetBasicblock, OPS, NAS_BENCH_201 +from utils.genotypes import Structure + +class CDARTSController(nn.Module): + """ CDARTS Controller""" + def __init__(self, config, criterion, n_nodes=4, stem_multiplier=3, track_running_stats=True): + """ + args: + + """ + super(CDARTSController, self).__init__() + + # some settings + self.n_nodes = n_nodes + self.criterion = criterion + self.layer_num = config.layer_num + self.c_in = config.input_channels + self.num_classes = config.n_classes + # cifar10 or imagenet + self.model_type = config.model_type + self.stem_multiplier = stem_multiplier + self.init_channel = config.init_channels + self.ensemble_sum = config.ensemble_sum + self.use_ensemble_param = config.ensemble_param + self.bn_affine = config.bn_affine + self.fix_head = config.fix_head + self.share_fc = config.share_fc + + self.layers = [6, 6, 5] + self.layers_reduction = [True, True, False] + self.augment_layers = [6, 6, 5] + self.num_edge = None + self.edge2index = None + self.nas_genotype = None + self.cell_connects = {} + self.search_space = NAS_BENCH_201 + self.op_names = copy.deepcopy(self.search_space) + self.track_running_stats = track_running_stats + + self.fc_super = None + self.fc_nas = None + self.distill_aux_c1 = None + self.distill_aux_c2 = None + self.feature_extractor = None + self.gap = nn.AdaptiveAvgPool2d(1) + + self.nas_layers = nn.ModuleList([None, None, None]) + self.super_layers = nn.ModuleList() + self.super_layers_arch = nn.ModuleList() + + self.super_layers_pool = nn.ModuleList() + self.super_layers_pool_arch = nn.ModuleList() + self.model_main = None + + self.build_init_model() + + ######################## ---------------------------- ######################## + ######################## Functions for update modules ######################## + ######################## ---------------------------- ######################## + def build_init_model(self): + self.extractor_grad = True + if self.model_type == 'cifar': + self.feature_extractor = self.cifar_stem(self.init_channel * self.stem_multiplier) + else: + raise Exception("error! 
not support now!") + + c_p = self.init_channel * self.stem_multiplier + c_cur = self.init_channel + + + for layer_idx in range(self.layer_num): + reduction = self.layers_reduction[layer_idx] + super_layer = self.add_super_layer(c_cur, c_p, reduction, self.layers[layer_idx]) + super_layer_pool = self.add_super_layer(c_cur, c_p, reduction, self.augment_layers[layer_idx]) + + self.super_layers.append(super_layer) + self.super_layers_pool.append(super_layer_pool) + + if reduction: + c_cur = c_cur * 2 + else: + c_cur = c_cur + c_p = c_cur + + if layer_idx == self.layer_num-3: + self.distill_aux_c1 = c_p + if layer_idx == self.layer_num-2: + self.distill_aux_c2 = c_p + + self.fc_super = nn.Linear(c_p, self.num_classes) + if self.share_fc: + self.fc_nas = self.fc_super + else: + self.fc_nas = nn.Linear(c_p, self.num_classes) + + if self.use_ensemble_param: + self.ensemble_param = nn.Parameter(0.333*torch.rand(3), requires_grad=True) + else: + self.ensemble_param = nn.Parameter(0.333*torch.ones(3), requires_grad=False) + if self.model_type == 'cifar': + self.distill_aux_head1 = DistillHeadCIFAR(self.distill_aux_c1, 6, self.num_classes, bn_affine=self.bn_affine) + self.distill_aux_head2 = DistillHeadCIFAR(self.distill_aux_c2, 6, self.num_classes, bn_affine=self.bn_affine) + else: + raise Exception("error! not support now!") + + self._arch_parameters = nn.Parameter( 1e-3*torch.randn(self.num_edge, len(self.search_space)) ) + self.fix_structure() + + def fix_structure(self): + if self.fix_head: + for n, p in self.distill_aux_head1.named_parameters(): + p.requires_grad = False + for n, p in self.distill_aux_head2.named_parameters(): + p.requires_grad = False + + def build_nas_model(self, genotype): + c_p = self.init_channel * self.stem_multiplier + c_cur = self.init_channel + + for i in range(self.layer_num): + reduction = self.layers_reduction[i] + + self.nas_layers[i] = self.add_nas_layer(c_cur, c_p, reduction, genotype, self.augment_layers[i]) + + if reduction: + c_cur = c_cur * 2 + else: + c_cur = c_cur + c_p = c_cur + + def param_copy_plus(self, target_model, model): + if model: + for target_param, param in zip(target_model.parameters(), model.parameters()): + target_param.data.copy_(param.data) + + def param_copy_plus1(self, target_model, model): + model_dict_keys = model.state_dict().keys() + for n, p in target_model.named_parameters(): + if n in model_dict_keys: + p.data.copy_(model.state_dict()[n]) + + def copy_params_from_super_layer(self): + for layer_idx in range(self.layer_num): + super_layer = self.super_layers_pool[layer_idx] + nas_layer = self.nas_layers[layer_idx] + for super_cell, nas_cell in zip(super_layer, nas_layer): + if isinstance(super_cell, ResNetBasicblock) and isinstance(nas_cell, ResNetBasicblock): + self.param_copy_plus(nas_cell, super_cell) + else: + for edge_key, nas_op in zip(super_cell._modules['edges'].keys(), nas_cell._modules['layers']): + self.param_copy_plus(nas_op, super_cell._modules['edges'][edge_key][self.cell_connects[edge_key]]) + + def copy_params_from_nas_layer(self): + for layer_idx in range(self.layer_num): + super_layer = self.super_layers_pool[layer_idx] + nas_layer = self.nas_layers[layer_idx] + for super_cell, nas_cell in zip(super_layer, nas_layer): + if isinstance(super_cell, ResNetBasicblock) and isinstance(nas_cell, ResNetBasicblock): + self.param_copy_plus(super_cell, nas_cell) + else: + for edge_key, nas_op in zip(super_cell._modules['edges'].keys(), nas_cell._modules['layers']): + 
self.param_copy_plus(super_cell._modules['edges'][edge_key][self.cell_connects[edge_key]], nas_op) + + ######################## -------------------------- ######################## + ######################## Functions for layer search ######################## + ######################## -------------------------- ######################## + + def add_super_layer(self, C_cur, C_p, reduction_cur=False, cell_num=3): + cells = nn.ModuleList() + reduction_idx = cell_num - 1 + + for i in range(cell_num): + if i == reduction_idx and reduction_cur: + C_cur *= 2 + reduction = True + else: + reduction = False + + if reduction: + cell = ResNetBasicblock(C_p, C_cur, 2) + else: + cell = SearchCell(C_p, C_cur, 1, self.n_nodes, self.search_space, self.bn_affine, self.track_running_stats) + if self.num_edge is None: self.num_edge, self.edge2index = cell.num_edges, cell.edge2index + else: assert self.num_edge == cell.num_edges and self.edge2index == cell.edge2index, 'invalid {:} vs. {:}.'.format(self.num_edge, cell.num_edges) + + cells.append(cell) + C_p = cell.out_dim + + return cells + + ######################## ---------------------------- ######################## + ######################## Functions for layer generate ######################## + ######################## ---------------------------- ######################## + + def add_nas_layer(self, C_cur, C_p, reduction_cur, genotype, cell_num=3): + cells = nn.ModuleList() + reduction_idx = cell_num - 1 + + for i in range(cell_num): + if i == reduction_idx and reduction_cur: + C_cur *= 2 + reduction = True + else: + reduction = False + + if reduction: + cell = ResNetBasicblock(C_p, C_cur, 2, True) + else: + cell = InferCell(genotype, C_p, C_cur, 1) + + cells.append(cell) + C_p = cell.out_dim + + return cells + + ######################## ---------------------------- ######################## + ######################## Functions for stem ######################## + ######################## ---------------------------- ######################## + + def cifar_stem(self, init_channel): + C_in = self.c_in + C_cur = init_channel + feature_extractor = nn.ModuleList() + stem = nn.Sequential( + nn.Conv2d(C_in, C_cur, 3, 1, 1, bias=False), + nn.BatchNorm2d(C_cur) + ) + feature_extractor.append(stem) + return feature_extractor + + + ######################## ---------------------------- ######################## + ######################## Functions for forward ######################## + ######################## ---------------------------- ######################## + + def extract_features(self, im): + # feature_extractor is nn.ModuleList() + if len(self.feature_extractor) == 1: + s0 = self.feature_extractor[0](im) + s1 = s0 + return [s0, s1] + elif len(self.feature_extractor) == 2: + s0 = self.feature_extractor[0](im) + s1 = self.feature_extractor[1](s0) + return [s0, s1] + else: + raise NotImplementedError + + def get_aux_logits(self, idx, s1): + if idx == self.layer_num-3: + return self.distill_aux_head1(s1) + if idx == self.layer_num-2: + return self.distill_aux_head2(s1) + return None + + def forward(self, x, super_flag=True, updateType='alpha'): + + if super_flag: + super_layers = self.super_layers + nas_layers_num = 0 + super_layers_num = len(self.super_layers) + else: + nas_layers = self.nas_layers + nas_layers_num = len(self.nas_layers) + super_layers_num = 0 + + outputs = [] + s0, s1 = self.extract_features(x) + + for i in range(nas_layers_num): + s1 = self.forward_nas_layer(s1, nas_layers[i]) + logit = self.get_aux_logits(i, s1) + if logit is not None: + 
outputs.append(logit)
+
+        for j in range(super_layers_num):
+            k = nas_layers_num + j
+            s1 = self.forward_super_layer(s1, super_layers[k], updateType)
+            logit = self.get_aux_logits(k, s1)
+            if logit is not None:
+                outputs.append(logit)
+
+        out = self.gap(s1)
+        out = out.view(out.size(0), -1) # flatten
+        if super_flag:
+            logits = self.fc_super(out)
+        else:
+            logits = self.fc_nas(out)
+
+        outputs.append(logits)
+        logits_output = logits
+
+        ensemble_param = F.softmax(self.ensemble_param, dim=0)
+        if self.ensemble_sum:
+            em_output = ensemble_param[0] * outputs[0] + ensemble_param[1] * outputs[1] + ensemble_param[2] * outputs[2]
+        else:
+            em_output = torch.cat((ensemble_param[0] * outputs[0], ensemble_param[1] * outputs[1], ensemble_param[2] * outputs[2]), 0)
+
+        return logits_output, em_output
+
+    def forward_super_layer(self, s1, super_layer, updateType='alpha'):
+        if updateType == 'weight':
+            alphas = self._arch_parameters
+        else:
+            alphas = F.softmax(self._arch_parameters, dim=-1)
+
+        for cell in super_layer:
+            if isinstance(cell, SearchCell):
+                s1 = cell(s1, alphas)
+            else:
+                s1 = cell(s1)
+        return s1
+
+    def forward_nas_layer(self, s1, nas_layer):
+        for cell in nas_layer:
+            s1 = cell(s1)
+        return s1
+
+    def loss(self, X, y):
+        # forward() returns (logits, ensemble_output); only the plain logits go to the criterion
+        logits, _ = self.forward(X)
+        return self.criterion(logits, y)
+
+    def genotype(self):
+        genotypes = []
+        for i in range(1, self.n_nodes):
+            xlist = []
+            for j in range(i):
+                node_str = '{:}<-{:}'.format(i, j)
+                with torch.no_grad():
+                    weights = self._arch_parameters[ self.edge2index[node_str] ]
+                    op_name = self.op_names[ weights.argmax().item() ]
+                    self.cell_connects[node_str] = weights.argmax().item()
+                xlist.append((op_name, j))
+            genotypes.append( tuple(xlist) )
+        self.nas_genotype = Structure(genotypes)
+        return self.nas_genotype
+
+    def show_alphas(self):
+        with torch.no_grad():
+            return 'arch-parameters :\n{:}'.format( nn.functional.softmax(self._arch_parameters, dim=-1).cpu())
+
+    def get_message(self):
+        # NOTE: relies on a self.cells attribute that CDARTSController never
+        # defines; calling this as-is raises AttributeError (kept for reference)
+        string = self.extra_repr()
+        for i, cell in enumerate(self.cells):
+            string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
+        return string
+
+    def _save_arch_parameters(self):
+        self._saved_arch_parameters = self._arch_parameters.clone()
+
+    def softmax_arch_parameters(self):
+        self._save_arch_parameters()
+        self._arch_parameters.data.copy_(F.softmax(self._arch_parameters, dim=-1))
+
+    def restore_arch_parameters(self):
+        self._arch_parameters.data.copy_(self._saved_arch_parameters)
+        del self._saved_arch_parameters
+
+    def arch_parameters(self):
+        return [self._arch_parameters]
+
+    def l1_loss(self):
+        # penalizes only column 0 of the alphas, i.e. the 'none' op in the search space
+        return torch.mean(torch.abs(self._arch_parameters[:, 0:1]))
+
diff --git a/benchmark201/models/loss.py b/benchmark201/models/loss.py
new file mode 100644
index 0000000..5dfbc91
--- /dev/null
+++ b/benchmark201/models/loss.py
@@ -0,0 +1,36 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+cos = nn.CosineSimilarity(dim=1, eps=1e-6)
+mse = nn.MSELoss()
+smooth_l1 = nn.SmoothL1Loss()
+
+class CrossEntropyLabelSmooth(nn.Module):
+
+    def __init__(self, num_classes, epsilon):
+        super(CrossEntropyLabelSmooth, self).__init__()
+        self.num_classes = num_classes
+        self.epsilon = epsilon
+        self.logsoftmax = nn.LogSoftmax(dim=1)
+
+    def forward(self, inputs, targets):
+        log_probs = self.logsoftmax(inputs)
+        targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
+        targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
+        loss = (-targets * log_probs).mean(0).sum()
+        return loss
+
+def
Loss_interactive(outputs, teacher_outputs, T=2, interactive_type=0): + if interactive_type==0: + loss = nn.KLDivLoss()(F.log_softmax(outputs/T, dim=1), F.softmax(teacher_outputs/T, dim=1)) + elif interactive_type==1: + # Cosine distance + loss = -torch.mean(cos(outputs, teacher_outputs)) + elif interactive_type==2: + loss = mse(outputs, teacher_outputs) + elif interactive_type == 3: + loss = smooth_l1(outputs, teacher_outputs) + else: + raise Exception("Wrong interactive type!") + return loss * (T * T) diff --git a/benchmark201/models/model_augment.py b/benchmark201/models/model_augment.py new file mode 100644 index 0000000..2956fc7 --- /dev/null +++ b/benchmark201/models/model_augment.py @@ -0,0 +1,48 @@ +import torch +import torch.nn as nn + +class ModelAug(nn.Module): + + def __init__(self, feature_extractor, nas_layers, fc_layer, n_nodes=4, aux_head=None): + """ + args: + + """ + super(ModelAug, self).__init__() + self.feature_extractor = feature_extractor + + self.nas_layers = nas_layers + self.nas_layers_num = len(nas_layers) + self.fc = fc_layer + self.aux_head = aux_head + self.gap = nn.AdaptiveAvgPool2d(1) + + def forward(self, x): + logits_aux = None + if len(self.feature_extractor) == 1: + s0 = self.feature_extractor[0](x) + s1 = s0 + elif len(self.feature_extractor) == 2: + s0 = self.feature_extractor[0](x) + s1 = self.feature_extractor[1](s0) + else: + raise NotImplementedError + + for i in range(self.nas_layers_num): + s1 = self.forward_nas_layer(s1, self.nas_layers[i]) + # if i == (self.nas_layers_num * 2 // 3 - 1): + if i == (self.nas_layers_num - 2): + if self.training: + logits_aux = self.aux_head(s1) + + out = self.gap(s1) + out = out.view(out.size(0), -1) # flatten + logits = self.fc(out) + return logits, logits_aux + + def forward_nas_layer(self, s1, nas_layer): + + for cell in nas_layer: + s1 = cell(s1) + return s1 + diff --git a/benchmark201/models/model_test.py b/benchmark201/models/model_test.py new file mode 100644 index 0000000..c2cbbfa --- /dev/null +++ b/benchmark201/models/model_test.py @@ -0,0 +1,169 @@ +import torch +import torch.nn as nn +from models import ops +from models.augment_cells import AugmentCell + +class ModelTest(nn.Module): + + def __init__(self, genotypes_dict, model_type, res_stem=False, init_channel=96, stem_multiplier=3, n_nodes=4, num_classes=1000): + """ + args: + + """ + super(ModelTest, self).__init__() + self.c_in = 3 + self.init_channel = init_channel + self.stem_multiplier = stem_multiplier + self.num_classes = num_classes + self.n_nodes = n_nodes + self.model_type = model_type + self.res_stem = res_stem + + if self.model_type == 'cifar': + reduction_p = False + self.layers_reduction = [True, True, False] + self.augment_layers = [7, 7, 6] + self.nas_layers = nn.ModuleList([None, None, None]) + self.feature_extractor = self.cifar_stem(self.init_channel * self.stem_multiplier) + + elif self.model_type == 'imagenet': + if self.res_stem: + reduction_p = False + self.nas_layers = nn.ModuleList([None, None, None, None]) + self.layers_reduction = [False, True, True, True] + self.augment_layers = [3, 4, 3, 4] + self.feature_extractor = self.resnet_stem(self.init_channel * self.stem_multiplier) + else: + reduction_p = True + self.nas_layers = nn.ModuleList([None, None, None]) + self.layers_reduction = [True, True, False] + self.augment_layers = [5, 5, 4] + self.feature_extractor = self.imagenet_stem(self.init_channel * self.stem_multiplier) + else: + raise Exception("Wrong model type!") + + self.nas_layers_num = len(self.nas_layers) + c_p = 
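# Worked example for the helpers in models/loss.py above; `student_logits`,
# `teacher_logits` and `labels` are assumed tensors. With num_classes=10 and
# epsilon=0.1, CrossEntropyLabelSmooth turns a one-hot target into 0.91 for
# the true class and 0.01 elsewhere (still summing to 1). Loss_interactive
# with interactive_type=0 is the standard KD term: KL divergence between
# temperature-softened logits, rescaled by T*T.
crit = CrossEntropyLabelSmooth(num_classes=10, epsilon=0.1)
hard_loss = crit(student_logits, labels)
kd_loss = Loss_interactive(student_logits, teacher_logits, T=2, interactive_type=0)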
self.init_channel * self.stem_multiplier + c_pp = self.init_channel * self.stem_multiplier + c_cur = self.init_channel + + for layer_idx, genotype in genotypes_dict.items(): + reduction = self.layers_reduction[layer_idx] + nas_layer = self.generate_nas_layer(c_cur, c_p, c_pp, reduction_p, reduction, genotype, self.augment_layers[layer_idx]) + self.nas_layers[layer_idx] = nas_layer + + if reduction: + c_p = c_cur * 2 * self.n_nodes + else: + c_p = c_cur * self.n_nodes + + if self.res_stem: + c_pp = c_p + reduction_p = False + else: + c_pp = c_cur * self.n_nodes + reduction_p = reduction + + if reduction: + c_cur = c_cur * 2 + else: + c_cur = c_cur + + self.fc = nn.Linear(c_p, self.num_classes) + self.gap = nn.AdaptiveAvgPool2d(1) + + def generate_nas_layer(self, C_cur, C_p, C_pp, reduction_p, reduction_cur, genotype, cell_num=3, bn_affine=True): + cells = nn.ModuleList() + if self.res_stem: + reduction_idx = 0 + else: + reduction_idx = cell_num - 1 + + for i in range(cell_num): + if i == reduction_idx and reduction_cur: + C_cur *= 2 + reduction = True + else: + reduction = False + + cell = AugmentCell(genotype, C_pp, C_p, C_cur, reduction_p, reduction, bn_affine) + reduction_p = reduction + cells.append(cell) + C_cur_out = C_cur * len(cell.concat) + C_pp, C_p = C_p, C_cur_out + + return cells + + def forward(self, x): + s0, s1 = self.extract_features(x) + for i in range(self.nas_layers_num): + s0, s1 = self.forward_nas_layer(s0, s1, self.nas_layers[i]) + + out = self.gap(s1) + out = out.view(out.size(0), -1) # flatten + logits = self.fc(out) + return logits, logits + + def forward_nas_layer(self, s0, s1, nas_layer): + for cell in nas_layer: + s0, s1 = s1, cell(s0, s1) + return s0, s1 + + def extract_features(self, im): + # feature_extractor is nn.ModuleList() + if len(self.feature_extractor) == 1: + s0 = self.feature_extractor[0](im) + s1 = s0 + return [s0, s1] + elif len(self.feature_extractor) == 2: + s0 = self.feature_extractor[0](im) + s1 = self.feature_extractor[1](s0) + return [s0, s1] + else: + raise NotImplementedError + + def resnet_stem(self, inplanes=64): + C_in = self.c_in + feature_extractor = nn.ModuleList() + stem = nn.Sequential( + nn.Conv2d(C_in, inplanes, kernel_size=7, stride=2, padding=3, bias=False), + nn.BatchNorm2d(inplanes), + nn.ReLU(inplace=True), + # the layer1 is concated with maxpool + nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + ) + feature_extractor.append(stem) + return feature_extractor + + def cifar_stem(self, init_channel): + C_in = self.c_in + C_cur = init_channel + feature_extractor = nn.ModuleList() + stem = nn.Sequential( + nn.Conv2d(C_in, C_cur, 3, 1, 1, bias=False), + nn.BatchNorm2d(C_cur) + ) + feature_extractor.append(stem) + return feature_extractor + + def imagenet_stem(self, init_channel): + C_in = self.c_in + C_cur = init_channel + feature_extractor = nn.ModuleList() + stem0 = nn.Sequential( + nn.Conv2d(C_in, C_cur // 2, kernel_size=3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(C_cur // 2), + nn.ReLU(inplace=True), + nn.Conv2d(C_cur // 2, C_cur, 3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(C_cur), + ) + + stem1 = nn.Sequential( + nn.ReLU(inplace=True), + nn.Conv2d(C_cur, C_cur, 3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(C_cur), + ) + feature_extractor.append(stem0) + feature_extractor.append(stem1) + return feature_extractor + diff --git a/benchmark201/models/ops.py b/benchmark201/models/ops.py new file mode 100644 index 0000000..d8a7fb6 --- /dev/null +++ b/benchmark201/models/ops.py @@ -0,0 +1,184 @@ +import 
torch +import torch.nn as nn + +__all__ = ['OPS', 'ResNetBasicblock', 'SearchSpaceNames'] + +OPS = { + 'none' : lambda C_in, C_out, stride, affine, track_running_stats: Zero(C_in, C_out, stride), + 'avg_pool_3x3' : lambda C_in, C_out, stride, affine, track_running_stats: POOLING(C_in, C_out, stride, 'avg', affine, track_running_stats), + 'max_pool_3x3' : lambda C_in, C_out, stride, affine, track_running_stats: POOLING(C_in, C_out, stride, 'max', affine, track_running_stats), + 'nor_conv_7x7' : lambda C_in, C_out, stride, affine, track_running_stats: ReLUConvBN(C_in, C_out, (7,7), (stride,stride), (3,3), (1,1), affine, track_running_stats), + 'nor_conv_3x3' : lambda C_in, C_out, stride, affine, track_running_stats: ReLUConvBN(C_in, C_out, (3,3), (stride,stride), (1,1), (1,1), affine, track_running_stats), + 'nor_conv_1x1' : lambda C_in, C_out, stride, affine, track_running_stats: ReLUConvBN(C_in, C_out, (1,1), (stride,stride), (0,0), (1,1), affine, track_running_stats), + 'dua_sepc_3x3' : lambda C_in, C_out, stride, affine, track_running_stats: DualSepConv(C_in, C_out, (3,3), (stride,stride), (1,1), (1,1), affine, track_running_stats), + 'dua_sepc_5x5' : lambda C_in, C_out, stride, affine, track_running_stats: DualSepConv(C_in, C_out, (5,5), (stride,stride), (2,2), (1,1), affine, track_running_stats), + 'dil_sepc_3x3' : lambda C_in, C_out, stride, affine, track_running_stats: SepConv(C_in, C_out, (3,3), (stride,stride), (2,2), (2,2), affine, track_running_stats), + 'dil_sepc_5x5' : lambda C_in, C_out, stride, affine, track_running_stats: SepConv(C_in, C_out, (5,5), (stride,stride), (4,4), (2,2), affine, track_running_stats), + 'skip_connect' : lambda C_in, C_out, stride, affine, track_running_stats: Identity() if stride == 1 and C_in == C_out else FactorizedReduce(C_in, C_out, stride, affine, track_running_stats), +} + +CONNECT_NAS_BENCHMARK = ['none', 'skip_connect', 'nor_conv_3x3'] +NAS_BENCH_201 = ['none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3'] +DARTS_SPACE = ['none', 'skip_connect', 'dua_sepc_3x3', 'dua_sepc_5x5', 'dil_sepc_3x3', 'dil_sepc_5x5', 'avg_pool_3x3', 'max_pool_3x3'] + +SearchSpaceNames = {'connect-nas' : CONNECT_NAS_BENCHMARK, + 'nas-bench-201': NAS_BENCH_201, + 'darts' : DARTS_SPACE} + + +class ReLUConvBN(nn.Module): + + def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine, track_running_stats=True): + super(ReLUConvBN, self).__init__() + self.op = nn.Sequential( + nn.ReLU(inplace=False), + nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False), + nn.BatchNorm2d(C_out, affine=affine, track_running_stats=track_running_stats) + ) + + def forward(self, x): + return self.op(x) + + +class SepConv(nn.Module): + + def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine, track_running_stats=True): + super(SepConv, self).__init__() + self.op = nn.Sequential( + nn.ReLU(inplace=False), + nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=C_in, bias=False), + nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False), + nn.BatchNorm2d(C_out, affine=affine, track_running_stats=track_running_stats), + ) + + def forward(self, x): + return self.op(x) + + +class DualSepConv(nn.Module): + + def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine, track_running_stats=True): + super(DualSepConv, self).__init__() + self.op_a = SepConv(C_in, C_in , kernel_size, stride, padding, dilation, affine, 
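# Sketch: every OPS entry shares the factory signature
#   OPS[name](C_in, C_out, stride, affine, track_running_stats) -> nn.Module,
# which is what lets a search cell build all candidate edges uniformly.
op = OPS['nor_conv_3x3'](16, 16, 1, True, True)
y = op(torch.randn(2, 16, 8, 8))                     # -> [2, 16, 8, 8]
down = OPS['skip_connect'](16, 32, 2, True, True)    # FactorizedReduce
z = down(torch.randn(2, 16, 8, 8))                   # -> [2, 32, 4, 4]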
track_running_stats)
+        self.op_b = SepConv(C_in, C_out, kernel_size, 1, padding, dilation, affine, track_running_stats)
+
+    def forward(self, x):
+        x = self.op_a(x)
+        x = self.op_b(x)
+        return x
+
+
+class ResNetBasicblock(nn.Module):
+
+    def __init__(self, inplanes, planes, stride, affine=True):
+        super(ResNetBasicblock, self).__init__()
+        assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride)
+        self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1, affine)
+        self.conv_b = ReLUConvBN( planes, planes, 3, 1, 1, 1, affine)
+        if stride == 2:
+            self.downsample = nn.Sequential(
+                nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
+                nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False))
+        elif inplanes != planes:
+            self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1, affine)
+        else:
+            self.downsample = None
+        self.in_dim = inplanes
+        self.out_dim = planes
+        self.stride = stride
+        self.num_conv = 2
+
+    def extra_repr(self):
+        string = '{name}(inC={in_dim}, outC={out_dim}, stride={stride})'.format(name=self.__class__.__name__, **self.__dict__)
+        return string
+
+    def forward(self, inputs):
+        basicblock = self.conv_a(inputs)
+        basicblock = self.conv_b(basicblock)
+
+        if self.downsample is not None:
+            residual = self.downsample(inputs)
+        else:
+            residual = inputs
+        return residual + basicblock
+
+
+class POOLING(nn.Module):
+
+    def __init__(self, C_in, C_out, stride, mode, affine=True, track_running_stats=True):
+        super(POOLING, self).__init__()
+        if C_in == C_out:
+            self.preprocess = None
+        else:
+            # the 1x1 preprocess conv needs an explicit dilation=1; without it,
+            # `affine` and `track_running_stats` slide into the wrong positional slots
+            self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 1, affine, track_running_stats)
+        if mode == 'avg'  : self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)
+        elif mode == 'max': self.op = nn.MaxPool2d(3, stride=stride, padding=1)
+        else              : raise ValueError('Invalid mode={:} in POOLING'.format(mode))
+
+    def forward(self, inputs):
+        if self.preprocess: x = self.preprocess(inputs)
+        else              : x = inputs
+        return self.op(x)
+
+
+class Identity(nn.Module):
+
+    def __init__(self):
+        super(Identity, self).__init__()
+
+    def forward(self, x):
+        return x
+
+
+class Zero(nn.Module):
+
+    def __init__(self, C_in, C_out, stride):
+        super(Zero, self).__init__()
+        self.C_in = C_in
+        self.C_out = C_out
+        self.stride = stride
+        self.is_zero = True
+
+    def forward(self, x):
+        if self.C_in == self.C_out:
+            if self.stride == 1: return x.mul(0.)
+            else               : return x[:,:,::self.stride,::self.stride].mul(0.)
+ else: + shape = list(x.shape) + shape[1] = self.C_out + zeros = x.new_zeros(shape, dtype=x.dtype, device=x.device) + return zeros + + def extra_repr(self): + return 'C_in={C_in}, C_out={C_out}, stride={stride}'.format(**self.__dict__) + + +class FactorizedReduce(nn.Module): + + def __init__(self, C_in, C_out, stride, affine, track_running_stats): + super(FactorizedReduce, self).__init__() + self.stride = stride + self.C_in = C_in + self.C_out = C_out + self.relu = nn.ReLU(inplace=False) + if stride == 2: + #assert C_out % 2 == 0, 'C_out : {:}'.format(C_out) + C_outs = [C_out // 2, C_out - C_out // 2] + self.convs = nn.ModuleList() + for i in range(2): + self.convs.append( nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False) ) + self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0) + else: + raise ValueError('Invalid stride : {:}'.format(stride)) + self.bn = nn.BatchNorm2d(C_out, affine=affine, track_running_stats=track_running_stats) + + def forward(self, x): + x = self.relu(x) + y = self.pad(x) + out = torch.cat([self.convs[0](x), self.convs[1](y[:,:,1:,1:])], dim=1) + out = self.bn(out) + return out + + def extra_repr(self): + return 'C_in={C_in}, C_out={C_out}, stride={stride}'.format(**self.__dict__) \ No newline at end of file diff --git a/benchmark201/models/search_cells.py b/benchmark201/models/search_cells.py new file mode 100644 index 0000000..a471bae --- /dev/null +++ b/benchmark201/models/search_cells.py @@ -0,0 +1,117 @@ +""" CNN cell for architecture search """ +import torch +import torch.nn as nn +from copy import deepcopy +from models.ops import ResNetBasicblock, OPS, NAS_BENCH_201 +from utils.genotypes import Structure + +# This module is used for NAS-Bench-201, represents a small search space with a complete DAG +class SearchCell(nn.Module): + + def __init__(self, C_in, C_out, stride, max_nodes, op_names, affine=False, track_running_stats=True): + super(SearchCell, self).__init__() + + self.op_names = deepcopy(op_names) + self.edges = nn.ModuleDict() + self.max_nodes = max_nodes + self.in_dim = C_in + self.out_dim = C_out + for i in range(1, max_nodes): + for j in range(i): + node_str = '{:}<-{:}'.format(i, j) + if j == 0: + xlists = [OPS[op_name](C_in , C_out, stride, affine, track_running_stats) for op_name in op_names] + else: + xlists = [OPS[op_name](C_in , C_out, 1, affine, track_running_stats) for op_name in op_names] + self.edges[ node_str ] = nn.ModuleList( xlists ) + self.edge_keys = sorted(list(self.edges.keys())) + self.edge2index = {key:i for i, key in enumerate(self.edge_keys)} + self.num_edges = len(self.edges) + + def extra_repr(self): + string = 'info :: {max_nodes} nodes, inC={in_dim}, outC={out_dim}'.format(**self.__dict__) + return string + + def forward(self, inputs, weightss): + nodes = [inputs] + for i in range(1, self.max_nodes): + inter_nodes = [] + for j in range(i): + node_str = '{:}<-{:}'.format(i, j) + weights = weightss[ self.edge2index[node_str] ] + inter_nodes.append( sum( layer(nodes[j]) * w for layer, w in zip(self.edges[node_str], weights) ) ) + nodes.append( sum(inter_nodes) ) + return nodes[-1] + + # GDAS + def forward_gdas(self, inputs, hardwts, index): + nodes = [inputs] + for i in range(1, self.max_nodes): + inter_nodes = [] + for j in range(i): + node_str = '{:}<-{:}'.format(i, j) + weights = hardwts[ self.edge2index[node_str] ] + argmaxs = index[ self.edge2index[node_str] ].item() + weigsum = sum( weights[_ie] * edge(nodes[j]) if _ie == argmaxs else weights[_ie] for _ie, edge in enumerate(self.edges[node_str]) ) + 
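# Sketch: driving the SearchCell above with an architecture-weight matrix.
# With max_nodes=4 there are 1 + 2 + 3 = 6 edges, so the weights have shape
# [num_edges, len(op_names)]; forward() mixes all candidate ops per edge.
cell = SearchCell(16, 16, 1, 4, NAS_BENCH_201)
alphas = torch.softmax(torch.randn(cell.num_edges, len(NAS_BENCH_201)), dim=-1)
out = cell(torch.randn(2, 16, 8, 8), alphas)   # -> [2, 16, 8, 8]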
inter_nodes.append( weigsum ) + nodes.append( sum(inter_nodes) ) + return nodes[-1] + + # joint + def forward_joint(self, inputs, weightss): + nodes = [inputs] + for i in range(1, self.max_nodes): + inter_nodes = [] + for j in range(i): + node_str = '{:}<-{:}'.format(i, j) + weights = weightss[ self.edge2index[node_str] ] + #aggregation = sum( layer(nodes[j]) * w for layer, w in zip(self.edges[node_str], weights) ) / weights.numel() + aggregation = sum( layer(nodes[j]) * w for layer, w in zip(self.edges[node_str], weights) ) + inter_nodes.append( aggregation ) + nodes.append( sum(inter_nodes) ) + return nodes[-1] + + # uniform random sampling per iteration, SETN + def forward_urs(self, inputs): + nodes = [inputs] + for i in range(1, self.max_nodes): + while True: # to avoid select zero for all ops + sops, has_non_zero = [], False + for j in range(i): + node_str = '{:}<-{:}'.format(i, j) + candidates = self.edges[node_str] + select_op = random.choice(candidates) + sops.append( select_op ) + if not hasattr(select_op, 'is_zero') or select_op.is_zero is False: has_non_zero=True + if has_non_zero: break + inter_nodes = [] + for j, select_op in enumerate(sops): + inter_nodes.append( select_op(nodes[j]) ) + nodes.append( sum(inter_nodes) ) + return nodes[-1] + + # select the argmax + def forward_select(self, inputs, weightss): + nodes = [inputs] + for i in range(1, self.max_nodes): + inter_nodes = [] + for j in range(i): + node_str = '{:}<-{:}'.format(i, j) + weights = weightss[ self.edge2index[node_str] ] + inter_nodes.append( self.edges[node_str][ weights.argmax().item() ]( nodes[j] ) ) + #inter_nodes.append( sum( layer(nodes[j]) * w for layer, w in zip(self.edges[node_str], weights) ) ) + nodes.append( sum(inter_nodes) ) + return nodes[-1] + + # forward with a specific structure + def forward_dynamic(self, inputs, structure): + nodes = [inputs] + for i in range(1, self.max_nodes): + cur_op_node = structure.nodes[i-1] + inter_nodes = [] + for op_name, j in cur_op_node: + node_str = '{:}<-{:}'.format(i, j) + op_index = self.op_names.index( op_name ) + inter_nodes.append( self.edges[node_str][op_index]( nodes[j] ) ) + nodes.append( sum(inter_nodes) ) + return nodes[-1] diff --git a/benchmark201/run_search_cifar_1gpu.sh b/benchmark201/run_search_cifar_1gpu.sh new file mode 100644 index 0000000..6118043 --- /dev/null +++ b/benchmark201/run_search_cifar_1gpu.sh @@ -0,0 +1,20 @@ +NGPUS=1 +SGPU=0 +EGPU=$[NGPUS+SGPU-1] +GPU_ID=`seq -s , $SGPU $EGPU` +CUDA_VISIBLE_DEVICES=$GPU_ID python -m torch.distributed.launch --nproc_per_node=$NGPUS search.py \ + --name cifar10-search --dataset cifar10 --model_type cifar \ + --n_classes 10 --init_channels 16 --layer_num 3 --stem_multiplier 1 \ + --batch_size 64 --sample_ratio 1.0 \ + --workers 1 --print_freq 10 \ + --distributed --world_size $NGPUS --dist_url 'tcp://127.0.0.1:23343' \ + --use_apex --sync_param \ + --regular --regular_ratio 0.667 --regular_coeff 5 \ + --clean_arch --loss_alpha 1 \ + --ensemble_param \ + --w_lr 0.08 --alpha_lr 3e-4 --nasnet_lr 0.08 \ + --w_weight_decay 3e-4 --alpha_weight_decay 0. 
\ + --one_stage --repeat_cell \ + --interactive_type 3 \ + --pretrain_epochs 0 --pretrain_decay 0 \ + --search_iter 50 --search_iter_epochs 1 --nasnet_warmup 1 diff --git a/benchmark201/search.py b/benchmark201/search.py new file mode 100644 index 0000000..2841ba4 --- /dev/null +++ b/benchmark201/search.py @@ -0,0 +1,250 @@ +""" Search cell """ +import os +import copy +import apex +import json +import torch +import time +import math +import torch.nn as nn +import numpy as np +import torch.distributed as dist + +from tensorboardX import SummaryWriter +from models.cdarts_controller import CDARTSController +from utils.visualize import plot +from utils import utils +from datasets.data_utils import SubsetDistributedSampler +from core.search_function import search, retrain_warmup, validate +from nas_201_api import NASBench201API as API + +from configs.config import SearchConfig +config = SearchConfig() + +if 'cifar' in config.dataset: + from datasets.cifar import get_search_datasets +elif 'imagenet' in config.dataset: + from datasets.imagenet import get_search_datasets + +# tensorboard +writer = SummaryWriter(log_dir=os.path.join(config.path, "tb")) +writer.add_text('config', config.as_markdown(), 0) + +logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name))) +if config.local_rank == 0: + config.print_params(logger.info) + +try: + os.makedirs(config.plot_path) +except: + pass + +if config.use_apex: + import apex + from apex.parallel import DistributedDataParallel as DDP +else: + DDP = torch.nn.parallel.DistributedDataParallel + + +def main(): + logger.info("Logger is set - training start") + + + # set seed + np.random.seed(config.seed) + torch.manual_seed(config.seed) + torch.cuda.manual_seed_all(config.seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = True + + # TODO + # api = None + api = API('/home/hongyuan/benchmark/NAS-Bench-201-v1_0-e61699.pth') + + if config.distributed: + config.gpu = config.local_rank % torch.cuda.device_count() + torch.cuda.set_device(config.gpu) + # distributed init + torch.distributed.init_process_group(backend='nccl', init_method=config.dist_url, + world_size=config.world_size, rank=config.local_rank) + + config.world_size = torch.distributed.get_world_size() + + config.total_batch_size = config.world_size * config.batch_size + else: + config.total_batch_size = config.batch_size + + + loaders, samplers = get_search_datasets(config) + train_loader, valid_loader = loaders + train_sampler, valid_sampler = samplers + + net_crit = nn.CrossEntropyLoss().cuda() + controller = CDARTSController(config, net_crit, n_nodes=4, stem_multiplier=config.stem_multiplier) + + resume_state = None + if config.resume: + resume_state = torch.load(config.resume_path, map_location='cpu') + + if config.resume: + controller.load_state_dict(resume_state['controller']) + + controller = controller.cuda() + if config.sync_bn: + if config.use_apex: + controller = apex.parallel.convert_syncbn_model(controller) + else: + controller = torch.nn.SyncBatchNorm.convert_sync_batchnorm(controller) + + if config.use_apex: + controller = DDP(controller, delay_allreduce=True) + else: + controller = DDP(controller, device_ids=[config.gpu]) + + # warm up model_search + if config.ensemble_param: + w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()}, + {"params": controller.module.super_layers.parameters()}, + {"params": controller.module.fc_super.parameters()}, + {"params": 
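# The CosineAnnealingLR schedules in this file follow
#   lr(t) = eta_min + 0.5 * (lr0 - eta_min) * (1 + cos(pi * t / T_max)).
# With the 1-GPU script's --w_lr 0.08, the default w_lr_min of 0.001 seen in
# the log below, and T_max = search_iter * search_iter_epochs = 50, the
# midpoint t=25 gives lr = 0.001 + 0.5 * 0.079 * (1 + cos(pi/2)) = 0.0405.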
controller.module.distill_aux_head1.parameters()}, + {"params": controller.module.distill_aux_head2.parameters()}, + {"params": controller.module.ensemble_param}], + lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay) + else: + w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()}, + {"params": controller.module.super_layers.parameters()}, + {"params": controller.module.fc_super.parameters()}, + {"params": controller.module.distill_aux_head1.parameters()}, + {"params": controller.module.distill_aux_head2.parameters()}], + lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay) + + + # search training loop + sta_search_iter = 0 + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( + w_optim, config.search_iter * config.search_iter_epochs, eta_min=config.w_lr_min) + lr_scheduler_retrain = nn.ModuleList() + alpha_optim = nn.ModuleList() + optimizer = nn.ModuleList() + sub_epoch = 0 + + for search_iter in range(sta_search_iter, config.search_iter): + if search_iter < config.pretrain_epochs: + if config.local_rank == 0: + logger.info("####### Super model warmup #######") + train_sampler.set_epoch(search_iter) + retrain_warmup(train_loader, controller, w_optim, search_iter, writer, logger, True, config.pretrain_epochs, config) + #lr_scheduler.step() + else: + # build new controller + genotype = controller.module.genotype() + controller.module.build_nas_model(genotype) + + controller_b = copy.deepcopy(controller.module) + del controller + controller = controller_b.cuda() + + # sync params from super layer pool + controller.copy_params_from_super_layer() + + if config.sync_bn: + if config.use_apex: + controller = apex.parallel.convert_syncbn_model(controller) + else: + controller = torch.nn.SyncBatchNorm.convert_sync_batchnorm(controller) + + if config.use_apex: + controller = DDP(controller, delay_allreduce=True) + else: + controller = DDP(controller, device_ids=[config.gpu]) + + # weights optimizer + if config.ensemble_param: + w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()}, + {"params": controller.module.super_layers.parameters()}, + {"params": controller.module.fc_super.parameters()}, + {"params": controller.module.distill_aux_head1.parameters()}, + {"params": controller.module.distill_aux_head2.parameters()}, + {"params": controller.module.ensemble_param}], + lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay) + else: + w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()}, + {"params": controller.module.super_layers.parameters()}, + {"params": controller.module.fc_super.parameters()}, + {"params": controller.module.distill_aux_head1.parameters()}, + {"params": controller.module.distill_aux_head2.parameters()}], + lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay) + # arch_params optimizer + alpha_optim = torch.optim.Adam(controller.module.arch_parameters(), config.alpha_lr, betas=(0.5, 0.999), + weight_decay=config.alpha_weight_decay) + + + if config.ensemble_param: + optimizer = torch.optim.SGD([{"params": controller.module.feature_extractor.parameters()}, + {"params": controller.module.nas_layers.parameters()}, + {"params": controller.module.ensemble_param}, + {"params": controller.module.distill_aux_head1.parameters()}, + {"params": controller.module.distill_aux_head2.parameters()}, + {"params": controller.module.fc_nas.parameters()}], + lr=config.nasnet_lr, 
momentum=config.w_momentum, weight_decay=config.w_weight_decay) + else: + optimizer = torch.optim.SGD([{"params": controller.module.feature_extractor.parameters()}, + {"params": controller.module.nas_layers.parameters()}, + {"params": controller.module.distill_aux_head1.parameters()}, + {"params": controller.module.distill_aux_head2.parameters()}, + {"params": controller.module.fc_nas.parameters()}], + lr=config.nasnet_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay) + + lr_scheduler_retrain = torch.optim.lr_scheduler.CosineAnnealingLR( + optimizer, config.search_iter_epochs, eta_min=config.w_lr_min) + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( + w_optim, config.search_iter * config.search_iter_epochs, eta_min=config.w_lr_min) + + # warmup model main + if config.local_rank == 0: + logger.info("####### Sub model warmup #######") + for warmup_epoch in range(config.nasnet_warmup): + valid_sampler.set_epoch(warmup_epoch) + retrain_warmup(valid_loader, controller, optimizer, warmup_epoch, writer, logger, False, config.nasnet_warmup, config) + + + lr_search = lr_scheduler.get_lr()[0] + lr_main = lr_scheduler_retrain.get_lr()[0] + + search_epoch = search_iter + + # reset iterators + train_sampler.set_epoch(search_epoch) + valid_sampler.set_epoch(search_epoch) + + # training + search(train_loader, valid_loader, controller, optimizer, w_optim, alpha_optim, search_epoch, writer, logger, config) + + # sync params to super layer pool + controller.module.copy_params_from_nas_layer() + + # nasbench201 + if config.local_rank == 0: + logger.info('{}'.format(controller.module._arch_parameters)) + result = api.query_by_arch(controller.module.genotype()) + logger.info('{:}'.format(result)) + cifar10_train, cifar10_test, cifar100_train, cifar100_valid, \ + cifar100_test, imagenet16_train, imagenet16_valid, imagenet16_test = utils.distill(result) + + writer.add_scalars('nasbench201/cifar10', {'train':cifar10_train,'test':cifar10_test}, search_epoch) + writer.add_scalars('nasbench201/cifar100', {'train':cifar100_train,'valid':cifar100_valid, 'test':cifar100_test}, search_epoch) + writer.add_scalars('nasbench201/imagenet16', {'train':imagenet16_train,'valid':imagenet16_valid, 'test':imagenet16_test}, search_epoch) + + + #lr_scheduler.step() + #lr_scheduler_retrain.step() + torch.cuda.empty_cache() + +if __name__ == "__main__": + sta_time = time.time() + main() + search_time = time.time() - sta_time + search_hour = math.floor(search_time / 3600) + search_min = math.floor(search_time / 60 - search_hour * 60) + if config.local_rank==0: + logger.info("Search time: hour: {} minute: {}".format(search_hour, search_min)) diff --git a/benchmark201/search/cifar10-search/cifar10-search.log b/benchmark201/search/cifar10-search/cifar10-search.log new file mode 100644 index 0000000..955e7b6 --- /dev/null +++ b/benchmark201/search/cifar10-search/cifar10-search.log @@ -0,0 +1,7157 @@ +10/22 12:30:18 AM | +10/22 12:30:18 AM | Parameters: +10/22 12:30:18 AM | ALPHA_LR=0.0006 +10/22 12:30:18 AM | ALPHA_WEIGHT_DECAY=0.001 +10/22 12:30:18 AM | AUX_WEIGHT=0.4 +10/22 12:30:18 AM | BATCH_SIZE=128 +10/22 12:30:18 AM | CELLS_NUM=3 +10/22 12:30:18 AM | CLEAN_ARCH=False +10/22 12:30:18 AM | CUTOUT_LENGTH=16 +10/22 12:30:18 AM | DATA_DIR=/data/cifar +10/22 12:30:18 AM | DATA_PATH=./data/ +10/22 12:30:18 AM | DATASET=imagenet +10/22 12:30:18 AM | DIST_URL=tcp://127.0.0.1:23456 +10/22 12:30:18 AM | DISTRIBUTED=False +10/22 12:30:18 AM | DROP_PATH_PROB=0.2 +10/22 12:30:18 AM | ENSEMBLE=False +10/22 12:30:18 
+            # nasbench201
+            if config.local_rank == 0:
+                logger.info('{}'.format(controller.module._arch_parameters))
+                result = api.query_by_arch(controller.module.genotype())
+                logger.info('{:}'.format(result))
+                cifar10_train, cifar10_test, cifar100_train, cifar100_valid, \
+                    cifar100_test, imagenet16_train, imagenet16_valid, imagenet16_test = utils.distill(result)
+
+                writer.add_scalars('nasbench201/cifar10', {'train': cifar10_train, 'test': cifar10_test}, search_epoch)
+                writer.add_scalars('nasbench201/cifar100', {'train': cifar100_train, 'valid': cifar100_valid, 'test': cifar100_test}, search_epoch)
+                writer.add_scalars('nasbench201/imagenet16', {'train': imagenet16_train, 'valid': imagenet16_valid, 'test': imagenet16_test}, search_epoch)
+
+        # lr_scheduler.step()
+        # lr_scheduler_retrain.step()
+        torch.cuda.empty_cache()
+
+if __name__ == "__main__":
+    sta_time = time.time()
+    main()
+    search_time = time.time() - sta_time
+    search_hour = math.floor(search_time / 3600)
+    search_min = math.floor(search_time / 60 - search_hour * 60)
+    if config.local_rank == 0:
+        logger.info("Search time: hour: {} minute: {}".format(search_hour, search_min))
diff --git a/benchmark201/search/cifar10-search/cifar10-search.log b/benchmark201/search/cifar10-search/cifar10-search.log
new file mode 100644
index 0000000..955e7b6
--- /dev/null
+++ b/benchmark201/search/cifar10-search/cifar10-search.log
@@ -0,0 +1,7157 @@
+10/22 12:30:18 AM |
+10/22 12:30:18 AM | Parameters:
+10/22 12:30:18 AM | ALPHA_LR=0.0006
+10/22 12:30:18 AM | ALPHA_WEIGHT_DECAY=0.001
+10/22 12:30:18 AM | AUX_WEIGHT=0.4
+10/22 12:30:18 AM | BATCH_SIZE=128
+10/22 12:30:18 AM | CELLS_NUM=3
+10/22 12:30:18 AM | CLEAN_ARCH=False
+10/22 12:30:18 AM | CUTOUT_LENGTH=16
+10/22 12:30:18 AM | DATA_DIR=/data/cifar
+10/22 12:30:18 AM | DATA_PATH=./data/
+10/22 12:30:18 AM | DATASET=imagenet
+10/22 12:30:18 AM | DIST_URL=tcp://127.0.0.1:23456
+10/22 12:30:18 AM | DISTRIBUTED=False
+10/22 12:30:18 AM | DROP_PATH_PROB=0.2
+10/22 12:30:18 AM | ENSEMBLE=False
+10/22 12:30:18 AM | GPUS=[0]
+10/22 12:30:18 AM | INIT_CHANNELS=16
+10/22 12:30:18 AM | INPUT_CHANNELS=3
+10/22 12:30:18 AM | LAYER_NUM=3
+10/22 12:30:18 AM | LOCAL_RANK=0
+10/22 12:30:18 AM | LR_RATIO=0.5
+10/22 12:30:18 AM | MODEL_TYPE=cifar
+10/22 12:30:18 AM | N_CLASSES=10
+10/22 12:30:18 AM | NAME=cifar10-search
+10/22 12:30:18 AM | NO_REPEAT=False
+10/22 12:30:18 AM | PATH=searchs/cifar10-search
+10/22 12:30:18 AM | PLOT_PATH=searchs/cifar10-search/plots
+10/22 12:30:18 AM | PRETRAIN_DECAY=5
+10/22 12:30:18 AM | PRETRAIN_EPOCHS=5
+10/22 12:30:18 AM | PRINT_FREQ=50
+10/22 12:30:18 AM | RETRAIN_EPOCHS=25
+10/22 12:30:18 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
+10/22 12:30:18 AM | RETRAIN_SETTING=0
+10/22 12:30:18 AM | RETRAIN_UPDATE_W=False
+10/22 12:30:18 AM | SAME_STRUCTURE=False
+10/22 12:30:18 AM | SAMPLE_RATIO=0.2
+10/22 12:30:18 AM | SEARCH_ITER=5
+10/22 12:30:18 AM | SEARCH_ITER_EPOCHS=5
+10/22 12:30:18 AM | SEED=0
+10/22 12:30:18 AM | SHORT_CONNECT=False
+10/22 12:30:18 AM | SYNC_PARAM=False
+10/22 12:30:18 AM | TEACHER2STUDENT=False
+10/22 12:30:18 AM | TEST_DIR=/data/imagenet/val
+10/22 12:30:18 AM | TRAIN_DIR=/data/imagenet/train
+10/22 12:30:18 AM | TRAIN_PORTION=0.5
+10/22 12:30:18 AM | UNROLLED=False
+10/22 12:30:18 AM | USE_BETA=False
+10/22 12:30:18 AM | VAL_DIR=/data/imagenet/train
+10/22 12:30:18 AM | W_GRAD_CLIP=5.0
+10/22 12:30:18 AM | W_LR=0.05
+10/22 12:30:18 AM | W_LR_MIN=0.001
+10/22 12:30:18 AM | W_MOMENTUM=0.9
+10/22 12:30:18 AM | W_WEIGHT_DECAY=0.0003
+10/22 12:30:18 AM | WORKERS=4
+10/22 12:30:18 AM | WORLD_SIZE=1
+10/22 12:30:18 AM |
+10/22 12:30:18 AM | Logger is set - training start
+10/22 12:31:35 AM |
+10/22 12:31:35 AM | Parameters:
+10/22 12:31:35 AM | ALPHA_LR=0.0003
+10/22 12:31:35 AM | ALPHA_WEIGHT_DECAY=0.001
+10/22 12:31:35 AM | AUX_WEIGHT=0.4
+10/22 12:31:35 AM | BATCH_SIZE=64
+10/22 12:31:35 AM | CELLS_NUM=3
+10/22 12:31:35 AM | CLEAN_ARCH=True
+10/22 12:31:35 AM | CUTOUT_LENGTH=16
+10/22 12:31:35 AM | DATA_DIR=/data/cifar
+10/22 12:31:35 AM | DATA_PATH=./data/
+10/22 12:31:35 AM | DATASET=cifar10
+10/22 12:31:35 AM | DIST_URL='tcp://127.0.0.1:23343'
+10/22 12:31:35 AM | DISTRIBUTED=True
+10/22 12:31:35 AM | DROP_PATH_PROB=0.2
+10/22 12:31:35 AM | ENSEMBLE=True
+10/22 12:31:35 AM | GPUS=[0]
+10/22 12:31:35 AM | INIT_CHANNELS=16
+10/22 12:31:35 AM | INPUT_CHANNELS=3
+10/22 12:31:35 AM | LAYER_NUM=3
+10/22 12:31:35 AM | LOCAL_RANK=0
+10/22 12:31:35 AM | LR_RATIO=0.5
+10/22 12:31:35 AM | MODEL_TYPE=cifar
+10/22 12:31:35 AM | N_CLASSES=10
+10/22 12:31:35 AM | NAME=cifar10-search
+10/22 12:31:35 AM | NO_REPEAT=False
+10/22 12:31:35 AM | PATH=searchs/cifar10-search
+10/22 12:31:35 AM | PLOT_PATH=searchs/cifar10-search/plots
+10/22 12:31:35 AM | PRETRAIN_DECAY=0
+10/22 12:31:35 AM | PRETRAIN_EPOCHS=0
+10/22 12:31:35 AM | PRINT_FREQ=10
+10/22 12:31:35 AM | RETRAIN_EPOCHS=1
+10/22 12:31:35 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
+10/22 12:31:35 AM | RETRAIN_SETTING=0
+10/22 12:31:35 AM | RETRAIN_UPDATE_W=True
+10/22 12:31:35 AM | SAME_STRUCTURE=True
+10/22 12:31:35 AM | SAMPLE_RATIO=0.2
+10/22 12:31:35 AM | SEARCH_ITER=25
+10/22 12:31:35 AM | SEARCH_ITER_EPOCHS=1
+10/22 12:31:35 AM | SEED=0
+10/22 12:31:35 AM | SHORT_CONNECT=False
+10/22 12:31:35 AM | SYNC_PARAM=True
+10/22 12:31:35 AM | TEACHER2STUDENT=True
+10/22 12:31:35 AM | TEST_DIR=/data/imagenet/val
+10/22 12:31:35 AM | TRAIN_DIR=/data/imagenet/train
+10/22 12:31:35 AM | TRAIN_PORTION=0.5
+10/22 12:31:35 AM | UNROLLED=False
+10/22 12:31:35 AM | USE_BETA=True
+10/22 12:31:35 AM | VAL_DIR=/data/imagenet/train
+10/22 12:31:35 AM | W_GRAD_CLIP=5.0
+10/22 12:31:35 AM | W_LR=0.05
+10/22 12:31:35 AM | W_LR_MIN=0.001
+10/22 12:31:35 AM | W_MOMENTUM=0.9
+10/22 12:31:35 AM | W_WEIGHT_DECAY=0.0003
+10/22 12:31:35 AM | WORKERS=1
+10/22 12:31:35 AM | WORLD_SIZE=2
+10/22 12:31:35 AM |
+10/22 12:31:35 AM | Logger is set - training start
+10/22 12:33:48 AM |
+10/22 12:33:48 AM | Parameters:
+10/22 12:33:48 AM | ALPHA_LR=0.0003
+10/22 12:33:48 AM | ALPHA_WEIGHT_DECAY=0.001
+10/22 12:33:48 AM | AUX_WEIGHT=0.4
+10/22 12:33:48 AM | BATCH_SIZE=64
+10/22 12:33:48 AM | CELLS_NUM=3
+10/22 12:33:48 AM | CLEAN_ARCH=True
+10/22 12:33:48 AM | CUTOUT_LENGTH=16
+10/22 12:33:48 AM | DATA_DIR=./cifar
+10/22 12:33:48 AM | DATA_PATH=./data/
+10/22 12:33:48 AM | DATASET=cifar10
+10/22 12:33:48 AM | DIST_URL=tcp://127.0.0.1:23343
+10/22 12:33:48 AM | DISTRIBUTED=True
+10/22 12:33:48 AM | DROP_PATH_PROB=0.2
+10/22 12:33:48 AM | ENSEMBLE=True
+10/22 12:33:48 AM | GPUS=[0]
+10/22 12:33:48 AM | INIT_CHANNELS=16
+10/22 12:33:48 AM | INPUT_CHANNELS=3
+10/22 12:33:48 AM | LAYER_NUM=3
+10/22 12:33:48 AM | LOCAL_RANK=0
+10/22 12:33:48 AM | LR_RATIO=0.5
+10/22 12:33:48 AM | MODEL_TYPE=cifar
+10/22 12:33:48 AM | N_CLASSES=10
+10/22 12:33:48 AM | NAME=cifar10-search
+10/22 12:33:48 AM | NO_REPEAT=False
+10/22 12:33:48 AM | PATH=searchs/cifar10-search
+10/22 12:33:48 AM | PLOT_PATH=searchs/cifar10-search/plots
+10/22 12:33:48 AM | PRETRAIN_DECAY=0
+10/22 12:33:48 AM | PRETRAIN_EPOCHS=0
+10/22 12:33:48 AM | PRINT_FREQ=10
+10/22 12:33:48 AM | RETRAIN_EPOCHS=1
+10/22 12:33:48 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
+10/22 12:33:48 AM | RETRAIN_SETTING=0
+10/22 12:33:48 AM | RETRAIN_UPDATE_W=True
+10/22 12:33:48 AM | SAME_STRUCTURE=True
+10/22 12:33:48 AM | SAMPLE_RATIO=0.2
+10/22 12:33:48 AM | SEARCH_ITER=25
+10/22 12:33:48 AM | SEARCH_ITER_EPOCHS=1
+10/22 12:33:48 AM | SEED=0
+10/22 12:33:48 AM | SHORT_CONNECT=False
+10/22 12:33:48 AM | SYNC_PARAM=True
+10/22 12:33:48 AM | TEACHER2STUDENT=True
+10/22 12:33:48 AM | TEST_DIR=/data/imagenet/val
+10/22 12:33:48 AM | TRAIN_DIR=/data/imagenet/train
+10/22 12:33:48 AM | TRAIN_PORTION=0.5
+10/22 12:33:48 AM | UNROLLED=False
+10/22 12:33:48 AM | USE_BETA=True
+10/22 12:33:48 AM | VAL_DIR=/data/imagenet/train
+10/22 12:33:48 AM | W_GRAD_CLIP=5.0
+10/22 12:33:48 AM | W_LR=0.05
+10/22 12:33:48 AM | W_LR_MIN=0.001
+10/22 12:33:48 AM | W_MOMENTUM=0.9
+10/22 12:33:48 AM | W_WEIGHT_DECAY=0.0003
+10/22 12:33:48 AM | WORKERS=1
+10/22 12:33:48 AM | WORLD_SIZE=1
+10/22 12:33:48 AM |
+10/22 12:33:48 AM | Logger is set - training start
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
+        [0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
+        [0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
+        [0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
+        [0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
+        [0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
+        [0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
+        [0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
+        [0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
+        [0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
+        [0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
+        [0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
+        [0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
+        [0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
+        [0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
+        [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
+        [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
+        [0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
+        [0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
+        [0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
+        [0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+#####################
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
+        [0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
+        [0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
+        [0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
+        [0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
+        [0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
+        [0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
+        [0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
+        [0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
+        [0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
+        [0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
+        [0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
+        [0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
+        [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
+        [0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
+        [0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
+        [0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
+        [0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
+        [0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
+        [0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
+        [0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+#####################
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
+        [0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
+        [0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
+        [0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
+        [0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
+        [0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
+        [0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
+        [0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
+        [0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
+        [0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
+       device='cuda:0')
+tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
+        [0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
+        [0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
+       device='cuda:0')
+tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
+        [0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
+        [0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
+       device='cuda:0')
+tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
+        [0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
+        [0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
+        [0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
+        [0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
+       device='cuda:0')
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.5002, 0.4998], device='cuda:0')
+tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
+tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
+tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
+#####################
0.1249]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249], + [0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250], + [0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251], + [0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250], + [0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250], + [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250], + [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249], + [0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251], + [0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252], + [0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248], + [0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5001, 0.4999], device='cuda:0', grad_fn=) +tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=) +tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0', + grad_fn=) +tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5000, 0.5000], device='cuda:0', grad_fn=) +tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0', + grad_fn=) +tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248], + [0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]], + device='cuda:0', grad_fn=) +tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251], + [0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250], + [0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251], + [0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250], + [0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252], + [0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250], + [0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252], + [0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251], + [0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248], + [0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251], + [0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250], + [0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249], + [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]], + device='cuda:0', grad_fn=) +tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250], + [0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250], + [0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 
0.1249, 0.1252, 0.1251], + [0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]], + device='cuda:0', grad_fn=) +tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250], + [0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251], + [0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251], + [0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251], + [0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5007, 0.4993], device='cuda:0', grad_fn=) +tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=) +tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0', + grad_fn=) +tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.4997, 0.5003], device='cuda:0', grad_fn=) +tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=) +tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250], + [0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250], + [0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251], + [0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251], + [0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251], + [0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249], + [0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249], + [0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250], + [0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248], + [0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249], + [0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250], + [0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]], + device='cuda:0') +tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249], + [0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252], + [0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252], + [0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251], + [0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250], + [0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250], + [0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]], + device='cuda:0') +##################### +####### BETA ####### +# Beta - normal +tensor([0.5000, 0.5000], device='cuda:0', grad_fn=) +tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=) +tensor([0.2503, 0.2498, 
0.2499, 0.2500], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5002, 0.4998], device='cuda:0') +tensor([0.3336, 0.3333, 0.3331], device='cuda:0') +tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0') +tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0') +##################### +10/22 12:37:14 AM | +10/22 12:37:14 AM | Parameters: +10/22 12:37:14 AM | ALPHA_LR=0.0003 +10/22 12:37:14 AM | ALPHA_WEIGHT_DECAY=0.001 +10/22 12:37:14 AM | AUX_WEIGHT=0.4 +10/22 12:37:14 AM | BATCH_SIZE=64 +10/22 12:37:14 AM | CELLS_NUM=3 +10/22 12:37:14 AM | CLEAN_ARCH=True +10/22 12:37:14 AM | CUTOUT_LENGTH=16 +10/22 12:37:14 AM | DATA_DIR=./cifar +10/22 12:37:14 AM | DATA_PATH=./data/ +10/22 12:37:14 AM | DATASET=cifar10 +10/22 12:37:14 AM | DIST_URL=tcp://127.0.0.1:23343 +10/22 12:37:14 AM | DISTRIBUTED=True +10/22 12:37:14 AM | DROP_PATH_PROB=0.2 +10/22 12:37:14 AM | ENSEMBLE=True +10/22 12:37:14 AM | GPUS=[0] +10/22 12:37:14 AM | INIT_CHANNELS=16 +10/22 12:37:14 AM | INPUT_CHANNELS=3 +10/22 12:37:14 AM | LAYER_NUM=3 +10/22 12:37:14 AM | LOCAL_RANK=0 +10/22 12:37:14 AM | LR_RATIO=0.5 +10/22 12:37:14 AM | MODEL_TYPE=cifar +10/22 12:37:14 AM | N_CLASSES=10 +10/22 12:37:14 AM | NAME=cifar10-search +10/22 12:37:14 AM | NO_REPEAT=False +10/22 12:37:14 AM | PATH=searchs/cifar10-search +10/22 12:37:14 AM | PLOT_PATH=searchs/cifar10-search/plots +10/22 12:37:14 AM | PRETRAIN_DECAY=0 +10/22 12:37:14 AM | PRETRAIN_EPOCHS=0 +10/22 12:37:14 AM | PRINT_FREQ=10 +10/22 12:37:14 AM | RETRAIN_EPOCHS=1 +10/22 12:37:14 AM | RETRAIN_PATH=searchs/cifar10-search/retrains +10/22 12:37:14 AM | RETRAIN_SETTING=0 +10/22 12:37:14 AM | RETRAIN_UPDATE_W=True +10/22 12:37:14 AM | SAME_STRUCTURE=True +10/22 12:37:14 AM | SAMPLE_RATIO=0.2 +10/22 12:37:14 AM | SEARCH_ITER=25 +10/22 12:37:14 AM | SEARCH_ITER_EPOCHS=1 +10/22 12:37:14 AM | SEED=0 +10/22 12:37:14 AM | SHORT_CONNECT=False +10/22 12:37:14 AM | SYNC_PARAM=True +10/22 12:37:14 AM | TEACHER2STUDENT=True +10/22 12:37:14 AM | TEST_DIR=/data/imagenet/val +10/22 12:37:14 AM | TRAIN_DIR=/data/imagenet/train +10/22 12:37:14 AM | TRAIN_PORTION=0.5 +10/22 12:37:14 AM | UNROLLED=False +10/22 12:37:14 AM | USE_BETA=True +10/22 12:37:14 AM | VAL_DIR=/data/imagenet/train +10/22 12:37:14 AM | W_GRAD_CLIP=5.0 +10/22 12:37:14 AM | W_LR=0.05 +10/22 12:37:14 AM | W_LR_MIN=0.001 +10/22 12:37:14 AM | W_MOMENTUM=0.9 +10/22 12:37:14 AM | W_WEIGHT_DECAY=0.0003 +10/22 12:37:14 AM | WORKERS=1 +10/22 12:37:14 AM | WORLD_SIZE=1 +10/22 12:37:14 AM | +10/22 12:37:14 AM | Logger is set - training start +####### ALPHA ####### +# Alpha - normal +tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251], + [0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249], + [0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250], + [0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252], + [0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250], + [0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251], + [0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251], + [0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250], + [0.1251, 
0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249], + [0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251], + [0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249], + [0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250], + [0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251], + [0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250], + [0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250], + [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250], + [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249], + [0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251], + [0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252], + [0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248], + [0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5001, 0.4999], device='cuda:0', grad_fn=) +tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=) +tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0', + grad_fn=) +tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5000, 0.5000], device='cuda:0', grad_fn=) +tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0', + grad_fn=) +tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248], + [0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]], + device='cuda:0', grad_fn=) +tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251], + [0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250], + [0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251], + [0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250], + [0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252], + [0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250], + [0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252], + [0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251], + [0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248], + [0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251], + [0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250], + [0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249], + [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]], + device='cuda:0', grad_fn=) 
+tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250], + [0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250], + [0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251], + [0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]], + device='cuda:0', grad_fn=) +tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250], + [0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251], + [0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251], + [0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251], + [0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5007, 0.4993], device='cuda:0', grad_fn=) +tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=) +tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0', + grad_fn=) +tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.4997, 0.5003], device='cuda:0', grad_fn=) +tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=) +tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250], + [0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250], + [0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251], + [0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251], + [0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251], + [0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249], + [0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249], + [0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250], + [0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248], + [0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249], + [0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250], + [0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]], + device='cuda:0') +tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249], + [0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252], + [0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252], + [0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251], + [0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250], + [0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250], + [0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]], + device='cuda:0') 
+##################### +####### BETA ####### +# Beta - normal +tensor([0.5000, 0.5000], device='cuda:0', grad_fn=) +tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=) +tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5002, 0.4998], device='cuda:0') +tensor([0.3336, 0.3333, 0.3331], device='cuda:0') +tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0') +tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0') +##################### +10/22 12:37:47 AM | +10/22 12:37:47 AM | Parameters: +10/22 12:37:47 AM | ALPHA_LR=0.0003 +10/22 12:37:47 AM | ALPHA_WEIGHT_DECAY=0.001 +10/22 12:37:47 AM | AUX_WEIGHT=0.4 +10/22 12:37:47 AM | BATCH_SIZE=64 +10/22 12:37:47 AM | CELLS_NUM=3 +10/22 12:37:47 AM | CLEAN_ARCH=True +10/22 12:37:47 AM | CUTOUT_LENGTH=16 +10/22 12:37:47 AM | DATA_DIR=./cifar +10/22 12:37:47 AM | DATA_PATH=./data/ +10/22 12:37:47 AM | DATASET=cifar10 +10/22 12:37:47 AM | DIST_URL=tcp://127.0.0.1:23343 +10/22 12:37:47 AM | DISTRIBUTED=True +10/22 12:37:47 AM | DROP_PATH_PROB=0.2 +10/22 12:37:47 AM | ENSEMBLE=True +10/22 12:37:47 AM | GPUS=[0] +10/22 12:37:47 AM | INIT_CHANNELS=16 +10/22 12:37:47 AM | INPUT_CHANNELS=3 +10/22 12:37:47 AM | LAYER_NUM=3 +10/22 12:37:47 AM | LOCAL_RANK=0 +10/22 12:37:47 AM | LR_RATIO=0.5 +10/22 12:37:47 AM | MODEL_TYPE=cifar +10/22 12:37:47 AM | N_CLASSES=10 +10/22 12:37:47 AM | NAME=cifar10-search +10/22 12:37:47 AM | NO_REPEAT=False +10/22 12:37:47 AM | PATH=searchs/cifar10-search +10/22 12:37:47 AM | PLOT_PATH=searchs/cifar10-search/plots +10/22 12:37:47 AM | PRETRAIN_DECAY=0 +10/22 12:37:47 AM | PRETRAIN_EPOCHS=0 +10/22 12:37:47 AM | PRINT_FREQ=10 +10/22 12:37:47 AM | RETRAIN_EPOCHS=1 +10/22 12:37:47 AM | RETRAIN_PATH=searchs/cifar10-search/retrains +10/22 12:37:47 AM | RETRAIN_SETTING=0 +10/22 12:37:47 AM | RETRAIN_UPDATE_W=True +10/22 12:37:47 AM | SAME_STRUCTURE=True +10/22 12:37:47 AM | SAMPLE_RATIO=0.2 +10/22 12:37:47 AM | SEARCH_ITER=25 +10/22 12:37:47 AM | SEARCH_ITER_EPOCHS=1 +10/22 12:37:47 AM | SEED=0 +10/22 12:37:47 AM | SHORT_CONNECT=False +10/22 12:37:47 AM | SYNC_PARAM=True +10/22 12:37:47 AM | TEACHER2STUDENT=True +10/22 12:37:47 AM | TEST_DIR=/data/imagenet/val +10/22 12:37:47 AM | TRAIN_DIR=/data/imagenet/train +10/22 12:37:47 AM | TRAIN_PORTION=0.5 +10/22 12:37:47 AM | UNROLLED=False +10/22 12:37:47 AM | USE_BETA=True +10/22 12:37:47 AM | VAL_DIR=/data/imagenet/train +10/22 12:37:47 AM | W_GRAD_CLIP=5.0 +10/22 12:37:47 AM | W_LR=0.05 +10/22 12:37:47 AM | W_LR_MIN=0.001 +10/22 12:37:47 AM | W_MOMENTUM=0.9 +10/22 12:37:47 AM | W_WEIGHT_DECAY=0.0003 +10/22 12:37:47 AM | WORKERS=1 +10/22 12:37:47 AM | WORLD_SIZE=1 +10/22 12:37:47 AM | +10/22 12:37:47 AM | Logger is set - training start +####### ALPHA ####### +# Alpha - normal +tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251], + [0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249], + [0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250], + [0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252], + [0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250], + [0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251], + [0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 
0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251], + [0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250], + [0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249], + [0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251], + [0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249], + [0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250], + [0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251], + [0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250], + [0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250], + [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250], + [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249], + [0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251], + [0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252], + [0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248], + [0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5001, 0.4999], device='cuda:0', grad_fn=) +tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=) +tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0', + grad_fn=) +tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5000, 0.5000], device='cuda:0', grad_fn=) +tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0', + grad_fn=) +tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248], + [0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]], + device='cuda:0', grad_fn=) +tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251], + [0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250], + [0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251], + [0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250], + [0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252], + [0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250], + [0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252], + [0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251], + [0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248], + [0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251], + [0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1252, 0.1249, 0.1250, 0.1248, 
0.1251, 0.1251, 0.1250, 0.1250], + [0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249], + [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]], + device='cuda:0', grad_fn=) +tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250], + [0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250], + [0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251], + [0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]], + device='cuda:0', grad_fn=) +tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250], + [0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251], + [0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251], + [0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251], + [0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5007, 0.4993], device='cuda:0', grad_fn=) +tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=) +tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0', + grad_fn=) +tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.4997, 0.5003], device='cuda:0', grad_fn=) +tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=) +tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250], + [0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250], + [0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251], + [0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251], + [0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251], + [0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249], + [0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249], + [0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250], + [0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248], + [0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249], + [0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250], + [0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]], + device='cuda:0') +tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249], + [0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252], + [0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252], + [0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251], + [0.1250, 0.1252, 0.1250, 0.1250, 
0.1251, 0.1249, 0.1249, 0.1250],
+ [0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
+ [0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
+ device='cuda:0')
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.5002, 0.4998], device='cuda:0')
+tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
+tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
+tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
+#####################
+10/22 12:38:25 AM |
+10/22 12:38:25 AM | Parameters:
+10/22 12:38:25 AM | ALPHA_LR=0.0003
+10/22 12:38:25 AM | ALPHA_WEIGHT_DECAY=0.001
+10/22 12:38:25 AM | AUX_WEIGHT=0.4
+10/22 12:38:25 AM | BATCH_SIZE=64
+10/22 12:38:25 AM | CELLS_NUM=3
+10/22 12:38:25 AM | CLEAN_ARCH=True
+10/22 12:38:25 AM | CUTOUT_LENGTH=16
+10/22 12:38:25 AM | DATA_DIR=./cifar
+10/22 12:38:25 AM | DATA_PATH=./data/
+10/22 12:38:25 AM | DATASET=cifar10
+10/22 12:38:25 AM | DIST_URL=tcp://127.0.0.1:23343
+10/22 12:38:25 AM | DISTRIBUTED=True
+10/22 12:38:25 AM | DROP_PATH_PROB=0.2
+10/22 12:38:25 AM | ENSEMBLE=True
+10/22 12:38:25 AM | GPUS=[0]
+10/22 12:38:25 AM | INIT_CHANNELS=16
+10/22 12:38:25 AM | INPUT_CHANNELS=3
+10/22 12:38:25 AM | LAYER_NUM=3
+10/22 12:38:25 AM | LOCAL_RANK=0
+10/22 12:38:25 AM | LR_RATIO=0.5
+10/22 12:38:25 AM | MODEL_TYPE=cifar
+10/22 12:38:25 AM | N_CLASSES=10
+10/22 12:38:25 AM | NAME=cifar10-search
+10/22 12:38:25 AM | NO_REPEAT=False
+10/22 12:38:25 AM | PATH=searchs/cifar10-search
+10/22 12:38:25 AM | PLOT_PATH=searchs/cifar10-search/plots
+10/22 12:38:25 AM | PRETRAIN_DECAY=0
+10/22 12:38:25 AM | PRETRAIN_EPOCHS=0
+10/22 12:38:25 AM | PRINT_FREQ=10
+10/22 12:38:25 AM | RETRAIN_EPOCHS=1
+10/22 12:38:25 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
+10/22 12:38:25 AM | RETRAIN_SETTING=0
+10/22 12:38:25 AM | RETRAIN_UPDATE_W=True
+10/22 12:38:25 AM | SAME_STRUCTURE=True
+10/22 12:38:25 AM | SAMPLE_RATIO=0.2
+10/22 12:38:25 AM | SEARCH_ITER=25
+10/22 12:38:25 AM | SEARCH_ITER_EPOCHS=1
+10/22 12:38:25 AM | SEED=0
+10/22 12:38:25 AM | SHORT_CONNECT=False
+10/22 12:38:25 AM | SYNC_PARAM=True
+10/22 12:38:25 AM | TEACHER2STUDENT=True
+10/22 12:38:25 AM | TEST_DIR=/data/imagenet/val
+10/22 12:38:25 AM | TRAIN_DIR=/data/imagenet/train
+10/22 12:38:25 AM | TRAIN_PORTION=0.5
+10/22 12:38:25 AM | UNROLLED=False
+10/22 12:38:25 AM | USE_BETA=True
+10/22 12:38:25 AM | VAL_DIR=/data/imagenet/train
+10/22 12:38:25 AM | W_GRAD_CLIP=5.0
+10/22 12:38:25 AM | W_LR=0.05
+10/22 12:38:25 AM | W_LR_MIN=0.001
+10/22 12:38:25 AM | W_MOMENTUM=0.9
+10/22 12:38:25 AM | W_WEIGHT_DECAY=0.0003
+10/22 12:38:25 AM | WORKERS=1
+10/22 12:38:25 AM | WORLD_SIZE=1
+10/22 12:38:25 AM |
+10/22 12:38:25 AM | Logger is set - training start
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
+ [0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
+ [0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
+ [0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
+ [0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
+ [0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
+ [0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
+ [0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
+ [0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
+ [0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
+ [0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
+ [0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
+ [0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
+ [0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
+ [0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
+ [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
+ [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
+ [0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
+ [0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
+ [0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
+ [0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+#####################
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
+ [0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
+ [0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
+ [0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
+ [0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
+ [0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
+ [0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
+ [0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
+ [0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
+ [0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
+ [0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
+ [0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
+ [0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
+ [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
+ [0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
+ [0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
+ [0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
+ [0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
+ [0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
+ [0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
+ [0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+#####################
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
+ [0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
+ [0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
+ [0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
+ [0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
+ [0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
+ [0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
+ [0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
+ [0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+ [0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
+ [0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
+ device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
+ [0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
+ device='cuda:0')
+tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
+ [0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
+ [0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
+ device='cuda:0')
+tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
+ [0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
+ [0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
+ [0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
+ device='cuda:0')
+tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
+ [0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
+ [0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
+ [0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
+ [0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
+ device='cuda:0')
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
+ grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.5002, 0.4998], device='cuda:0')
+tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
+tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
+tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
+#####################
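For orientation: each alpha matrix above has one row per incoming edge of a cell node (2, 3, 4, then 5 edges) and one column per candidate operation, while each beta vector holds one weight per incoming edge; both are printed after a softmax, which is why a freshly initialized search shows near-uniform rows (1/8 = 0.1250 for alpha). A minimal, self-contained sketch of how such dumps arise; the variable names here are illustrative, not taken from search.py:

import torch
import torch.nn as nn
import torch.nn.functional as F

N_OPS = 8    # candidate operations per edge (the 8 columns above)
N_NODES = 4  # intermediate nodes per cell; node i has i + 2 input edges

# Near-zero random init, as in DARTS-style searches (assumed here)
alpha_normal = nn.ParameterList(
    [nn.Parameter(1e-3 * torch.randn(i + 2, N_OPS)) for i in range(N_NODES)])
beta_normal = nn.ParameterList(
    [nn.Parameter(1e-3 * torch.randn(i + 2)) for i in range(N_NODES)])

# Softmax over the last dim turns logits into mixing weights; with
# near-zero init every alpha entry sits close to 1/8 = 0.1250 and the
# beta vectors close to 1/2, 1/3, 1/4, 1/5, matching the dumps above.
for a, b in zip(alpha_normal, beta_normal):
    print(F.softmax(a, dim=-1))
    print(F.softmax(b, dim=-1))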
+Train: Layer 1/3 Epoch 1/25 Step 000/391 Loss 2.300 Prec@(1,5) (6.2%, 50.0%)
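The Train lines report the batch-averaged loss together with top-1/top-5 precision; with BATCH_SIZE=64, the 6.2% top-1 above corresponds to roughly 4 of 64 correct predictions. A hedged sketch of the usual top-k computation (the repository's own utility may differ in details):

import torch

def accuracy(logits, target, topk=(1, 5)):
    """Top-k precision for one batch, returned as percentages."""
    maxk = max(topk)
    # Indices of the maxk highest-scoring classes per sample: (batch, maxk)
    _, pred = logits.topk(maxk, dim=1)
    # (maxk, batch) boolean table: was the true label among the top-j guesses?
    correct = pred.t().eq(target.view(1, -1))
    return [correct[:k].any(dim=0).float().mean().item() * 100.0
            for k in topk]

logits = torch.randn(64, 10)           # e.g. BATCH_SIZE=64, N_CLASSES=10
target = torch.randint(0, 10, (64,))
top1, top5 = accuracy(logits, target)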
reduce +tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248], + [0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249], + [0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250], + [0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]], + device='cuda:0') +tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249], + [0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252], + [0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252], + [0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251], + [0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250], + [0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250], + [0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]], + device='cuda:0') +##################### +####### BETA ####### +# Beta - normal +tensor([0.5000, 0.5000], device='cuda:0', grad_fn=) +tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=) +tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5002, 0.4998], device='cuda:0') +tensor([0.3336, 0.3333, 0.3331], device='cuda:0') +tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0') +tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0') +##################### +Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%) +Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%) +Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375% +Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +Final best Prec@1 = 0.0000% +Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), 
reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +####### ALPHA ####### +# Alpha - normal +tensor([[0.1248, 0.1249, 0.1254, 0.1250, 0.1249, 0.1249, 0.1250, 0.1251], + [0.1250, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251, 0.1248, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250, 0.1248], + [0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1246, 0.1251], + [0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1251], + [0.1249, 0.1250, 0.1252, 0.1251, 0.1249, 0.1249, 0.1250, 0.1250], + [0.1247, 0.1249, 0.1251, 0.1251, 0.1250, 0.1250, 0.1250, 0.1252], + [0.1249, 0.1250, 0.1249, 0.1251, 0.1249, 0.1251, 0.1249, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1252, 0.1252, 0.1249, 0.1249, 0.1248, 0.1252], + [0.1252, 0.1250, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1249, 0.1249, 0.1252, 0.1250, 0.1252, 0.1249, 0.1248], + [0.1248, 0.1253, 0.1249, 0.1249, 0.1251, 0.1252, 0.1248, 0.1250], + [0.1249, 0.1249, 0.1252, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249], + [0.1251, 0.1250, 0.1249, 0.1248, 0.1250, 0.1251, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1248, 0.1249, 0.1250, 0.1250, 0.1252, 0.1251, 0.1251], + [0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1253, 0.1249, 0.1250], + [0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1248, 0.1251, 0.1249, 0.1251, 0.1250, 0.1252, 0.1250], + [0.1251, 0.1251, 0.1248, 0.1250, 0.1249, 0.1250, 0.1252, 0.1250], + [0.1249, 0.1249, 0.1250, 0.1251, 0.1251, 0.1250, 0.1250, 0.1251], + [0.1249, 0.1250, 0.1250, 0.1250, 0.1252, 0.1252, 0.1249, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249], + [0.1249, 0.1251, 0.1250, 0.1251, 0.1248, 0.1251, 0.1249, 0.1250], + [0.1250, 0.1249, 0.1249, 0.1249, 0.1250, 0.1252, 0.1249, 0.1252], + [0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1251, 0.1251, 0.1248], + [0.1251, 0.1251, 0.1248, 0.1249, 0.1248, 0.1251, 0.1251, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5004, 0.4996], device='cuda:0', grad_fn=) +tensor([0.3332, 0.3329, 
0.3339], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2503, 0.2499, 0.2498], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.2000, 0.1998, 0.2001, 0.2001], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.4999, 0.5001], device='cuda:0', grad_fn=) +tensor([0.3336, 0.3327, 0.3337], device='cuda:0', grad_fn=) +tensor([0.2501, 0.2499, 0.2502, 0.2498], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.2001, 0.2000, 0.1998, 0.2001], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1249, 0.1251, 0.1248, 0.1252, 0.1252, 0.1250, 0.1248, 0.1249], + [0.1250, 0.1251, 0.1250, 0.1248, 0.1248, 0.1250, 0.1252, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1247, 0.1250, 0.1250, 0.1254, 0.1252, 0.1247, 0.1249, 0.1251], + [0.1250, 0.1251, 0.1250, 0.1251, 0.1252, 0.1248, 0.1249, 0.1249], + [0.1251, 0.1251, 0.1250, 0.1248, 0.1251, 0.1249, 0.1250, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1251, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250], + [0.1251, 0.1253, 0.1251, 0.1250, 0.1249, 0.1249, 0.1249, 0.1249], + [0.1250, 0.1249, 0.1251, 0.1250, 0.1247, 0.1251, 0.1250, 0.1251], + [0.1250, 0.1252, 0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1252, 0.1248, 0.1251, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1250, 0.1252], + [0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251, 0.1250, 0.1252], + [0.1249, 0.1249, 0.1249, 0.1253, 0.1251, 0.1251, 0.1249, 0.1249], + [0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250, 0.1249, 0.1251]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1249, 0.1250, 0.1249, 0.1251, 0.1251, 0.1251, 0.1248, 0.1251], + [0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1251, 0.1249, 0.1250, 0.1247, 0.1252, 0.1251, 0.1250, 0.1250], + [0.1249, 0.1252, 0.1249, 0.1249, 0.1252, 0.1248, 0.1252, 0.1249], + [0.1249, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1250, 0.1252]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251, 0.1250, 0.1250], + [0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1251, 0.1249], + [0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250], + [0.1249, 0.1252, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]], + device='cuda:0', grad_fn=) +tensor([[0.1251, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1249, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251], + [0.1249, 0.1250, 0.1250, 0.1250, 0.1252, 0.1251, 0.1248, 0.1250], + [0.1248, 0.1249, 0.1250, 0.1249, 0.1249, 0.1251, 0.1251, 0.1252], + [0.1250, 0.1248, 0.1251, 0.1251, 0.1249, 0.1248, 0.1253, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5004, 0.4996], device='cuda:0', grad_fn=) +tensor([0.3333, 0.3338, 0.3329], device='cuda:0', grad_fn=) +tensor([0.2496, 0.2501, 0.2499, 0.2504], device='cuda:0', + grad_fn=) +tensor([0.1998, 0.2001, 0.2000, 0.2001, 0.2000], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.4998, 0.5002], device='cuda:0', grad_fn=) +tensor([0.3332, 0.3335, 0.3333], device='cuda:0', grad_fn=) +tensor([0.2499, 0.2498, 0.2498, 0.2504], device='cuda:0', + grad_fn=) +tensor([0.1999, 0.2000, 0.2002, 0.1999, 0.1999], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1249, 0.1250], + [0.1248, 0.1248, 0.1248, 0.1252, 0.1250, 0.1251, 
0.1252, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1251, 0.1250], + [0.1249, 0.1249, 0.1250, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251], + [0.1251, 0.1249, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1252]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1248, 0.1252, 0.1252, 0.1250, 0.1248, 0.1252], + [0.1250, 0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1248, 0.1252], + [0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251], + [0.1247, 0.1249, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1252, 0.1250, 0.1249, 0.1252, 0.1249, 0.1250, 0.1248], + [0.1250, 0.1248, 0.1252, 0.1250, 0.1250, 0.1250, 0.1251, 0.1249], + [0.1250, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251], + [0.1249, 0.1251, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251], + [0.1251, 0.1249, 0.1250, 0.1249, 0.1250, 0.1249, 0.1251, 0.1250]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248], + [0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249], + [0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250], + [0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]], + device='cuda:0') +tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249], + [0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252], + [0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252], + [0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251], + [0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250], + [0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250], + [0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]], + device='cuda:0') +##################### +####### BETA ####### +# Beta - normal +tensor([0.5002, 0.4998], device='cuda:0', grad_fn=) +tensor([0.3335, 0.3331, 0.3333], device='cuda:0', grad_fn=) +tensor([0.2504, 0.2498, 0.2497, 0.2502], device='cuda:0', + grad_fn=) +tensor([0.2002, 0.1997, 0.1998, 0.2000, 0.2003], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5002, 0.4998], device='cuda:0') +tensor([0.3336, 0.3333, 0.3331], device='cuda:0') +tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0') +tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0') +##################### +Train: Layer 1/3 Epoch 2/25 Step 000/002 Loss 2.273 Prec@(1,5) (18.8%, 60.9%) +Train: Layer 1/3 Epoch 2/25 Step 001/002 Loss 2.270 Prec@(1,5) (18.0%, 55.5%) +Train: Layer 1/3 Epoch 2/25 Final Prec@1 17.9688% +Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('max_pool_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_3x3', 4), ('avg_pool_3x3', 1)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], 
normal_concat=range(2, 6), reduce=[[('avg_pool_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('dil_conv_5x5', 3), ('dil_conv_3x3', 1)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_3x3', 2), ('sep_conv_5x5', 1)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('dil_conv_5x5', 4), ('avg_pool_3x3', 0)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +Final best Prec@1 = 0.0000% +Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('max_pool_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_3x3', 4), ('avg_pool_3x3', 1)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('avg_pool_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('dil_conv_5x5', 3), ('dil_conv_3x3', 1)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_3x3', 2), ('sep_conv_5x5', 1)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('dil_conv_5x5', 4), ('avg_pool_3x3', 0)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +####### ALPHA ####### +# Alpha - normal +tensor([[0.1248, 0.1249, 0.1255, 0.1250, 0.1249, 0.1249, 0.1250, 0.1251], + [0.1249, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1248, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1252, 0.1249, 0.1251, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248], + [0.1249, 0.1249, 0.1251, 0.1251, 0.1249, 0.1252, 0.1247, 0.1251], + [0.1248, 0.1250, 0.1251, 0.1252, 0.1249, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1250, 0.1251], + [0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250], + [0.1246, 0.1248, 0.1251, 0.1250, 0.1251, 0.1251, 0.1251, 0.1252], + [0.1248, 0.1250, 0.1250, 0.1251, 0.1249, 0.1251, 0.1249, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1250, 0.1252, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252], + [0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1249, 0.1250, 0.1252, 0.1249, 0.1252, 0.1249, 0.1249], + [0.1248, 0.1252, 0.1249, 0.1248, 0.1251, 0.1251, 0.1249, 0.1251], + [0.1249, 0.1249, 0.1251, 0.1249, 0.1251, 0.1251, 0.1251, 0.1249]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1253, 0.1251, 0.1249], + [0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1248, 0.1249, 0.1250, 0.1250, 0.1252, 0.1251, 0.1251], + [0.1250, 
0.1249, 0.1248, 0.1249, 0.1251, 0.1254, 0.1250, 0.1250], + [0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1250, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1248, 0.1250, 0.1248, 0.1252, 0.1251, 0.1251, 0.1251], + [0.1251, 0.1250, 0.1247, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251], + [0.1248, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1250, 0.1251], + [0.1248, 0.1249, 0.1250, 0.1250, 0.1252, 0.1252, 0.1249, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1252, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1248], + [0.1249, 0.1252, 0.1251, 0.1251, 0.1248, 0.1251, 0.1249, 0.1250], + [0.1251, 0.1250, 0.1249, 0.1249, 0.1250, 0.1251, 0.1249, 0.1252], + [0.1251, 0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1251, 0.1248], + [0.1251, 0.1252, 0.1248, 0.1248, 0.1247, 0.1252, 0.1251, 0.1249]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5003, 0.4997], device='cuda:0', grad_fn=) +tensor([0.3334, 0.3329, 0.3337], device='cuda:0', grad_fn=) +tensor([0.2501, 0.2503, 0.2498, 0.2498], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.1999, 0.1998, 0.2002, 0.2002], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.4999, 0.5001], device='cuda:0', grad_fn=) +tensor([0.3335, 0.3328, 0.3337], device='cuda:0', grad_fn=) +tensor([0.2502, 0.2499, 0.2502, 0.2498], device='cuda:0', + grad_fn=) +tensor([0.1999, 0.2001, 0.2001, 0.1997, 0.2002], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1249, 0.1251, 0.1249, 0.1253, 0.1252, 0.1250, 0.1248, 0.1249], + [0.1249, 0.1252, 0.1250, 0.1249, 0.1248, 0.1250, 0.1251, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1247, 0.1250, 0.1250, 0.1254, 0.1252, 0.1246, 0.1249, 0.1252], + [0.1250, 0.1251, 0.1250, 0.1251, 0.1252, 0.1248, 0.1248, 0.1250], + [0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251, 0.1249]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1252, 0.1251, 0.1249, 0.1250, 0.1248, 0.1251, 0.1249], + [0.1251, 0.1253, 0.1252, 0.1249, 0.1249, 0.1248, 0.1249, 0.1248], + [0.1251, 0.1250, 0.1250, 0.1251, 0.1247, 0.1251, 0.1250, 0.1251], + [0.1251, 0.1253, 0.1249, 0.1249, 0.1251, 0.1249, 0.1249, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1252, 0.1248, 0.1250, 0.1247, 0.1251, 0.1251, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1247, 0.1251, 0.1249, 0.1250, 0.1252], + [0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1252], + [0.1249, 0.1249, 0.1249, 0.1253, 0.1251, 0.1251, 0.1249, 0.1248], + [0.1250, 0.1248, 0.1252, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1249, 0.1250, 0.1249, 0.1251, 0.1251, 0.1250, 0.1249, 0.1251], + [0.1249, 0.1251, 0.1250, 0.1250, 0.1250, 0.1251, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1251, 0.1249, 0.1250, 0.1247, 0.1252, 0.1252, 0.1249, 0.1250], + [0.1249, 0.1252, 0.1249, 0.1249, 0.1252, 0.1248, 0.1252, 0.1249], + [0.1249, 0.1251, 0.1249, 0.1248, 0.1252, 0.1249, 0.1250, 0.1252]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1251, 0.1249, 0.1249, 0.1249, 0.1250, 0.1251, 0.1251], + [0.1250, 0.1250, 0.1251, 0.1248, 0.1249, 0.1252, 0.1251, 0.1249], + [0.1249, 0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1251], + [0.1249, 0.1252, 0.1248, 0.1252, 0.1251, 0.1249, 0.1252, 0.1248]], + device='cuda:0', grad_fn=) +tensor([[0.1251, 0.1249, 0.1251, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251], + [0.1250, 0.1251, 0.1250, 0.1250, 0.1249, 0.1252, 0.1248, 0.1251], + [0.1249, 0.1250, 0.1250, 0.1251, 0.1253, 0.1251, 
0.1247, 0.1250], + [0.1248, 0.1249, 0.1251, 0.1249, 0.1249, 0.1250, 0.1252, 0.1252], + [0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1254, 0.1249]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5002, 0.4998], device='cuda:0', grad_fn=) +tensor([0.3333, 0.3338, 0.3329], device='cuda:0', grad_fn=) +tensor([0.2495, 0.2501, 0.2499, 0.2506], device='cuda:0', + grad_fn=) +tensor([0.1998, 0.2002, 0.1999, 0.2002, 0.1999], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.4998, 0.5002], device='cuda:0', grad_fn=) +tensor([0.3332, 0.3335, 0.3333], device='cuda:0', grad_fn=) +tensor([0.2499, 0.2499, 0.2498, 0.2504], device='cuda:0', + grad_fn=) +tensor([0.1998, 0.2001, 0.2003, 0.1998, 0.2000], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1249, 0.1251, 0.1250, 0.1253, 0.1248, 0.1250, 0.1249, 0.1251], + [0.1247, 0.1248, 0.1248, 0.1252, 0.1251, 0.1251, 0.1252, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1249, 0.1249, 0.1251, 0.1249, 0.1251, 0.1251, 0.1251], + [0.1249, 0.1249, 0.1249, 0.1251, 0.1252, 0.1249, 0.1250, 0.1251], + [0.1250, 0.1249, 0.1249, 0.1253, 0.1249, 0.1248, 0.1250, 0.1252]], + device='cuda:0', grad_fn=) +tensor([[0.1248, 0.1250, 0.1248, 0.1252, 0.1252, 0.1249, 0.1248, 0.1252], + [0.1250, 0.1249, 0.1250, 0.1251, 0.1251, 0.1250, 0.1248, 0.1252], + [0.1248, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1249, 0.1252], + [0.1246, 0.1249, 0.1250, 0.1250, 0.1251, 0.1252, 0.1250, 0.1251]], + device='cuda:0', grad_fn=) +tensor([[0.1249, 0.1252, 0.1251, 0.1249, 0.1252, 0.1248, 0.1250, 0.1248], + [0.1250, 0.1249, 0.1252, 0.1250, 0.1249, 0.1250, 0.1251, 0.1249], + [0.1249, 0.1250, 0.1249, 0.1251, 0.1249, 0.1251, 0.1251, 0.1251], + [0.1249, 0.1251, 0.1251, 0.1249, 0.1249, 0.1248, 0.1252, 0.1251], + [0.1251, 0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1252, 0.1250]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248], + [0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249], + [0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250], + [0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]], + device='cuda:0') +tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249], + [0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252], + [0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]], + device='cuda:0') +tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252], + [0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251], + [0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250], + [0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250], + [0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]], + device='cuda:0') +##################### +####### BETA ####### +# Beta - normal +tensor([0.5001, 0.4999], device='cuda:0', grad_fn=) +tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=) +tensor([0.2503, 0.2498, 0.2496, 0.2503], device='cuda:0', + grad_fn=) +tensor([0.2003, 0.1997, 0.1998, 0.1998, 0.2004], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5002, 0.4998], device='cuda:0') +tensor([0.3336, 0.3333, 0.3331], device='cuda:0') +tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0') +tensor([0.2001, 
0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0') +#####################
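Each restart above re-prints the full Parameters table and then announces "Logger is set - training start"; the "10/22 12:47:58 AM | " prefix is simply the logger's timestamp format. A minimal sketch of a setup that would reproduce that prefix (hypothetical; the repository's actual logger configuration is not shown in this log):

import logging
import sys

# Hypothetical reconstruction of the "10/22 12:47:58 AM | message" format
# seen throughout the log above.
logger = logging.getLogger("cifar10-search")
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(fmt="%(asctime)s | %(message)s",
                                       datefmt="%m/%d %I:%M:%S %p"))
logger.addHandler(handler)
logger.setLevel(logging.INFO)

logger.info("Logger is set - training start")
# -> 10/22 12:47:58 AM | Logger is set - training start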
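The "####### ALPHA #######" and "####### BETA #######" blocks dump the architecture weights after a softmax: each alpha row holds the mixing weights of the 8 candidate operations on one incoming edge of a cell node, and each beta vector weights a node's 2-5 incoming edges, which is why a freshly seeded search prints near-uniform rows (all ~0.125) and vectors (~0.5, ~0.333, ~0.25, ~0.2). The Genotype lines then record, for each node, the two strongest (operation, input) pairs. A minimal sketch of that derivation, assuming the standard DARTS Genotype namedtuple and a hypothetical column order for the 8 operations (the log itself does not say which alpha column is which op):

from collections import namedtuple
import torch.nn.functional as F

# Standard DARTS-style container, matching the Genotype(...) lines above.
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')

# Hypothetical order of the 8 alpha columns.
PRIMITIVES = ['none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect',
              'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5']

def parse_node(alpha_rows):
    # alpha_rows: (n_edges, 8) logits for one node. The softmax produces
    # rows like the dumps above (~0.125 everywhere right after init).
    probs = F.softmax(alpha_rows, dim=-1)
    strongest = probs.max(dim=-1)            # best op on each incoming edge
    top2 = strongest.values.topk(2).indices  # keep the two strongest edges
    # (a full derivation would also exclude 'none'; omitted for brevity)
    return [(PRIMITIVES[int(probs[e].argmax())], int(e)) for e in top2]

Applied to the four nodes of a cell (with 2, 3, 4 and 5 incoming edges), this yields the four [(op, idx), (op, idx)] pairs in each normal= and reduce= list, and normal_concat=range(2, 6) says the outputs of nodes 2-5 are concatenated to form the cell output.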
0.2498, 0.2498], device='cuda:0') +tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0') +##################### +Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%) +Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%) +Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375% +Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +Final best Prec@1 = 0.0000% +Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +10/22 12:51:31 AM | +10/22 12:51:31 AM | Parameters: +10/22 12:51:31 AM | ALPHA_LR=0.0003 +10/22 12:51:31 AM | ALPHA_WEIGHT_DECAY=0.001 +10/22 12:51:31 AM | AUX_WEIGHT=0.4 +10/22 12:51:31 AM | BATCH_SIZE=64 +10/22 12:51:31 AM | CELLS_NUM=3 +10/22 12:51:31 AM | CLEAN_ARCH=True +10/22 12:51:31 AM | CUTOUT_LENGTH=16 +10/22 12:51:31 AM | DATA_DIR=./cifar +10/22 12:51:31 AM | DATA_PATH=./data/ +10/22 
+10/22 12:51:31 AM | DATASET=cifar10
+10/22 12:51:31 AM | DIST_URL=tcp://127.0.0.1:23343
+10/22 12:51:31 AM | DISTRIBUTED=True
+10/22 12:51:31 AM | DROP_PATH_PROB=0.2
+10/22 12:51:31 AM | ENSEMBLE=True
+10/22 12:51:31 AM | GPUS=[0]
+10/22 12:51:31 AM | INIT_CHANNELS=16
+10/22 12:51:31 AM | INPUT_CHANNELS=3
+10/22 12:51:31 AM | LAYER_NUM=3
+10/22 12:51:31 AM | LOCAL_RANK=0
+10/22 12:51:31 AM | LR_RATIO=0.5
+10/22 12:51:31 AM | MODEL_TYPE=cifar
+10/22 12:51:31 AM | N_CLASSES=10
+10/22 12:51:31 AM | NAME=cifar10-search
+10/22 12:51:31 AM | NO_REPEAT=False
+10/22 12:51:31 AM | PATH=searchs/cifar10-search
+10/22 12:51:31 AM | PLOT_PATH=searchs/cifar10-search/plots
+10/22 12:51:31 AM | PRETRAIN_DECAY=0
+10/22 12:51:31 AM | PRETRAIN_EPOCHS=0
+10/22 12:51:31 AM | PRINT_FREQ=10
+10/22 12:51:31 AM | RETRAIN_EPOCHS=1
+10/22 12:51:31 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
+10/22 12:51:31 AM | RETRAIN_SETTING=0
+10/22 12:51:31 AM | RETRAIN_UPDATE_W=True
+10/22 12:51:31 AM | SAME_STRUCTURE=True
+10/22 12:51:31 AM | SAMPLE_RATIO=0.2
+10/22 12:51:31 AM | SEARCH_ITER=25
+10/22 12:51:31 AM | SEARCH_ITER_EPOCHS=1
+10/22 12:51:31 AM | SEED=0
+10/22 12:51:31 AM | SHORT_CONNECT=False
+10/22 12:51:31 AM | SYNC_PARAM=True
+10/22 12:51:31 AM | TEACHER2STUDENT=True
+10/22 12:51:31 AM | TEST_DIR=/data/imagenet/val
+10/22 12:51:31 AM | TRAIN_DIR=/data/imagenet/train
+10/22 12:51:31 AM | TRAIN_PORTION=0.5
+10/22 12:51:31 AM | UNROLLED=False
+10/22 12:51:31 AM | USE_BETA=True
+10/22 12:51:31 AM | VAL_DIR=/data/imagenet/train
+10/22 12:51:31 AM | W_GRAD_CLIP=5.0
+10/22 12:51:31 AM | W_LR=0.05
+10/22 12:51:31 AM | W_LR_MIN=0.001
+10/22 12:51:31 AM | W_MOMENTUM=0.9
+10/22 12:51:31 AM | W_WEIGHT_DECAY=0.0003
+10/22 12:51:31 AM | WORKERS=1
+10/22 12:51:31 AM | WORLD_SIZE=1
+10/22 12:51:31 AM |
+10/22 12:51:31 AM | Logger is set - training start
('dil_conv_5x5', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +Final best Prec@1 = 0.0000% +Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +Retrain Epoch 0 LR 0.05 +10/22 12:57:28 AM | +10/22 12:57:28 AM | Parameters: +10/22 12:57:28 AM | ALPHA_LR=0.0003 +10/22 12:57:28 AM | ALPHA_WEIGHT_DECAY=0.001 +10/22 12:57:28 AM | AUX_WEIGHT=0.4 +10/22 12:57:28 AM | BATCH_SIZE=64 +10/22 12:57:28 AM | CELLS_NUM=3 +10/22 12:57:28 AM | CLEAN_ARCH=True +10/22 12:57:28 AM | CUTOUT_LENGTH=16 +10/22 12:57:28 AM | DATA_DIR=./cifar +10/22 12:57:28 AM | DATA_PATH=./data/ +10/22 12:57:28 AM | DATASET=cifar10 +10/22 12:57:28 AM | DIST_URL=tcp://127.0.0.1:23343 +10/22 12:57:28 AM | DISTRIBUTED=True +10/22 12:57:28 AM | DROP_PATH_PROB=0.2 +10/22 12:57:28 AM | ENSEMBLE=True +10/22 12:57:28 AM | GPUS=[0] +10/22 12:57:28 AM | INIT_CHANNELS=16 +10/22 12:57:28 AM | INPUT_CHANNELS=3 +10/22 12:57:28 AM | LAYER_NUM=3 +10/22 12:57:28 AM | LOCAL_RANK=0 +10/22 12:57:28 AM | LR_RATIO=0.5 +10/22 12:57:28 AM | MODEL_TYPE=cifar +10/22 12:57:28 AM | N_CLASSES=10 +10/22 12:57:28 AM | NAME=cifar10-search +10/22 12:57:28 AM | NO_REPEAT=False +10/22 12:57:28 AM | PATH=searchs/cifar10-search +10/22 12:57:28 AM | PLOT_PATH=searchs/cifar10-search/plots +10/22 12:57:28 AM | PRETRAIN_DECAY=0 +10/22 12:57:28 AM | PRETRAIN_EPOCHS=0 +10/22 12:57:28 AM | PRINT_FREQ=10 +10/22 12:57:28 AM | RETRAIN_EPOCHS=1 +10/22 12:57:28 AM | RETRAIN_PATH=searchs/cifar10-search/retrains +10/22 12:57:28 AM | RETRAIN_SETTING=0 +10/22 12:57:28 AM | RETRAIN_UPDATE_W=True +10/22 12:57:28 AM | SAME_STRUCTURE=True +10/22 12:57:28 AM | SAMPLE_RATIO=0.2 +10/22 12:57:28 AM | SEARCH_ITER=25 +10/22 12:57:28 AM | SEARCH_ITER_EPOCHS=1 +10/22 12:57:28 AM | SEED=0 +10/22 12:57:28 AM | 
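The blocks above are the softmaxed architecture parameters of a DARTS-style search: each # Alpha block holds one (num_edges x 8) matrix per intermediate node (nodes have 2-5 incoming edges), and each # Beta vector re-weights one node's incoming edges. A minimal sketch of how such a dump could be produced, assuming standard DARTS conventions; the ArchParams class and print_arch helper are illustrative, not this repository's actual API:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ArchParams(nn.Module):
    """Hypothetical container for DARTS-style architecture parameters.
    Node i (i = 0..3) has i + 2 incoming edges and 8 candidate ops,
    matching the (2..5) x 8 alpha matrices and the length-2..5 beta
    vectors in the dumps above."""
    def __init__(self, n_nodes=4, n_ops=8):
        super().__init__()
        self.alpha_normal = nn.ParameterList(
            [nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)) for i in range(n_nodes)])
        self.beta_normal = nn.ParameterList(
            [nn.Parameter(1e-3 * torch.randn(i + 2)) for i in range(n_nodes)])

    def print_arch(self):
        print("####### ALPHA #######")
        print("# Alpha - normal")
        for a in self.alpha_normal:
            # softmax over the op axis: each row sums to 1, hence the
            # grad_fn=<SoftmaxBackward> on the printed tensors
            print(F.softmax(a, dim=-1))
        print("####### BETA #######")
        print("# Beta - normal")
        for b in self.beta_normal:
            # softmax over a node's incoming edges
            print(F.softmax(b, dim=-1))

ArchParams().print_arch()
```

At initialization the softmax is near-uniform, which is why every alpha entry above sits near 0.1250 (1/8 over the eight candidate ops) and the beta vectors near 1/2, 1/3, 1/4, and 1/5.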
+[the ALPHA/BETA dumps, Train steps, genotypes, and Best Genotype lines
+ above are then logged again, verbatim]
+Retrain Epoch 0 LR 0.05
+10/22 12:58:12 AM | Parameters:
+[parameter listing identical to the 12:57:28 AM run]
+10/22 12:58:12 AM | Logger is set - training start
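The Genotype lines are the discrete architectures decoded from the alpha weights. A sketch of the standard DARTS-style decoding (top-2 incoming edges per node, best non-'none' op per edge); the PRIMITIVES ordering and the parse helper are assumptions for illustration, not this repository's exact code:

```python
from collections import namedtuple
import torch

# the container type printed in the genotype lines above
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')

# assumed op ordering for the 8-wide alpha rows; illustrative only
PRIMITIVES = ['none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect',
              'sep_conv_3x3', 'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5']

def parse(alpha):
    """For each node keep the two strongest incoming edges, each with its
    best non-'none' op, giving pairs such as ('sep_conv_3x3', 0)."""
    gene = []
    for edges in alpha:                  # edges: (n_edges, 8) softmax weights
        w = edges.detach().clone()
        w[:, 0] = -1.0                   # mask the 'none' op
        best_w, best_op = w.max(dim=1)   # best op (and its weight) per edge
        top2 = best_w.topk(2).indices.tolist()
        gene.append([(PRIMITIVES[int(best_op[j])], j) for j in top2])
    return gene

# near-uniform alphas, as in the dumps above, make the pick essentially noise
alpha_normal = [torch.full((i + 2, 8), 0.125) for i in range(4)]
genotype = Genotype(normal=parse(alpha_normal), normal_concat=range(2, 6),
                    reduce=parse(alpha_normal), reduce_concat=range(2, 6))
print(genotype)
```

With alphas this close to uniform, the argmax is dominated by initialization noise, consistent with the arbitrary-looking op choices in these first-epoch genotypes.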
+[the ALPHA/BETA dumps, Train steps, genotypes, and Best Genotype lines
+ above are then logged again, verbatim]
+Retrain Epoch 0 LR 0.05
+10/22 12:59:06 AM | Parameters:
+[parameter listing identical to the 12:57:28 AM run]
+10/22 12:59:06 AM | Logger is set - training start
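USE_BETA=True in the parameter dumps enables the edge-level weights printed as # Beta. A sketch of the usual way such weights enter the cell computation; the node_output helper is illustrative, under the assumption that softmax(beta) re-weights a node's incoming edges before they are summed:

```python
import torch
import torch.nn.functional as F

def node_output(edge_outputs, beta):
    """Illustrative use of a beta vector: re-weight a node's incoming
    edge outputs by softmax(beta), then sum them."""
    weights = F.softmax(beta, dim=-1)           # e.g. ~[1/3, 1/3, 1/3]
    return sum(w * s for w, s in zip(weights, edge_outputs))

beta = torch.zeros(3, requires_grad=True)       # node with 3 incoming edges
states = [torch.randn(2, 16, 8, 8) for _ in range(3)]
out = node_output(states, beta)                 # shape (2, 16, 8, 8)
```

At initialization this reproduces the length-2..5 vectors above: a zero (or near-zero) beta softmaxes to ~1/2, 1/3, 1/4, 1/5 per entry.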
+[the ALPHA/BETA dumps, Train steps, genotypes, and Best Genotype lines
+ above are then logged again, verbatim]
+Retrain Epoch 0 LR 0.05
+10/22 01:00:02 AM | Parameters:
+[parameter listing identical to the 12:57:28 AM run]
+10/22 01:00:02 AM | Logger is set - training start
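The Prec@(1,5) figures in the Train lines are standard top-1/top-5 accuracies. A sketch of the usual helper, using the batch size and class count from the parameter dumps:

```python
import torch

def accuracy(output, target, topk=(1, 5)):
    """Top-k precision as percentages, i.e. the Prec@(1,5) of the Train lines."""
    maxk = max(topk)
    batch = target.size(0)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()                                       # (maxk, N)
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return [correct[:k].reshape(-1).float().sum().item() * 100.0 / batch
            for k in topk]

logits = torch.randn(64, 10)         # BATCH_SIZE=64, N_CLASSES=10 as logged
labels = torch.randint(0, 10, (64,))
top1, top5 = accuracy(logits, labels)
```

An untrained 10-class classifier sits near 10% top-1 and 50% top-5, which is exactly where the logged readings of (9.4%, 48.4%) and (10.9%, 47.7%) fall.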
+[the ALPHA/BETA dumps, Train steps, genotypes, and Best Genotype lines
+ above are logged again, verbatim, by this run as well]
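The retrain step logged just below reports a separate Loss_distill term alongside the classification loss (cf. TEACHER2STUDENT=True and AUX_WEIGHT=0.4 in the parameter dumps). A minimal sketch of a soft-target distillation loss of that general kind; the temperature T and the unweighted sum are assumptions, not the repository's exact formulation:

```python
import torch
import torch.nn.functional as F

def distill_loss(student_logits, teacher_logits, T=4.0):
    """Soft-target KL distillation loss; temperature T is an assumption."""
    log_p = F.log_softmax(student_logits / T, dim=1)
    q = F.softmax(teacher_logits / T, dim=1)
    # KL between softened distributions, rescaled by T^2 as is conventional
    return F.kl_div(log_p, q, reduction='batchmean') * (T * T)

student = torch.randn(64, 10, requires_grad=True)
teacher = torch.randn(64, 10)
hard = F.cross_entropy(student, torch.randint(0, 10, (64,)))
loss = hard + distill_loss(student, teacher)   # cf. Loss and Loss_distill
loss.backward()
```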
+Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
+Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
+Retrain Epoch 0 LR 0.05
+Retrain: Layer 1/3 Epoch 1/25 Step 000/002 Loss 3.463 Loss_distill 1.154 Prec@(1,5) (9.4%, 48.4%)
+Retrain: Layer 1/3 Epoch 1/25 Final Prec@1 10.1562%
+10/22 01:03:57 AM |
+10/22 01:03:57 AM | Parameters:
+10/22 01:03:57 AM | ALPHA_LR=0.0003
+10/22 01:03:57 AM | ALPHA_WEIGHT_DECAY=0.001
+10/22 01:03:57 AM | AUX_WEIGHT=0.4
+10/22 01:03:57 AM | BATCH_SIZE=64
+10/22 01:03:57 AM | CELLS_NUM=3
+10/22 01:03:57 AM | CLEAN_ARCH=True
+10/22 01:03:57 AM | CUTOUT_LENGTH=16
+10/22 01:03:57 AM | DATA_DIR=./cifar
+10/22 01:03:57 AM | DATA_PATH=./data/
+10/22 01:03:57 AM | DATASET=cifar10
+10/22 01:03:57 AM | DIST_URL=tcp://127.0.0.1:23343
+10/22 01:03:57 AM | DISTRIBUTED=True
+10/22 01:03:57 AM | DROP_PATH_PROB=0.2
+10/22 01:03:57 AM | ENSEMBLE=True
+10/22 01:03:57 AM | GPUS=[0]
+10/22 01:03:57 AM | INIT_CHANNELS=16
+10/22 01:03:57 AM | INPUT_CHANNELS=3
+10/22 01:03:57 AM | LAYER_NUM=3
+10/22 01:03:57 AM | LOCAL_RANK=0
+10/22 01:03:57 AM | LR_RATIO=0.5
+10/22 01:03:57 AM | MODEL_TYPE=cifar
+10/22 01:03:57 AM | N_CLASSES=10
+10/22 01:03:57 AM | NAME=cifar10-search
+10/22 01:03:57 AM | NO_REPEAT=False
+10/22 01:03:57 AM | PATH=searchs/cifar10-search
+10/22 01:03:57 AM | PLOT_PATH=searchs/cifar10-search/plots
+10/22 01:03:57 AM | PRETRAIN_DECAY=0
+10/22 01:03:57 AM | PRETRAIN_EPOCHS=0
+10/22 01:03:57 AM | PRINT_FREQ=10
+10/22 01:03:57 AM | RETRAIN_EPOCHS=1
+10/22 01:03:57 AM | RETRAIN_PATH=searchs/cifar10-search/retrains
+10/22 01:03:57 AM | RETRAIN_SETTING=0
+10/22 01:03:57 AM | RETRAIN_UPDATE_W=True
+10/22 01:03:57 AM | SAME_STRUCTURE=True
+10/22 01:03:57 AM | SAMPLE_RATIO=0.2
+10/22 01:03:57 AM | SEARCH_ITER=25
+10/22 01:03:57 AM | SEARCH_ITER_EPOCHS=1
+10/22 01:03:57 AM | SEED=0
+10/22 01:03:57 AM | SHORT_CONNECT=False
+10/22 01:03:57 AM | SYNC_PARAM=True
+10/22 01:03:57 AM | TEACHER2STUDENT=True
+10/22 01:03:57 AM | TEST_DIR=/data/imagenet/val
+10/22 01:03:57 AM | TRAIN_DIR=/data/imagenet/train
+10/22 01:03:57 AM | TRAIN_MAIN_FIRST=False
+10/22 01:03:57 AM | TRAIN_PORTION=0.5
+10/22 01:03:57 AM | UNROLLED=False
+10/22 01:03:57 AM | USE_BETA=True
+10/22 01:03:57 AM | VAL_DIR=/data/imagenet/train
+10/22 01:03:57 AM | W_GRAD_CLIP=5.0
+10/22 01:03:57 AM | W_LR=0.05
+10/22 01:03:57 AM | W_LR_MIN=0.001
+10/22 01:03:57 AM | W_MOMENTUM=0.9
+10/22 01:03:57 AM | W_WEIGHT_DECAY=0.0003
+10/22 01:03:57 AM | WORKERS=1
+10/22 01:03:57 AM | WORLD_SIZE=1
+10/22 01:03:57 AM |
+10/22 01:03:57 AM | Logger is set - training start
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1249, 0.1248, 0.1254, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251],
+        [0.1250, 0.1251, 0.1249, 0.1251, 0.1250, 0.1251, 0.1249, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1252, 0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249],
+        [0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1251, 0.1247, 0.1250],
+        [0.1250, 0.1250, 0.1251, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1248, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252],
+        [0.1249, 0.1251, 0.1252, 0.1250, 0.1249, 0.1249, 0.1250, 0.1250],
+        [0.1248, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1251, 0.1251],
+        [0.1248, 0.1250, 0.1249, 0.1251, 0.1250, 0.1251, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1250, 0.1251, 0.1252, 0.1250, 0.1249, 0.1248, 0.1251],
+        [0.1252, 0.1250, 0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250],
+        [0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1249],
+        [0.1248, 0.1252, 0.1250, 0.1248, 0.1251, 0.1252, 0.1248, 0.1251],
+        [0.1249, 0.1249, 0.1252, 0.1250, 0.1250, 0.1249, 0.1250, 0.1249]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1249, 0.1252, 0.1251, 0.1249],
+        [0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1248, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1250],
+        [0.1251, 0.1249, 0.1248, 0.1248, 0.1251, 0.1252, 0.1249, 0.1251],
+        [0.1251, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1250, 0.1252, 0.1250],
+        [0.1252, 0.1251, 0.1248, 0.1250, 0.1248, 0.1249, 0.1252, 0.1250],
+        [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250],
+        [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1252, 0.1249, 0.1249]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1250, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251, 0.1249],
+        [0.1248, 0.1251, 0.1250, 0.1250, 0.1248, 0.1251, 0.1250, 0.1251],
+        [0.1250, 0.1249, 0.1250, 0.1248, 0.1249, 0.1252, 0.1249, 0.1252],
+        [0.1250, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1252, 0.1248],
+        [0.1250, 0.1250, 0.1249, 0.1249, 0.1248, 0.1250, 0.1252, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3330, 0.3332, 0.3338], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2498, 0.2503, 0.2500, 0.2499], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2001, 0.2000, 0.1999, 0.2001, 0.2000], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3339, 0.3326, 0.3336], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2500, 0.2499, 0.2503, 0.2499], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2001, 0.2001, 0.2000, 0.1998, 0.2000], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+#####################
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1250, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250, 0.1248, 0.1248],
+        [0.1250, 0.1251, 0.1249, 0.1248, 0.1249, 0.1250, 0.1251, 0.1252]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1248, 0.1250, 0.1250, 0.1253, 0.1252, 0.1247, 0.1249, 0.1251],
+        [0.1250, 0.1250, 0.1249, 0.1251, 0.1252, 0.1249, 0.1249, 0.1250],
+        [0.1251, 0.1251, 0.1250, 0.1249, 0.1252, 0.1249, 0.1251, 0.1249]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1249, 0.1250, 0.1249, 0.1250, 0.1250, 0.1249, 0.1251, 0.1251],
+        [0.1250, 0.1252, 0.1250, 0.1251, 0.1248, 0.1249, 0.1250, 0.1250],
+        [0.1249, 0.1248, 0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1252],
+        [0.1250, 0.1251, 0.1250, 0.1248, 0.1251, 0.1250, 0.1249, 0.1251]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1253, 0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1249, 0.1250],
+        [0.1251, 0.1250, 0.1250, 0.1247, 0.1250, 0.1250, 0.1249, 0.1252],
+        [0.1248, 0.1251, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251],
+        [0.1250, 0.1249, 0.1250, 0.1253, 0.1251, 0.1250, 0.1249, 0.1248],
+        [0.1250, 0.1249, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1251]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1249, 0.1251, 0.1248, 0.1251, 0.1251, 0.1250, 0.1248, 0.1251],
+        [0.1249, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1252, 0.1249, 0.1250, 0.1248, 0.1251, 0.1251, 0.1250, 0.1250],
+        [0.1250, 0.1251, 0.1250, 0.1249, 0.1251, 0.1248, 0.1251, 0.1249],
+        [0.1249, 0.1250, 0.1249, 0.1250, 0.1251, 0.1250, 0.1251, 0.1252]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1251, 0.1251, 0.1250, 0.1248, 0.1250, 0.1251, 0.1249, 0.1250],
+        [0.1250, 0.1249, 0.1251, 0.1249, 0.1250, 0.1251, 0.1250, 0.1250],
+        [0.1249, 0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1252, 0.1251],
+        [0.1250, 0.1251, 0.1248, 0.1251, 0.1251, 0.1249, 0.1252, 0.1248]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1251, 0.1250, 0.1251, 0.1250, 0.1251, 0.1250, 0.1249, 0.1250],
+        [0.1250, 0.1251, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250, 0.1251],
+        [0.1250, 0.1251, 0.1251, 0.1249, 0.1251, 0.1250, 0.1248, 0.1251],
+        [0.1248, 0.1250, 0.1250, 0.1250, 0.1249, 0.1252, 0.1250, 0.1251],
+        [0.1250, 0.1248, 0.1252, 0.1251, 0.1248, 0.1249, 0.1252, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5007, 0.4993], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3334, 0.3337, 0.3329], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2498, 0.2500, 0.2499, 0.2503], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.1999, 0.2000, 0.2001, 0.2001, 0.2000], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.4997, 0.5003], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3331, 0.3335, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2501, 0.2499, 0.2498, 0.2503], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.2001, 0.2001, 0.1999, 0.1998], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+#####################
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1250, 0.1251, 0.1250, 0.1252, 0.1250, 0.1248, 0.1249, 0.1250],
+        [0.1248, 0.1249, 0.1249, 0.1252, 0.1249, 0.1252, 0.1251, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1252, 0.1248, 0.1250, 0.1250],
+        [0.1252, 0.1250, 0.1248, 0.1252, 0.1249, 0.1248, 0.1250, 0.1251]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1251, 0.1249, 0.1251, 0.1251, 0.1249, 0.1248, 0.1251],
+        [0.1250, 0.1248, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249, 0.1251],
+        [0.1250, 0.1251, 0.1250, 0.1251, 0.1248, 0.1249, 0.1249, 0.1251],
+        [0.1248, 0.1250, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1248, 0.1251, 0.1250, 0.1250, 0.1251, 0.1250, 0.1251, 0.1249],
+        [0.1251, 0.1248, 0.1252, 0.1250, 0.1250, 0.1251, 0.1250, 0.1249],
+        [0.1251, 0.1251, 0.1248, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1249, 0.1251, 0.1249, 0.1251, 0.1250, 0.1249, 0.1250, 0.1250],
+        [0.1251, 0.1249, 0.1251, 0.1250, 0.1250, 0.1248, 0.1250, 0.1251]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1250, 0.1251, 0.1252, 0.1248, 0.1251, 0.1250, 0.1249, 0.1248],
+        [0.1248, 0.1250, 0.1250, 0.1251, 0.1251, 0.1250, 0.1249, 0.1252]],
+       device='cuda:0')
+tensor([[0.1250, 0.1252, 0.1251, 0.1251, 0.1249, 0.1249, 0.1249, 0.1249],
+        [0.1250, 0.1251, 0.1249, 0.1249, 0.1250, 0.1249, 0.1252, 0.1250],
+        [0.1251, 0.1251, 0.1250, 0.1252, 0.1249, 0.1249, 0.1252, 0.1246]],
+       device='cuda:0')
+tensor([[0.1249, 0.1251, 0.1251, 0.1249, 0.1250, 0.1252, 0.1248, 0.1249],
+        [0.1250, 0.1249, 0.1252, 0.1250, 0.1250, 0.1248, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1251, 0.1252, 0.1249, 0.1248, 0.1248, 0.1252],
+        [0.1250, 0.1253, 0.1249, 0.1250, 0.1251, 0.1247, 0.1249, 0.1252]],
+       device='cuda:0')
+tensor([[0.1252, 0.1251, 0.1247, 0.1250, 0.1250, 0.1249, 0.1249, 0.1252],
+        [0.1249, 0.1250, 0.1248, 0.1251, 0.1249, 0.1252, 0.1250, 0.1251],
+        [0.1250, 0.1252, 0.1250, 0.1250, 0.1251, 0.1249, 0.1249, 0.1250],
+        [0.1249, 0.1251, 0.1250, 0.1250, 0.1249, 0.1249, 0.1251, 0.1250],
+        [0.1250, 0.1250, 0.1249, 0.1249, 0.1249, 0.1253, 0.1250, 0.1251]],
+       device='cuda:0')
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3335, 0.3330, 0.3335], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2503, 0.2498, 0.2499, 0.2500], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.1998, 0.1999, 0.2000, 0.2002], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.5002, 0.4998], device='cuda:0')
+tensor([0.3336, 0.3333, 0.3331], device='cuda:0')
+tensor([0.2502, 0.2503, 0.2498, 0.2498], device='cuda:0')
+tensor([0.2001, 0.1999, 0.2003, 0.1999, 0.1997], device='cuda:0')
+#####################
+Train: Layer 1/3 Epoch 1/25 Step 000/002 Loss 2.295 Prec@(1,5) (9.4%, 48.4%)
+Train: Layer 1/3 Epoch 1/25 Step 001/002 Loss 2.302 Prec@(1,5) (10.9%, 47.7%)
+Train: Layer 1/3 Epoch 1/25 Final Prec@1 10.9375%
+Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
+Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
+Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
+Final best Prec@1 = 0.0000%
+Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
+Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
+Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
+Retrain Epoch 0 LR 0.05
+Retrain: Layer 1/3 Epoch 1/25 Step 000/002 Loss 3.463 Loss_distill 1.154 Prec@(1,5) (9.4%, 48.4%)
+Retrain: Layer 1/3 Epoch 1/25 Final Prec@1 10.1562%
+Valid: Layer 1/3 Epoch 1/25 Step 000/157 Loss 2.302 Prec@(1,5) (6.2%, 51.6%)
+Valid: Layer 1/3 Epoch 1/25 Step 010/157 Loss 2.303 Prec@(1,5) (10.4%, 49.1%)
+Valid: Layer 1/3 Epoch 1/25 Step 020/157 Loss 2.303 Prec@(1,5) (9.7%, 47.9%)
+Valid: Layer 1/3 Epoch 1/25 Step 030/157 Loss 2.303 Prec@(1,5) (9.6%, 48.9%)
+Valid: Layer 1/3 Epoch 1/25 Step 040/157 Loss 2.303 Prec@(1,5) (9.5%, 49.8%)
+Valid: Layer 1/3 Epoch 1/25 Step 050/157 Loss 2.303 Prec@(1,5) (9.9%, 49.8%)
+Valid: Layer 1/3 Epoch 1/25 Step 060/157 Loss 2.303 Prec@(1,5) (9.8%, 49.4%)
+Valid: Layer 1/3 Epoch 1/25 Step 070/157 Loss 2.303 Prec@(1,5) (9.7%, 49.6%)
+Valid: Layer 1/3 Epoch 1/25 Step 080/157 Loss 2.303 Prec@(1,5) (9.8%, 49.3%)
+Valid: Layer 1/3 Epoch 1/25 Step 090/157 Loss 2.303 Prec@(1,5) (9.9%, 49.3%)
+Valid: Layer 1/3 Epoch 1/25 Step 100/157 Loss 2.303 Prec@(1,5) (9.6%, 49.1%)
+Valid: Layer 1/3 Epoch 1/25 Step 110/157 Loss 2.303 Prec@(1,5) (9.8%, 49.6%)
+Valid: Layer 1/3 Epoch 1/25 Step 120/157 Loss 2.303 Prec@(1,5) (9.8%, 49.8%)
+Valid: Layer 1/3 Epoch 1/25 Step 130/157 Loss 2.303 Prec@(1,5) (9.8%, 50.0%)
+Valid: Layer 1/3 Epoch 1/25 Step 140/157 Loss 2.303 Prec@(1,5) (9.8%, 50.0%)
+Valid: Layer 1/3 Epoch 1/25 Step 150/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%)
+Valid: Layer 1/3 Epoch 1/25 Step 156/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%)
+Valid: Layer 1/3 Epoch 1/25 Final Prec@1 10.0000%
+Final best Prec@1 = 10.0000%
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3334, 0.3333, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2500, 0.2500, 0.2499, 0.2500], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3332, 0.3334, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2001], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+#####################
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.4999, 0.5001], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3333, 0.3333, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2499, 0.2501, 0.2500, 0.2501], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.2000, 0.2000, 0.2001, 0.2000], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2001], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+#####################
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0')
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0')
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0')
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0')
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2500, 0.2500, 0.2499, 0.2501], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2001, 0.2000, 0.2000, 0.2000, 0.2001], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.5000, 0.5000], device='cuda:0')
+tensor([0.3333, 0.3333, 0.3333], device='cuda:0')
+tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0')
+tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0')
+#####################
+Train: Layer 1/3 Epoch 2/25 Step 000/002 Loss 2.273 Prec@(1,5) (15.6%, 62.5%)
+Train: Layer 1/3 Epoch 2/25 Step 001/002 Loss 2.264 Prec@(1,5) (18.0%, 57.8%)
+Train: Layer 1/3 Epoch 2/25 Final Prec@1 17.9688%
+Stage: 0 Layer: 1 genotype = Genotype(normal=[[('sep_conv_3x3', 1), ('sep_conv_5x5', 0)], [('sep_conv_5x5', 2), ('skip_connect', 1)], [('dil_conv_5x5', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 4), ('dil_conv_5x5', 0)]], normal_concat=range(2, 6), reduce=[[('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('sep_conv_3x3', 1), ('max_pool_3x3', 2)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 0), ('dil_conv_3x3', 4)]], reduce_concat=range(2, 6))
+Stage: 0 Layer: 2 genotype = Genotype(normal=[[('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 1), ('sep_conv_5x5', 0)], [('sep_conv_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_3x3', 3), ('avg_pool_3x3', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('max_pool_3x3', 0)], [('avg_pool_3x3', 2), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 4), ('max_pool_3x3', 2)]], reduce_concat=range(2, 6))
+Stage: 0 Layer: 3 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('dil_conv_5x5', 0)], [('dil_conv_3x3', 3), ('sep_conv_5x5', 1)], [('dil_conv_5x5', 4), ('sep_conv_5x5', 0)]], normal_concat=range(2, 6), reduce=[[('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
+Final best Prec@1 = 10.0000%
+Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6))
+Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6))
+Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6))
+Retrain Epoch 1 LR 0.05
+Retrain: Layer 1/3 Epoch 2/25 Step 000/002 Loss 3.433 Loss_distill 1.145 Prec@(1,5) (17.2%, 53.1%)
+Retrain: Layer 1/3 Epoch 2/25 Final Prec@1 10.9375%
+Valid: Layer 1/3 Epoch 2/25 Step 000/157 Loss 2.301 Prec@(1,5) (15.6%, 59.4%)
+Valid: Layer 1/3 Epoch 2/25 Step 010/157 Loss 2.303 Prec@(1,5) (14.9%, 49.7%)
+Valid: Layer 1/3 Epoch 2/25 Step 020/157 Loss 2.304 Prec@(1,5) (12.1%, 49.9%)
+Valid: Layer 1/3 Epoch 2/25 Step 030/157 Loss 2.304 Prec@(1,5) (10.8%, 49.7%)
+Valid: Layer 1/3 Epoch 2/25 Step 040/157 Loss 2.303 Prec@(1,5) (11.1%, 50.0%)
+Valid: Layer 1/3 Epoch 2/25 Step 050/157 Loss 2.303 Prec@(1,5) (10.4%, 49.9%)
+Valid: Layer 1/3 Epoch 2/25 Step 060/157 Loss 2.303 Prec@(1,5) (9.9%, 49.8%)
+Valid: Layer 1/3 Epoch 2/25 Step 070/157 Loss 2.303 Prec@(1,5) (10.0%, 49.8%)
+Valid: Layer 1/3 Epoch 2/25 Step 080/157 Loss 2.303 Prec@(1,5) (10.2%, 50.0%)
+Valid: Layer 1/3 Epoch 2/25 Step 090/157 Loss 2.303 Prec@(1,5) (10.0%, 49.6%)
+Valid: Layer 1/3 Epoch 2/25 Step 100/157 Loss 2.303 Prec@(1,5) (9.9%, 49.8%)
+Valid: Layer 1/3 Epoch 2/25 Step 110/157 Loss 2.303 Prec@(1,5) (9.9%, 49.8%)
+Valid: Layer 1/3 Epoch 2/25 Step 120/157 Loss 2.303 Prec@(1,5) (9.8%, 49.9%)
+Valid: Layer 1/3 Epoch 2/25 Step 130/157 Loss 2.303 Prec@(1,5) (10.0%, 50.2%)
+Valid: Layer 1/3 Epoch 2/25 Step 140/157 Loss 2.303 Prec@(1,5) (10.0%, 50.1%)
+Valid: Layer 1/3 Epoch 2/25 Step 150/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%)
+Valid: Layer 1/3 Epoch 2/25 Step 156/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%)
+Valid: Layer 1/3 Epoch 2/25 Final Prec@1 10.0000%
+Final best Prec@1 = 10.0000%
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.4999, 0.5001], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3332, 0.3334, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.2000, 0.1999, 0.2000, 0.2001], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.4999, 0.5001], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3333, 0.3334, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2500, 0.2501, 0.2499, 0.2500], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2001, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+#####################
+####### ALPHA #######
+# Alpha - normal
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+
+# Alpha - reduce
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250],
+        [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]],
+       device='cuda:0', grad_fn=<SoftmaxBackward>)
+#####################
+####### BETA #######
+# Beta - normal
+tensor([0.5000, 0.5000], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2500, 0.2499, 0.2500, 0.2501], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+
+# Beta - reduce
+tensor([0.5001, 0.4999], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.3334, 0.3333, 0.3334], device='cuda:0', grad_fn=<SoftmaxBackward>)
+tensor([0.2500, 0.2500, 0.2500, 0.2501], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+tensor([0.1999, 0.1999, 0.2000, 0.2000, 0.2000], device='cuda:0',
+       grad_fn=<SoftmaxBackward>)
+#####################
+####### ALPHA #######
+# Alpha - normal
0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0') +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0') +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0') +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0') +##################### +####### BETA ####### +# Beta - normal +tensor([0.5000, 0.5000], device='cuda:0', grad_fn=) +tensor([0.3333, 0.3335, 0.3333], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2500, 0.2499, 0.2501], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.2000, 0.2000, 0.1999, 0.2000], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5000, 0.5000], device='cuda:0') +tensor([0.3333, 0.3333, 0.3333], device='cuda:0') +tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0') +tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0') +##################### +Train: Layer 1/3 Epoch 3/25 Step 000/002 Loss 2.251 Prec@(1,5) (18.8%, 68.8%) +Train: Layer 1/3 Epoch 3/25 Step 001/002 Loss 2.230 Prec@(1,5) (25.8%, 72.7%) +Train: Layer 1/3 Epoch 3/25 Final Prec@1 25.7812% +Stage: 0 Layer: 1 genotype = Genotype(normal=[[('skip_connect', 0), ('sep_conv_3x3', 1)], [('sep_conv_5x5', 2), ('avg_pool_3x3', 0)], [('dil_conv_5x5', 0), ('sep_conv_3x3', 2)], [('avg_pool_3x3', 0), ('dil_conv_3x3', 4)]], normal_concat=range(2, 6), reduce=[[('sep_conv_5x5', 1), ('max_pool_3x3', 0)], [('sep_conv_3x3', 1), ('avg_pool_3x3', 2)], [('sep_conv_5x5', 1), ('dil_conv_5x5', 3)], [('dil_conv_3x3', 4), ('avg_pool_3x3', 2)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 0), ('sep_conv_3x3', 2)], 
[('sep_conv_3x3', 1), ('dil_conv_3x3', 2)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_5x5', 1)], [('dil_conv_3x3', 0), ('dil_conv_5x5', 1)], [('avg_pool_3x3', 3), ('dil_conv_5x5', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 genotype = Genotype(normal=[[('avg_pool_3x3', 0), ('avg_pool_3x3', 1)], [('sep_conv_5x5', 1), ('dil_conv_5x5', 0)], [('sep_conv_5x5', 3), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 4), ('sep_conv_5x5', 0)]], normal_concat=range(2, 6), reduce=[[('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +Final best Prec@1 = 10.0000% +Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +Retrain Epoch 2 LR 0.05 +Retrain: Layer 1/3 Epoch 3/25 Step 000/002 Loss 3.435 Loss_distill 1.145 Prec@(1,5) (10.9%, 67.2%) +Retrain: Layer 1/3 Epoch 3/25 Final Prec@1 7.8125% +Valid: Layer 1/3 Epoch 3/25 Step 000/157 Loss 2.303 Prec@(1,5) (10.9%, 45.3%) +Valid: Layer 1/3 Epoch 3/25 Step 010/157 Loss 2.301 Prec@(1,5) (10.4%, 52.4%) +Valid: Layer 1/3 Epoch 3/25 Step 020/157 Loss 2.303 Prec@(1,5) (9.2%, 51.9%) +Valid: Layer 1/3 Epoch 3/25 Step 030/157 Loss 2.303 Prec@(1,5) (9.3%, 51.0%) +Valid: Layer 1/3 Epoch 3/25 Step 040/157 Loss 2.304 Prec@(1,5) (9.1%, 50.3%) +Valid: Layer 1/3 Epoch 3/25 Step 050/157 Loss 2.303 Prec@(1,5) (9.1%, 50.3%) +Valid: Layer 1/3 Epoch 3/25 Step 060/157 Loss 2.304 Prec@(1,5) (9.5%, 50.3%) +Valid: Layer 1/3 Epoch 3/25 Step 070/157 Loss 2.303 Prec@(1,5) (9.7%, 50.3%) +Valid: Layer 1/3 Epoch 3/25 Step 080/157 Loss 2.303 Prec@(1,5) (9.7%, 50.3%) +Valid: Layer 1/3 Epoch 3/25 Step 090/157 Loss 2.303 Prec@(1,5) (9.4%, 50.1%) +Valid: Layer 1/3 Epoch 3/25 Step 100/157 Loss 2.303 Prec@(1,5) (9.6%, 50.4%) +Valid: Layer 1/3 Epoch 3/25 Step 110/157 Loss 2.303 Prec@(1,5) (9.9%, 50.6%) +Valid: Layer 1/3 Epoch 3/25 Step 120/157 Loss 2.303 Prec@(1,5) (10.0%, 50.4%) +Valid: Layer 1/3 Epoch 3/25 Step 130/157 Loss 2.303 Prec@(1,5) (10.1%, 50.2%) +Valid: Layer 1/3 Epoch 3/25 Step 140/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%) +Valid: Layer 1/3 
Epoch 3/25 Step 150/157 Loss 2.303 Prec@(1,5) (10.0%, 50.1%) +Valid: Layer 1/3 Epoch 3/25 Step 156/157 Loss 2.303 Prec@(1,5) (10.0%, 50.0%) +Valid: Layer 1/3 Epoch 3/25 Final Prec@1 10.0000% +Final best Prec@1 = 10.0000% +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1251, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5000, 0.5000], device='cuda:0', grad_fn=) +tensor([0.3334, 0.3332, 0.3334], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2500, 0.2500, 0.2499], device='cuda:0', + grad_fn=) +tensor([0.2001, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.4999, 0.5001], device='cuda:0', grad_fn=) +tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2500, 0.2499, 0.2501], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + 
device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.4999, 0.5001], device='cuda:0', grad_fn=) +tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2499, 0.2500, 0.2500], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.2001, 0.2000, 0.2000, 0.1999], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5001, 0.4999], device='cuda:0', grad_fn=) +tensor([0.3333, 0.3334, 0.3334], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0', + grad_fn=) +tensor([0.1999, 0.2000, 0.2000, 0.1999, 0.2000], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 
0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0') +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0') +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0') +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0') +##################### +####### BETA ####### +# Beta - normal +tensor([0.5001, 0.4999], device='cuda:0', grad_fn=) +tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2500, 0.2500, 0.2501], device='cuda:0', + grad_fn=) +tensor([0.2001, 0.2000, 0.2000, 0.1999, 0.2001], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5000, 0.5000], device='cuda:0') +tensor([0.3333, 0.3333, 0.3333], device='cuda:0') +tensor([0.2500, 0.2500, 0.2500, 0.2500], device='cuda:0') +tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0') +##################### +Train: Layer 1/3 Epoch 4/25 Step 000/002 Loss 2.214 Prec@(1,5) (26.6%, 68.8%) +Train: Layer 1/3 Epoch 4/25 Step 001/002 Loss 2.188 Prec@(1,5) (29.7%, 75.8%) +Train: Layer 1/3 Epoch 4/25 Final Prec@1 29.6875% +Stage: 0 Layer: 1 genotype = Genotype(normal=[[('sep_conv_3x3', 1), ('skip_connect', 0)], [('sep_conv_5x5', 2), ('sep_conv_5x5', 1)], [('sep_conv_3x3', 2), ('skip_connect', 1)], [('dil_conv_5x5', 2), ('sep_conv_5x5', 0)]], normal_concat=range(2, 6), reduce=[[('avg_pool_3x3', 1), ('max_pool_3x3', 0)], [('avg_pool_3x3', 1), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 1), ('sep_conv_5x5', 2)], [('avg_pool_3x3', 1), ('sep_conv_5x5', 2)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 2 genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('avg_pool_3x3', 1)], [('avg_pool_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_3x3', 0), ('sep_conv_3x3', 3)], [('avg_pool_3x3', 1), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('avg_pool_3x3', 0), ('dil_conv_3x3', 1)], [('sep_conv_3x3', 1), ('skip_connect', 0)], [('avg_pool_3x3', 3), ('dil_conv_3x3', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 genotype = Genotype(normal=[[('avg_pool_3x3', 0), ('avg_pool_3x3', 1)], [('avg_pool_3x3', 1), ('avg_pool_3x3', 2)], [('sep_conv_5x5', 3), ('avg_pool_3x3', 0)], [('avg_pool_3x3', 4), ('max_pool_3x3', 0)]], normal_concat=range(2, 6), reduce=[[('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)], [('max_pool_3x3', 1), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +Final best Prec@1 = 10.0000% +Stage: 0 Layer: 1 Best Genotype = Genotype(normal=[[('skip_connect', 0), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 2), ('max_pool_3x3', 0)], [('skip_connect', 1), ('skip_connect', 0)], [('avg_pool_3x3', 
3), ('skip_connect', 4)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 0), ('dil_conv_3x3', 1)], [('dil_conv_3x3', 0), ('dil_conv_3x3', 2)], [('dil_conv_5x5', 0), ('sep_conv_5x5', 2)], [('dil_conv_5x5', 4), ('dil_conv_3x3', 2)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 2 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('dil_conv_5x5', 1)], [('sep_conv_5x5', 1), ('sep_conv_3x3', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 1)], [('sep_conv_3x3', 3), ('sep_conv_5x5', 1)]], normal_concat=range(2, 6), reduce=[[('dil_conv_3x3', 1), ('sep_conv_3x3', 0)], [('sep_conv_5x5', 1), ('sep_conv_5x5', 0)], [('avg_pool_3x3', 3), ('avg_pool_3x3', 0)], [('sep_conv_5x5', 2), ('dil_conv_5x5', 4)]], reduce_concat=range(2, 6)) +Stage: 0 Layer: 3 Best Genotype = Genotype(normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], [('sep_conv_3x3', 2), ('dil_conv_3x3', 0)], [('sep_conv_5x5', 0), ('dil_conv_3x3', 3)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 4)]], normal_concat=range(2, 6), reduce=[[('skip_connect', 0), ('sep_conv_5x5', 1)], [('avg_pool_3x3', 0), ('dil_conv_5x5', 1)], [('skip_connect', 1), ('dil_conv_3x3', 0)], [('avg_pool_3x3', 2), ('max_pool_3x3', 0)]], reduce_concat=range(2, 6)) +Retrain Epoch 3 LR 0.05 +Retrain: Layer 1/3 Epoch 4/25 Step 000/002 Loss 3.459 Loss_distill 1.153 Prec@(1,5) (9.4%, 48.4%) +Retrain: Layer 1/3 Epoch 4/25 Final Prec@1 10.9375% +Valid: Layer 1/3 Epoch 4/25 Step 000/157 Loss 2.298 Prec@(1,5) (10.9%, 54.7%) +Valid: Layer 1/3 Epoch 4/25 Step 010/157 Loss 2.304 Prec@(1,5) (9.8%, 50.7%) +Valid: Layer 1/3 Epoch 4/25 Step 020/157 Loss 2.303 Prec@(1,5) (10.3%, 50.7%) +Valid: Layer 1/3 Epoch 4/25 Step 030/157 Loss 2.303 Prec@(1,5) (10.0%, 49.8%) +Valid: Layer 1/3 Epoch 4/25 Step 040/157 Loss 2.304 Prec@(1,5) (9.9%, 49.5%) +Valid: Layer 1/3 Epoch 4/25 Step 050/157 Loss 2.304 Prec@(1,5) (9.7%, 49.8%) +Valid: Layer 1/3 Epoch 4/25 Step 060/157 Loss 2.304 Prec@(1,5) (10.2%, 50.1%) +Valid: Layer 1/3 Epoch 4/25 Step 070/157 Loss 2.304 Prec@(1,5) (10.2%, 50.3%) +Valid: Layer 1/3 Epoch 4/25 Step 080/157 Loss 2.304 Prec@(1,5) (9.9%, 50.3%) +Valid: Layer 1/3 Epoch 4/25 Step 090/157 Loss 2.303 Prec@(1,5) (9.8%, 50.7%) +Valid: Layer 1/3 Epoch 4/25 Step 100/157 Loss 2.303 Prec@(1,5) (9.9%, 50.4%) +Valid: Layer 1/3 Epoch 4/25 Step 110/157 Loss 2.303 Prec@(1,5) (9.9%, 50.3%) +Valid: Layer 1/3 Epoch 4/25 Step 120/157 Loss 2.303 Prec@(1,5) (10.0%, 50.3%) +Valid: Layer 1/3 Epoch 4/25 Step 130/157 Loss 2.303 Prec@(1,5) (10.0%, 50.2%) +Valid: Layer 1/3 Epoch 4/25 Step 140/157 Loss 2.303 Prec@(1,5) (10.0%, 50.3%) +Valid: Layer 1/3 Epoch 4/25 Step 150/157 Loss 2.304 Prec@(1,5) (10.0%, 50.1%) +Valid: Layer 1/3 Epoch 4/25 Step 156/157 Loss 2.304 Prec@(1,5) (10.0%, 50.0%) +Valid: Layer 1/3 Epoch 4/25 Final Prec@1 10.0000% +Final best Prec@1 = 10.0000% +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', 
grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1249, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5000, 0.5000], device='cuda:0', grad_fn=) +tensor([0.3332, 0.3333, 0.3334], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2501, 0.2500, 0.2500], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5000, 0.5000], device='cuda:0', grad_fn=) +tensor([0.3334, 0.3334, 0.3332], device='cuda:0', grad_fn=) +tensor([0.2499, 0.2501, 0.2500, 0.2499], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.2000, 0.2000, 0.2000, 0.2000], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1251, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + 
[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +##################### +####### BETA ####### +# Beta - normal +tensor([0.5000, 0.5000], device='cuda:0', grad_fn=) +tensor([0.3333, 0.3334, 0.3333], device='cuda:0', grad_fn=) +tensor([0.2501, 0.2500, 0.2500, 0.2500], device='cuda:0', + grad_fn=) +tensor([0.1999, 0.2000, 0.2000, 0.2000, 0.2001], device='cuda:0', + grad_fn=) + +# Beta - reduce +tensor([0.5002, 0.4998], device='cuda:0', grad_fn=) +tensor([0.3334, 0.3334, 0.3332], device='cuda:0', grad_fn=) +tensor([0.2500, 0.2501, 0.2499, 0.2500], device='cuda:0', + grad_fn=) +tensor([0.2000, 0.2000, 0.2000, 0.1999, 0.2000], device='cuda:0', + grad_fn=) +##################### +####### ALPHA ####### +# Alpha - normal +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1249], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0', grad_fn=) + +# Alpha - reduce +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0') +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0') +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250]], + device='cuda:0') +tensor([[0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + [0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250, 0.1250], + 
+#####################
diff --git a/benchmark201/search/cifar10-search/tb/readme.md b/benchmark201/search/cifar10-search/tb/readme.md
new file mode 100644
index 0000000..e69de29
diff --git a/benchmark201/search/imagenet-search/imagenet-search.log b/benchmark201/search/imagenet-search/imagenet-search.log
new file mode 100644
index 0000000..e69de29
diff --git a/benchmark201/search/imagenet-search/tb/readme.md b/benchmark201/search/imagenet-search/tb/readme.md
new file mode 100644
index 0000000..e69de29
diff --git a/benchmark201/utils/genotypes.py b/benchmark201/utils/genotypes.py
new file mode 100644
index 0000000..987687b
--- /dev/null
+++ b/benchmark201/utils/genotypes.py
@@ -0,0 +1,352 @@
+""" Genotypes
+    - Genotype: normal/reduce gene + normal/reduce cell output connection (concat)
+    - gene: discrete ops information (w/o output connection)
+    - dag: real ops (can be mixed or discrete, but Genotype itself holds only discrete information)
+"""
+from collections import namedtuple
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from copy import deepcopy
+from models import ops
+
+Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
+
+# NOTE: parse()/parse_gumbel() below reference PRIMITIVES, which this file never
+# defines or imports -- a NameError at call time. The list below is an assumed
+# reconstruction (the standard 8-op DARTS search space, with 'none' kept last
+# to satisfy the `PRIMITIVES[-1] == 'none'` assertion in parse()).
+PRIMITIVES = [
+    'max_pool_3x3',
+    'avg_pool_3x3',
+    'skip_connect',
+    'sep_conv_3x3',
+    'sep_conv_5x5',
+    'dil_conv_3x3',
+    'dil_conv_5x5',
+    'none',
+]
+
+
+def to_dag(C_in, gene, reduction, bn_affine=True):
+    """ generate discrete ops from gene """
+    dag = nn.ModuleList()
+    for edges in gene:
+        row = nn.ModuleList()
+        for op_name, s_idx in edges:
+            # reduction cell & from input nodes => stride = 2
+            stride = 2 if reduction and s_idx < 2 else 1
+            op = ops.OPS[op_name](C_in, stride, bn_affine)
+            if not isinstance(op, ops.Identity):  # Identity does not use drop path
+                op = nn.Sequential(
+                    op,
+                    ops.DropPath_()
+                )
+            op.s_idx = s_idx
+            row.append(op)
+        dag.append(row)
+
+    return dag
+
+
+def from_str(s):
+    """ generate genotype from string
+    NOTE: uses eval(), so the string must come from a trusted source.
+    e.g. "Genotype(
+            normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)],
+                    [('sep_conv_3x3', 1), ('dil_conv_3x3', 2)],
+                    [('sep_conv_3x3', 1), ('sep_conv_3x3', 2)],
+                    [('sep_conv_3x3', 1), ('dil_conv_3x3', 4)]],
+            normal_concat=range(2, 6),
+            reduce=[[('max_pool_3x3', 0), ('max_pool_3x3', 1)],
+                    [('max_pool_3x3', 0), ('skip_connect', 2)],
+                    [('max_pool_3x3', 0), ('skip_connect', 2)],
+                    [('max_pool_3x3', 0), ('skip_connect', 2)]],
+            reduce_concat=range(2, 6))"
+    """
+
+    genotype = eval(s)
+
+    return genotype
+
+
+def parse(alpha, beta, k):
+    """
+    Parse continuous alpha into a discrete gene.
+    alpha is a ParameterList:
+        ParameterList [
+            Parameter(n_edges1, n_ops),
+            Parameter(n_edges2, n_ops),
+            ...
+        ]
+
+    beta is a ParameterList:
+        ParameterList [
+            Parameter(n_edges1),
+            Parameter(n_edges2),
+            ...
+        ]
+
+    gene is a list:
+        [
+            [('node1_ops_1', node_idx), ..., ('node1_ops_k', node_idx)],
+            [('node2_ops_1', node_idx), ..., ('node2_ops_k', node_idx)],
+            ...
+        ]
+    Each node keeps two edges (k=2) in the CNN cell.
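+
+    Illustrative walk-through (invented numbers, not from a real run; k=1,
+    ops ['sep_conv_3x3', 'none']):
+        alpha = [tensor([[0.7, 0.3],
+                         [0.6, 0.4]])]   # one node, two candidate edges
+        beta  = [tensor([0.2, 0.8])]
+    The trailing 'none' column is dropped, each edge is scored by
+    beta * its best op weight (0.2*0.7=0.14 vs 0.8*0.6=0.48), so the gene
+    keeps ('sep_conv_3x3', 1): a strong op on a weakly weighted edge loses.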
+ """ + + gene = [] + assert PRIMITIVES[-1] == 'none' # assume last PRIMITIVE is 'none' + + # 1) Convert the mixed op to discrete edge (single op) by choosing top-1 weight edge + # 2) Choose top-k edges per node by edge score (top-1 weight in edge) + # output the connect idx[(node_idx, connect_idx, op_idx).... () ()] + connect_idx = [] + for edges, w in zip(alpha, beta): + # edges: Tensor(n_edges, n_ops) + edge_max, primitive_indices = torch.topk((w.view(-1, 1) * edges)[:, :-1], 1) # ignore 'none' + topk_edge_values, topk_edge_indices = torch.topk(edge_max.view(-1), k) + node_gene = [] + node_idx = [] + for edge_idx in topk_edge_indices: + prim_idx = primitive_indices[edge_idx] + prim = PRIMITIVES[prim_idx] + node_gene.append((prim, edge_idx.item())) + node_idx.append((edge_idx.item(), prim_idx.item())) + + gene.append(node_gene) + connect_idx.append(node_idx) + + return gene, connect_idx + +def parse_gumbel(alpha, beta, k): + """ + parse continuous alpha to discrete gene. + alpha is ParameterList: + ParameterList [ + Parameter(n_edges1, n_ops), + Parameter(n_edges2, n_ops), + ... + ] + + beta is ParameterList: + ParameterList [ + Parameter(n_edges1), + Parameter(n_edges2), + ... + ] + + gene is list: + [ + [('node1_ops_1', node_idx), ..., ('node1_ops_k', node_idx)], + [('node2_ops_1', node_idx), ..., ('node2_ops_k', node_idx)], + ... + ] + each node has two edges (k=2) in CNN. + """ + + gene = [] + assert PRIMITIVES[-1] == 'none' # assume last PRIMITIVE is 'none' + + # 1) Convert the mixed op to discrete edge (single op) by choosing top-1 weight edge + # 2) Choose top-k edges per node by edge score (top-1 weight in edge) + # output the connect idx[(node_idx, connect_idx, op_idx).... () ()] + connect_idx = [] + for edges, w in zip(alpha, beta): + # edges: Tensor(n_edges, n_ops) + discrete_a = F.gumbel_softmax(edges[:, :-1].reshape(-1), tau=1, hard=True) + for i in range(k-1): + discrete_a = discrete_a + F.gumbel_softmax(edges[:, :-1].reshape(-1), tau=1, hard=True) + discrete_a = discrete_a.reshape(-1, len(PRIMITIVES)-1) + reserved_edge = (discrete_a>0).nonzero() + + node_gene = [] + node_idx = [] + for i in range(reserved_edge.shape[0]): + edge_idx = reserved_edge[i][0].item() + prim_idx = reserved_edge[i][1].item() + prim = PRIMITIVES[prim_idx] + node_gene.append((prim, edge_idx)) + node_idx.append((edge_idx, prim_idx)) + + gene.append(node_gene) + connect_idx.append(node_idx) + + return gene, connect_idx + + + +def get_combination(space, num): + combs = [] + for i in range(num): + if i == 0: + for func in space: + combs.append( [(func, i)] ) + else: + new_combs = [] + for string in combs: + for func in space: + xstring = string + [(func, i)] + new_combs.append( xstring ) + combs = new_combs + return combs + + +class Structure: + + def __init__(self, genotype): + assert isinstance(genotype, list) or isinstance(genotype, tuple), 'invalid class of genotype : {:}'.format(type(genotype)) + self.node_num = len(genotype) + 1 + self.nodes = [] + self.node_N = [] + for idx, node_info in enumerate(genotype): + assert isinstance(node_info, list) or isinstance(node_info, tuple), 'invalid class of node_info : {:}'.format(type(node_info)) + assert len(node_info) >= 1, 'invalid length : {:}'.format(len(node_info)) + for node_in in node_info: + assert isinstance(node_in, list) or isinstance(node_in, tuple), 'invalid class of in-node : {:}'.format(type(node_in)) + assert len(node_in) == 2 and node_in[1] <= idx, 'invalid in-node : {:}'.format(node_in) + self.node_N.append( len(node_info) ) + 
+            self.nodes.append(tuple(deepcopy(node_info)))
+
+    def tolist(self, remove_str):
+        # convert this class to a list; if remove_str is 'none', the 'none' operations are dropped
+        # note that the input nodes are re-ordered in this function
+        # returns (genotype-list, success); on failure the genotype has no valid connectivity
+        genotypes = []
+        for node_info in self.nodes:
+            node_info = list(node_info)
+            node_info = sorted(node_info, key=lambda x: (x[1], x[0]))
+            node_info = tuple(filter(lambda x: x[0] != remove_str, node_info))
+            if len(node_info) == 0: return None, False
+            genotypes.append(node_info)
+        return genotypes, True
+
+    def node(self, index):
+        assert index > 0 and index <= len(self), 'invalid index={:}, must be in [1, {:}]'.format(index, len(self))
+        # nodes are 1-indexed externally; the original `self.nodes[index]`
+        # was off by one and raised IndexError for the last node
+        return self.nodes[index - 1]
+
+    def tostr(self):
+        strings = []
+        for node_info in self.nodes:
+            string = '|'.join([x[0] + '~{:}'.format(x[1]) for x in node_info])
+            string = '|{:}|'.format(string)
+            strings.append(string)
+        return '+'.join(strings)
+
+    def check_valid(self):
+        nodes = {0: True}
+        for i, node_info in enumerate(self.nodes):
+            sums = []
+            for op, xin in node_info:
+                if op == 'none' or nodes[xin] is False: x = False
+                else: x = True
+                sums.append(x)
+            nodes[i + 1] = sum(sums) > 0
+        return nodes[len(self.nodes)]
+
+    def to_unique_str(self, consider_zero=False):
+        # this is used to identify isomorphic cells, which requires prior knowledge of the operations;
+        # two operations are special, i.e., none and skip_connect
+        nodes = {0: '0'}
+        for i_node, node_info in enumerate(self.nodes):
+            cur_node = []
+            for op, xin in node_info:
+                if consider_zero is None:
+                    x = '(' + nodes[xin] + ')' + '@{:}'.format(op)
+                elif consider_zero:
+                    if op == 'none' or nodes[xin] == '#': x = '#'  # zero
+                    elif op == 'skip_connect': x = nodes[xin]
+                    else: x = '(' + nodes[xin] + ')' + '@{:}'.format(op)
+                else:
+                    if op == 'skip_connect': x = nodes[xin]
+                    else: x = '(' + nodes[xin] + ')' + '@{:}'.format(op)
+                cur_node.append(x)
+            nodes[i_node + 1] = '+'.join(sorted(cur_node))
+        return nodes[len(self.nodes)]
+
+    def check_valid_op(self, op_names):
+        for node_info in self.nodes:
+            for inode_edge in node_info:
+                #assert inode_edge[0] in op_names, 'invalid op-name : {:}'.format(inode_edge[0])
+                if inode_edge[0] not in op_names: return False
+        return True
+
+    def __repr__(self):
+        return ('{name}({node_num} nodes with {node_info})'.format(name=self.__class__.__name__, node_info=self.tostr(), **self.__dict__))
+
+    def __len__(self):
+        return len(self.nodes) + 1
+
+    def __getitem__(self, index):
+        return self.nodes[index]
+
+    @staticmethod
+    def str2structure(xstr):
+        assert isinstance(xstr, str), 'must take string (not {:}) as input'.format(type(xstr))
+        nodestrs = xstr.split('+')
+        genotypes = []
+        for i, node_str in enumerate(nodestrs):
+            inputs = list(filter(lambda x: x != '', node_str.split('|')))
+            for xinput in inputs: assert len(xinput.split('~')) == 2, 'invalid input length : {:}'.format(xinput)
+            inputs = (xi.split('~') for xi in inputs)
+            input_infos = tuple((op, int(IDX)) for (op, IDX) in inputs)
+            genotypes.append(input_infos)
+        return Structure(genotypes)
+
+    @staticmethod
+    def str2fullstructure(xstr, default_name='none'):
+        assert isinstance(xstr, str), 'must take string (not {:}) as input'.format(type(xstr))
+        nodestrs = xstr.split('+')
+        genotypes = []
+        for i, node_str in enumerate(nodestrs):
+            inputs = list(filter(lambda x: x != '', node_str.split('|')))
+            for xinput in inputs: assert len(xinput.split('~')) == 2, 'invalid input length : {:}'.format(xinput)
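+            # each token is 'op~in_idx'; e.g. '|nor_conv_3x3~0|' yields ('nor_conv_3x3', 0)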
+            inputs = (xi.split('~') for xi in inputs)
+            input_infos = list((op, int(IDX)) for (op, IDX) in inputs)
+            all_in_nodes = list(x[1] for x in input_infos)
+            for j in range(i):
+                if j not in all_in_nodes: input_infos.append((default_name, j))
+            node_info = sorted(input_infos, key=lambda x: (x[1], x[0]))
+            genotypes.append(tuple(node_info))
+        return Structure(genotypes)
+
+    @staticmethod
+    def gen_all(search_space, num, return_ori):
+        assert isinstance(search_space, list) or isinstance(search_space, tuple), 'invalid class of search-space : {:}'.format(type(search_space))
+        assert num >= 2, 'There should be at least two nodes in a neural cell instead of {:}'.format(num)
+        all_archs = get_combination(search_space, 1)
+        for i, arch in enumerate(all_archs):
+            all_archs[i] = [tuple(arch)]
+
+        for inode in range(2, num):
+            cur_nodes = get_combination(search_space, inode)
+            new_all_archs = []
+            for previous_arch in all_archs:
+                for cur_node in cur_nodes:
+                    new_all_archs.append(previous_arch + [tuple(cur_node)])
+            all_archs = new_all_archs
+        if return_ori:
+            return all_archs
+        else:
+            return [Structure(x) for x in all_archs]
+
+
+ResNet_CODE = Structure(
+    [(('nor_conv_3x3', 0), ),                       # node-1
+     (('nor_conv_3x3', 1), ),                       # node-2
+     (('skip_connect', 0), ('skip_connect', 2))]    # node-3
+    )
+
+AllConv3x3_CODE = Structure(
+    [(('nor_conv_3x3', 0), ),                                            # node-1
+     (('nor_conv_3x3', 0), ('nor_conv_3x3', 1)),                         # node-2
+     (('nor_conv_3x3', 0), ('nor_conv_3x3', 1), ('nor_conv_3x3', 2))]    # node-3
+    )
+
+AllFull_CODE = Structure(
+    [(('skip_connect', 0), ('nor_conv_1x1', 0), ('nor_conv_3x3', 0), ('avg_pool_3x3', 0)),  # node-1
+     (('skip_connect', 0), ('nor_conv_1x1', 0), ('nor_conv_3x3', 0), ('avg_pool_3x3', 0), ('skip_connect', 1), ('nor_conv_1x1', 1), ('nor_conv_3x3', 1), ('avg_pool_3x3', 1)),  # node-2
+     (('skip_connect', 0), ('nor_conv_1x1', 0), ('nor_conv_3x3', 0), ('avg_pool_3x3', 0), ('skip_connect', 1), ('nor_conv_1x1', 1), ('nor_conv_3x3', 1), ('avg_pool_3x3', 1), ('skip_connect', 2), ('nor_conv_1x1', 2), ('nor_conv_3x3', 2), ('avg_pool_3x3', 2))]  # node-3
+    )
+
+AllConv1x1_CODE = Structure(
+    [(('nor_conv_1x1', 0), ),                                            # node-1
+     (('nor_conv_1x1', 0), ('nor_conv_1x1', 1)),                         # node-2
+     (('nor_conv_1x1', 0), ('nor_conv_1x1', 1), ('nor_conv_1x1', 2))]    # node-3
+    )
+
+AllIdentity_CODE = Structure(
+    [(('skip_connect', 0), ),                                            # node-1
+     (('skip_connect', 0), ('skip_connect', 1)),                         # node-2
+     (('skip_connect', 0), ('skip_connect', 1), ('skip_connect', 2))]    # node-3
+    )
+
+architectures = {'resnet'  : ResNet_CODE,
+                 'all_c3x3': AllConv3x3_CODE,
+                 'all_c1x1': AllConv1x1_CODE,
+                 'all_idnt': AllIdentity_CODE,
+                 'all_full': AllFull_CODE}
diff --git a/benchmark201/utils/get_info.py b/benchmark201/utils/get_info.py
new file mode 100644
index 0000000..7750a15
--- /dev/null
+++ b/benchmark201/utils/get_info.py
@@ -0,0 +1,41 @@
+import os
+import argparse
+
+parser = argparse.ArgumentParser(description='extract accuracy numbers from a benchmark query log')
+parser.add_argument('path', type=str, help='path of the log file to parse')
+
+args = parser.parse_args()
+
+def main():
+    file_path = args.path
+    info = {}
+    cnt = 0
+    dataset_idx = 0
+    dataset = ['cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120']
+    acc = [['train', 'valid'], ['train', 'test'], ['train', 'valid', 'test'], ['train', 'valid', 'test']]
+    with open(file_path, 'r') as f:
+        for line in f:
+            line = line.split(' ')
+            if 'datasets' in line:
+                cnt = cnt + 1
+                info[cnt] = {}
+                dataset_idx = 0
+            if line[0] in dataset:
+                top1 = []
+                info[cnt][line[0]] = {}
+                for item in line:
+                    if '%' in item:
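+                        # tokens look like '93.76%'; keep only the number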
+                        item = item.split("%")[0]
+                        top1.append(float(item))
+                if len(top1) > 0:
+                    for value, name in zip(top1, acc[dataset_idx]):
+                        info[cnt][line[0]][name] = value
+
+                dataset_idx = dataset_idx + 1
+
+    for key in info.keys():
+        print(key, info[key])
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/benchmark201/utils/utils.py b/benchmark201/utils/utils.py
new file mode 100644
index 0000000..4c223fc
--- /dev/null
+++ b/benchmark201/utils/utils.py
@@ -0,0 +1,134 @@
+""" Utilities """
+import os
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import logging
+import shutil
+import torch.distributed as dist
+import numpy as np
+
+def distill(result):
+    # parse the fixed-layout benchmark result string; brittle by design,
+    # it indexes specific lines and character offsets
+    result = result.split('\n')
+    cifar10 = result[5].replace(' ', '').split(':')
+    cifar100 = result[7].replace(' ', '').split(':')
+    imagenet16 = result[9].replace(' ', '').split(':')
+
+    cifar10_train = float(cifar10[1].strip(',test')[-7:-2].strip('='))
+    cifar10_test = float(cifar10[2][-7:-2].strip('='))
+    cifar100_train = float(cifar100[1].strip(',valid')[-7:-2].strip('='))
+    cifar100_valid = float(cifar100[2].strip(',test')[-7:-2].strip('='))
+    cifar100_test = float(cifar100[3][-7:-2].strip('='))
+    imagenet16_train = float(imagenet16[1].strip(',valid')[-7:-2].strip('='))
+    imagenet16_valid = float(imagenet16[2].strip(',test')[-7:-2].strip('='))
+    imagenet16_test = float(imagenet16[3][-7:-2].strip('='))
+
+    return cifar10_train, cifar10_test, cifar100_train, cifar100_valid, \
+        cifar100_test, imagenet16_train, imagenet16_valid, imagenet16_test
+
+class AverageMeter():
+    """ Computes and stores the average and current value """
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        """ Reset all statistics """
+        self.val = 0
+        self.avg = 0
+        self.sum = 0
+        self.count = 0
+
+    def update(self, val, n=1):
+        """ Update statistics """
+        self.val = val
+        self.sum += val * n
+        self.count += n
+        self.avg = self.sum / self.count
+
+def get_logger(file_path):
+    """ Make python logger """
+    logger = logging.getLogger('cdarts')
+    log_format = '%(asctime)s | %(message)s'
+    formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p')
+    file_handler = logging.FileHandler(file_path)
+    file_handler.setFormatter(formatter)
+    stream_handler = logging.StreamHandler()
+    stream_handler.setFormatter(formatter)
+
+    logger.addHandler(file_handler)
+    logger.addHandler(stream_handler)
+    logger.setLevel(logging.INFO)
+
+    return logger
+
+
+def param_size(model):
+    """ Compute the number of parameters, in millions (aux head excluded) """
+    n_params = sum(
+        np.prod(v.size()) for k, v in model.named_parameters() if not k.startswith('aux_head'))
+    return n_params / 1e6
+
+def print_speed(i, i_time, n, logger):
+    """print_speed(index, index_time, total_iteration)"""
+    average_time = i_time
+    remaining_time = (n - i) * average_time
+    remaining_day = math.floor(remaining_time / 86400)
+    remaining_hour = math.floor(remaining_time / 3600 - remaining_day * 24)
+    remaining_min = math.floor(remaining_time / 60 - remaining_day * 1440 - remaining_hour * 60)
+    logger.info('Progress: %d / %d [%d%%], Speed: %.3f s/iter, ETA %d:%02d:%02d (D:H:M)\n' % (i, n, i/n*100, average_time, remaining_day, remaining_hour, remaining_min))
+    logger.info('\nPROGRESS: {:.2f}%\n'.format(100 * i / n))
+
+
+def accuracy(output, target, topk=(1,)):
+    """ Computes the precision@k for the specified values of k """
+    maxk = max(topk)
+    batch_size = target.size(0)
+
+    _, pred = output.topk(maxk, 1, True, True)
+    pred = pred.t()
+    # one-hot case
+    if target.ndimension() > 1:
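+        # soft or one-hot targets: collapse rows to hard class indices,
+        # e.g. [[0., 1., 0.]] -> tensor([1])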
+        target = target.max(1)[1]
+
+    correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+    res = []
+    for k in topk:
+        correct_k = correct[:k].view(-1).float().sum(0)
+        res.append(correct_k.mul_(1.0 / batch_size))
+
+    return res
+
+def save_checkpoint(state, ckpt_dir, is_best=False):
+    filename = os.path.join(ckpt_dir, 'checkpoint.pth.tar')
+    torch.save(state, filename)
+    if is_best:
+        best_filename = os.path.join(ckpt_dir, 'best.pth.tar')
+        torch.save(state, best_filename)
+        # shutil.copyfile(filename, best_filename)
+
+def reduce_tensor(tensor, world_size):
+    rt = tensor.clone()
+    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
+    rt /= world_size
+    return rt
+
+def drop_path_(x, drop_prob, training):
+    if training and drop_prob > 0.:
+        keep_prob = 1. - drop_prob
+        # per-data-point mask; assumes x is on CUDA
+        mask = torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob)
+        x.div_(keep_prob).mul_(mask)
+
+    return x
+
+def adjust_lr(optimizer, epoch, config):
+    # Smaller slope for the last 5 epochs because lr * 1/250 is relatively large
+    if config.epochs - epoch > 5:
+        lr = config.lr * (config.epochs - 5 - epoch) / (config.epochs - 5)
+    else:
+        lr = config.lr * (config.epochs - epoch) / ((config.epochs - 5) * 5)
+    for param_group in optimizer.param_groups:
+        param_group['lr'] = lr
+    return lr
diff --git a/benchmark201/utils/visualize.py b/benchmark201/utils/visualize.py
new file mode 100644
index 0000000..aaa67d6
--- /dev/null
+++ b/benchmark201/utils/visualize.py
@@ -0,0 +1,74 @@
+""" Network architecture visualizer using graphviz """
+import sys
+from graphviz import Digraph
+import utils.genotypes as gt
+
+
+def plot(genotype, file_path, caption=None):
+    """ make DAG plot and save to file_path as .png """
+    edge_attr = {
+        'fontsize': '20',
+        'fontname': 'times'
+    }
+    node_attr = {
+        'style': 'filled',
+        'shape': 'rect',
+        'align': 'center',
+        'fontsize': '20',
+        'height': '0.5',
+        'width': '0.5',
+        'penwidth': '2',
+        'fontname': 'times'
+    }
+    g = Digraph(
+        format='png',
+        edge_attr=edge_attr,
+        node_attr=node_attr,
+        engine='dot')
+    g.body.extend(['rankdir=LR'])
+
+    # input nodes
+    g.node("c_{k-2}", fillcolor='darkseagreen2')
+    g.node("c_{k-1}", fillcolor='darkseagreen2')
+
+    # intermediate nodes
+    n_nodes = len(genotype)
+    for i in range(n_nodes):
+        g.node(str(i), fillcolor='lightblue')
+
+    for i, edges in enumerate(genotype):
+        for op, j in edges:
+            if j == 0:
+                u = "c_{k-2}"
+            elif j == 1:
+                u = "c_{k-1}"
+            else:
+                u = str(j - 2)
+
+            v = str(i)
+            g.edge(u, v, label=op, fillcolor="gray")
+
+    # output node
+    g.node("c_{k}", fillcolor='palegoldenrod')
+    for i in range(n_nodes):
+        g.edge(str(i), "c_{k}", fillcolor="gray")
+
+    # add image caption
+    if caption:
+        g.attr(label=caption, overlap='false', fontsize='20', fontname='times')
+
+    g.render(file_path, view=False)
+
+
+if __name__ == '__main__':
+    if len(sys.argv) != 2:
+        raise ValueError("usage:\n python {} GENOTYPE".format(sys.argv[0]))
+
+    genotype_str = sys.argv[1]
+    try:
+        genotype = gt.from_str(genotype_str)
+    except AttributeError:
+        raise ValueError("Cannot parse {}".format(genotype_str))
+
+    plot(genotype.normal, "normal")
+    plot(genotype.reduce, "reduction")
diff --git a/experiments/retrain/cifar10-retrain/cifar10-retrain.log b/experiments/retrain/cifar10-retrain/cifar10-retrain.log
new file mode 100644
index 0000000..e69de29
diff --git a/experiments/retrain/cifar10-retrain/tb/readme.md b/experiments/retrain/cifar10-retrain/tb/readme.md
new file mode 100644
index 0000000..e69de29
diff --git a/experiments/retrain/imagenet-retrain/imagenet-retrain.log b/experiments/retrain/imagenet-retrain/imagenet-retrain.log
new file mode 100644
index 0000000..e69de29
diff --git a/experiments/retrain/imagenet-retrain/tb/readme.md b/experiments/retrain/imagenet-retrain/tb/readme.md
new file mode 100644
index 0000000..e69de29
diff --git a/experiments/search/cifar10-search/cifar10-search.log b/experiments/search/cifar10-search/cifar10-search.log
new file mode 100644
index 0000000..e69de29
diff --git a/experiments/search/cifar10-search/tb/readme.md b/experiments/search/cifar10-search/tb/readme.md
new file mode 100644
index 0000000..e69de29
diff --git a/experiments/search/imagenet-search/imagenet-search.log b/experiments/search/imagenet-search/imagenet-search.log
new file mode 100644
index 0000000..e69de29
diff --git a/experiments/search/imagenet-search/tb/readme.md b/experiments/search/imagenet-search/tb/readme.md
new file mode 100644
index 0000000..e69de29
diff --git a/lib/config.py b/lib/config.py
new file mode 100644
index 0000000..d2b2650
--- /dev/null
+++ b/lib/config.py
@@ -0,0 +1,229 @@
+""" Config class for search/augment """
+import argparse
+import os
+from functools import partial
+import torch
+
+
+def get_parser(name):
+    """ make default formatted parser """
+    parser = argparse.ArgumentParser(name, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    # always print the default value in --help output
+    parser.add_argument = partial(parser.add_argument, help=' ')
+    return parser
+
+
+def parse_gpus(gpus):
+    if gpus == 'all':
+        return list(range(torch.cuda.device_count()))
+    else:
+        return [int(s) for s in gpus.split(',')]
+
+
+class BaseConfig(argparse.Namespace):
+    def print_params(self, prtf=print):
+        prtf("")
+        prtf("Parameters:")
+        for attr, value in sorted(vars(self).items()):
+            prtf("{}={}".format(attr.upper(), value))
+        prtf("")
+
+    def as_markdown(self):
+        """ Return configs as markdown format """
+        text = "|name|value| \n|-|-| \n"
+        for attr, value in sorted(vars(self).items()):
+            text += "|{}|{}| \n".format(attr, value)
+
+        return text
+
+
+class SearchConfig(BaseConfig):
+    def build_parser(self):
+        parser = get_parser("Search config")
+        parser.add_argument('--name', required=True)
+        ########### basic settings ############
+        parser.add_argument('--dataset', default='imagenet', help='CIFAR10 / MNIST / FashionMNIST / imagenet')
+        parser.add_argument('--model_type', type=str, default='cifar', help='cifar or imagenet')
+        parser.add_argument('--data_dir', type=str, default='experiments/data/cifar', help='cifar dataset')
+        parser.add_argument('--train_dir', type=str, default='experiments/data/imagenet/train', help='')
+        parser.add_argument('--val_dir', type=str, default='experiments/data/imagenet/train', help='')
+        parser.add_argument('--test_dir', type=str, default='experiments/data/imagenet/val', help='')
+        parser.add_argument('--param_pool_path', type=str, default=None, help='')
+        parser.add_argument('--input_channels', type=int, default=3)
+        parser.add_argument('--init_channels', type=int, default=16)
+        parser.add_argument('--stem_multiplier', type=int, default=3)
+        parser.add_argument('--n_classes', type=int, default=10)
+        parser.add_argument('--batch_size', type=int, default=128, help='batch size')
+        parser.add_argument('--print_freq', type=int, default=50, help='print frequency')
+        parser.add_argument('--seed', type=int, default=0, help='random seed')
+        parser.add_argument('--workers', type=int, default=4, help='# of workers')
+        parser.add_argument('--gpus', default='0', help='gpu device ids separated by comma. '
+                            '`all` indicates use all gpus.')
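+        # hypothetical illustration of how parse_gpus() above maps this flag:
+        #   --gpus 0,2  -> [0, 2]
+        #   --gpus all  -> list(range(torch.cuda.device_count()))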
ids separated by comma. ' + '`all` indicates use all gpus.') + parser.add_argument('--sample_ratio', type=float, default=0.2, help='imagenet sample ratio') + parser.add_argument('--resume', action='store_true', default=False, help='resnet stem(pretrain)') + + ########### learning rate ############ + parser.add_argument('--w_lr', type=float, default=0.05, help='lr for weights') + parser.add_argument('--lr_ratio', type=float, default=0.5, help='lr for trained layers') + parser.add_argument('--w_lr_min', type=float, default=0.001, help='minimum lr for weights') + parser.add_argument('--w_momentum', type=float, default=0.9, help='momentum for weights') + parser.add_argument('--w_weight_decay', type=float, default=3e-4, + help='weight decay for weights') + parser.add_argument('--w_grad_clip', type=float, default=5., + help='gradient clipping for weights') + parser.add_argument('--alpha_lr', type=float, default=6e-4, help='lr for alpha') + parser.add_argument('--alpha_weight_decay', type=float, default=1e-3, + help='weight decay for alpha') + + ########### alternate training ############ + parser.add_argument('--res_stem', action='store_true', default=False, help='resnet stem(pretrain)') + parser.add_argument('--layer_num', type=int, default=3, help='layer need to be replaced') + parser.add_argument('--cells_num', type=int, default=3, help='cells num of one layer') + parser.add_argument('--pretrain_epochs', type=int, default=5, help='# of training epochs') + parser.add_argument('--pretrain_decay', type=int, default=5, help='pretrain epochs') + parser.add_argument('--random_times', type=int, default=10, help='# of training epochs') + parser.add_argument('--random_epochs', type=int, default=3, help='# of training epochs') + parser.add_argument('--search_iter', type=int, default=5, help='times of search') + parser.add_argument('--search_iter_epochs', type=int, default=5, help='# of training epochs') + parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss') + parser.add_argument('--one_stage', action='store_true', default=False, help='one_stage search') + parser.add_argument('--same_structure', action='store_true', default=False, help='same_structure search and retrain') + parser.add_argument('--clean_arch', action='store_true', default=False, help='clean archs each epoch') + parser.add_argument('--sync_param', action='store_true', default=False, help='whether to sync param') + parser.add_argument('--ensemble_sum', action='store_true', default=False, help='ensemble sum or concat') + parser.add_argument('--ensemble_param', action='store_true', default=False, help='whether to learn ensemble params') + parser.add_argument('--use_beta', action='store_true', default=False, help='whether to use beta arch param') + parser.add_argument('--bn_affine', action='store_true', default=False, help='main bn affine') + parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to sync bn') + parser.add_argument('--use_apex', action='store_true', default=False, help='whether to apex') + parser.add_argument('--regular', action='store_true', default=False, help='resnet stem(pretrain)') + parser.add_argument('--regular_ratio', type=float, default=0.5, help='regular ratio') + parser.add_argument('--regular_coeff', type=float, default=5, help='regular coefficient') + parser.add_argument('--repeat_cell', action='store_true', default=False, help='use repeat cell') + parser.add_argument('--fix_head', action='store_true', default=False, 
help='whether to fix head') + parser.add_argument('--share_fc', action='store_true', default=False, help='whether to share fc') + parser.add_argument('--nasnet_lr', type=float, default=0.1, help='lr of nasnet') + parser.add_argument('--nasnet_warmup', type=int, default=5, help='warm up of nasnet') + parser.add_argument('--loss_alpha', type=float, default=1, help='loss alpha') + parser.add_argument('--loss_T', type=float, default=2, help='loss T') + parser.add_argument('--interactive_type', type=int, default=0, help='0 kl 1 cosine 2 mse 3 sl1') + parser.add_argument('--gumbel_sample', action='store_true', default=False, help='whether to use gumbel sample') + parser.add_argument('--sample_pretrain', action='store_true', default=False, help='sample_pretrain') + + + ########### data augument ############ + parser.add_argument('--aux_weight', type=float, default=0.4, help='auxiliary loss weight') + parser.add_argument('--cutout_length', type=int, default=16, help='cutout length') + parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path prob') + parser.add_argument('--use_aa', action='store_true', default=False, help='whether to use aa') + parser.add_argument('--mixup_alpha', default=0., type=float, + help='mixup interpolation coefficient (default: 1)') + + ########### distributed ############ + parser.add_argument("--local_rank", default=0, type=int) + parser.add_argument("--world_size", default=1, type=int) + parser.add_argument('--dist_url', default='tcp://127.0.0.1:23456', type=str, help='url used to set up distributed training') + parser.add_argument('--distributed', action='store_true', help='Run model distributed mode.') + + + return parser + + def __init__(self): + parser = self.build_parser() + args = parser.parse_args() + super().__init__(**vars(args)) + + self.data_path = './experiments/data/' + self.path = os.path.join('experiments', 'search', self.name) + self.resume_path = os.path.join(self.path, 'search_resume.pth.tar') + self.plot_path = os.path.join(self.path, 'plots') + self.retrain_path = os.path.join(self.path, 'retrain') + self.gpus = parse_gpus(self.gpus) + + +class AugmentConfig(BaseConfig): + def build_parser(self): + parser = get_parser("Augment config") + parser.add_argument('--name', required=True) + parser.add_argument('--dataset', required=True, help='cifar10 / cifar100 / imagenet') + parser.add_argument('--model_type', type=str, default='cifar', help='cifar or imagenet') + + parser.add_argument('--data_dir', type=str, default='experiments/data/cifar', help='cifar dataset') + parser.add_argument('--train_dir', type=str, default='experiments/data/imagenet/train', help='') + parser.add_argument('--test_dir', type=str, default='experiments/data/imagenet/val', help='') + parser.add_argument('--cell_file', type=str, default='CDARTS/cells/cifar_genotype.json', help='') + parser.add_argument('--resume', action='store_true', default=False, help='resnet stem(pretrain)') + + parser.add_argument('--n_classes', type=int, default=10) + parser.add_argument('--input_channels', type=int, default=3) + parser.add_argument('--stem_multiplier', type=int, default=3) + + ########### alternate training ############ + parser.add_argument('--res_stem', action='store_true', default=False, help='resnet stem(pretrain)') + parser.add_argument('--layer_num', type=int, default=3, help='layer need to be replaced') + parser.add_argument('--cells_num', type=int, default=3, help='cells num of one layer') + parser.add_argument('--same_structure', action='store_true', 
+        parser.add_argument('--ensemble_sum', action='store_true', default=False, help='whether to ensemble')
+        parser.add_argument('--ensemble_param', action='store_true', default=False, help='whether to learn ensemble params')
+        parser.add_argument('--use_beta', action='store_true', default=False, help='whether to use beta arch param')
+        parser.add_argument('--bn_affine', action='store_true', default=False, help='main bn affine')
+        parser.add_argument('--repeat_cell', action='store_true', default=False, help='use repeat cell')
+        parser.add_argument('--fix_head', action='store_true', default=False, help='whether to fix head')
+        parser.add_argument('--share_fc', action='store_true', default=False, help='whether to share fc')
+        parser.add_argument('--sample_pretrain', action='store_true', default=False, help='sample sub-architectures during pretrain')
+
+        parser.add_argument('--use_aa', action='store_true', default=False, help='whether to use AutoAugment')
+        parser.add_argument('--mixup_alpha', default=0., type=float,
+                            help='mixup interpolation coefficient (default: 0)')
+        parser.add_argument('--resume_name', type=str, default='retrain_resume.pth.tar')
+
+        parser.add_argument('--batch_size', type=int, default=128, help='batch size')
+        parser.add_argument('--lr', type=float, default=0.025, help='lr for weights')
+        parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
+        parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay')
+        parser.add_argument('--grad_clip', type=float, default=5.,
+                            help='gradient clipping for weights')
+        parser.add_argument('--print_freq', type=int, default=200, help='print frequency')
+        parser.add_argument('--gpus', default='0', help='gpu device ids separated by comma. '
+                            '`all` indicates use all gpus.')
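# --- Editor's sketch (illustration only, not part of this patch) ---------
# The distributed/fp16 flags in these configs (--local_rank, --world_size,
# --dist_url, --use_amp, --opt-level, --static/--dynamic-loss-scale) are
# consumed by the usual torch.distributed + NVIDIA apex launch sequence.
# The exact wiring lives in the training scripts; this outline assumes a
# single node, i.e. rank == local_rank.
import torch
import torch.distributed as dist

def setup_distributed(config, model, optimizer):
    torch.cuda.set_device(config.local_rank)
    dist.init_process_group(backend='nccl', init_method=config.dist_url,
                            world_size=config.world_size, rank=config.local_rank)
    if config.use_amp:
        from apex import amp
        # static scale unless --dynamic-loss-scale was given
        model, optimizer = amp.initialize(
            model.cuda(), optimizer, opt_level=config.opt_level,
            loss_scale='dynamic' if config.dynamic_loss_scale
            else config.static_loss_scale)
    model = torch.nn.parallel.DistributedDataParallel(
        model.cuda(), device_ids=[config.local_rank])
    return model, optimizer
# --------------------------------------------------------------------------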
+        parser.add_argument('--epochs', type=int, default=600, help='# of training epochs')
+        parser.add_argument('--warmup_epochs', type=int, default=5, help='# of warmup epochs')
+        parser.add_argument('--init_channels', type=int, default=36)
+        parser.add_argument('--layers', type=int, default=20, help='# of layers')
+        parser.add_argument('--seed', type=int, default=0, help='random seed')
+        parser.add_argument('--workers', type=int, default=4, help='# of workers')
+        parser.add_argument('--aux_weight', type=float, default=0.4, help='auxiliary loss weight')
+        parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
+        parser.add_argument('--sample_archs', type=int, default=1, help='number of sampled archs')
+        parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
+        parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path prob')
+
+        ########### distributed ############
+        parser.add_argument("--local_rank", default=0, type=int)
+        parser.add_argument("--world_size", default=1, type=int)
+        parser.add_argument('--use_amp', action='store_true', default=False, help='whether to use amp')
+        parser.add_argument('--opt-level', type=str, default='O1')
+
+        parser.add_argument('--dist_url', default='tcp://127.0.0.1:23456', type=str, help='url used to set up distributed training')
+        parser.add_argument('--fp16', action='store_true',
+                            help='Run model in fp16 mode.')
+        parser.add_argument('--distributed', action='store_true',
+                            help='Run model in distributed mode.')
+
+        parser.add_argument('--static-loss-scale', type=float, default=1,
+                            help='Static loss scale, positive power of 2 values can improve fp16 convergence.')
+        parser.add_argument('--dynamic-loss-scale', action='store_true',
+                            help='Use dynamic loss scaling. 
If supplied, this argument supersedes ' + + '--static-loss-scale.') + return parser + + def __init__(self): + parser = self.build_parser() + args = parser.parse_args() + super().__init__(**vars(args)) + + self.data_path = './experiments/data/' + self.path = os.path.join('experiments', 'retrain', self.name) + self.gpus = parse_gpus(self.gpus) + self.resume_path = os.path.join(self.path, self.resume_name) + diff --git a/lib/core/augment_function.py b/lib/core/augment_function.py new file mode 100644 index 0000000..1cf3901 --- /dev/null +++ b/lib/core/augment_function.py @@ -0,0 +1,130 @@ +import torch +import torch.nn as nn +from lib.utils import utils +from lib.datasets import data_utils +from lib.models.loss import CrossEntropyLabelSmooth + +def train(train_loader, model, optimizer, epoch, writer, logger, config): + device = torch.device("cuda") + if config.label_smooth > 0: + criterion = CrossEntropyLabelSmooth(config.n_classes, config.label_smooth).to(device) + else: + criterion = nn.CrossEntropyLoss().to(device) + + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + step_num = len(train_loader) + cur_step = epoch*step_num + cur_lr = optimizer.param_groups[0]['lr'] + if config.local_rank == 0: + logger.info("Train Epoch {} LR {}".format(epoch, cur_lr)) + writer.add_scalar('train/lr', cur_lr, cur_step) + + model.train() + + for step, (X, y) in enumerate(train_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + X, target_a, target_b, lam = data_utils.mixup_data(X, y, config.mixup_alpha, use_cuda=True) + + optimizer.zero_grad() + logits, logits_aux = model(X) + # loss = criterion(logits, y) + loss = data_utils.mixup_criterion(criterion, logits, target_a, target_b, lam) + if config.aux_weight > 0: + # loss_aux = criterion(logits_aux, y) + loss_aux = data_utils.mixup_criterion(criterion, logits_aux, target_a, target_b, lam) + loss = loss + config.aux_weight * loss_aux + + if config.use_amp: + from apex import amp + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + # gradient clipping + nn.utils.clip_grad_norm_(model.module.parameters(), config.grad_clip) + optimizer.step() + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = loss.data + + losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num): + logger.info( + "Train: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.epochs, step, + step_num, losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('train/loss', reduced_loss.item(), cur_step) + writer.add_scalar('train/top1', prec1.item(), cur_step) + writer.add_scalar('train/top5', prec5.item(), cur_step) + cur_step += 1 + + if config.local_rank == 0: + logger.info("Train: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.epochs, top1.avg)) + +def validate(valid_loader, model, epoch, cur_step, writer, logger, config): + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + model.eval() + device = 
torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + + with torch.no_grad(): + for step, (X, y) in enumerate(valid_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + logits, _ = model(X) + loss = criterion(logits, y) + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = loss.data + + losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + step_num = len(valid_loader) + + if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0: + logger.info( + "Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.epochs, step, step_num, + losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('val/loss', losses.avg, cur_step) + writer.add_scalar('val/top1', top1.avg, cur_step) + writer.add_scalar('val/top5', top5.avg, cur_step) + + logger.info("Valid: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.epochs, top1.avg)) + + return top1.avg, top5.avg diff --git a/lib/core/pretrain_function.py b/lib/core/pretrain_function.py new file mode 100644 index 0000000..3c83b44 --- /dev/null +++ b/lib/core/pretrain_function.py @@ -0,0 +1,342 @@ +import torch +import torch.nn as nn +from lib.utils import utils +from lib.datasets import data_utils +from lib.models.loss import CrossEntropyLabelSmooth + +def train(train_loader, model, optimizer, epoch, writer, logger, config): + device = torch.device("cuda") + if config.label_smooth > 0: + criterion = CrossEntropyLabelSmooth(config.n_classes, config.label_smooth).to(device) + else: + criterion = nn.CrossEntropyLoss().to(device) + + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + step_num = len(train_loader) + cur_step = epoch*step_num + cur_lr = optimizer.param_groups[0]['lr'] + if config.local_rank == 0: + logger.info("Train Epoch {} LR {}".format(epoch, cur_lr)) + writer.add_scalar('train/lr', cur_lr, cur_step) + + model.train() + + for step, (X, y) in enumerate(train_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + X, target_a, target_b, lam = data_utils.mixup_data(X, y, config.mixup_alpha, use_cuda=True) + + optimizer.zero_grad() + logits, logits_aux = model(X, layer_idx=0, super_flag=True, pretrain_flag=True) + loss = data_utils.mixup_criterion(criterion, logits, target_a, target_b, lam) + if config.aux_weight > 0: + # loss_aux = criterion(logits_aux, y) + loss_aux = data_utils.mixup_criterion(criterion, logits_aux, target_a, target_b, lam) + loss = loss + config.aux_weight * loss_aux + + if config.use_amp: + from apex import amp + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + # gradient clipping + nn.utils.clip_grad_norm_(model.module.parameters(), config.grad_clip) + optimizer.step() + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = loss.data + + 
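# --- Editor's note (illustration only, not part of this patch) -----------
# utils.reduce_tensor, used in the branch just above, is not shown in this
# hunk; the standard implementation averages a metric across ranks:
#
#     import torch.distributed as dist
#
#     def reduce_tensor(tensor, world_size):
#         rt = tensor.clone()
#         dist.all_reduce(rt, op=dist.ReduceOp.SUM)  # sum over processes
#         rt /= world_size                           # then average
#         return rt
# --------------------------------------------------------------------------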
losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num): + logger.info( + "Train: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.epochs, step, + step_num, losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('train/loss', reduced_loss.item(), cur_step) + writer.add_scalar('train/top1', prec1.item(), cur_step) + writer.add_scalar('train/top5', prec5.item(), cur_step) + cur_step += 1 + + if config.local_rank == 0: + logger.info("Train: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.epochs, top1.avg)) + +def validate(valid_loader, model, epoch, cur_step, writer, logger, config): + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + model.eval() + device = torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + + with torch.no_grad(): + for step, (X, y) in enumerate(valid_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True) + loss = criterion(logits, y) + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = loss.data + + losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + step_num = len(valid_loader) + + if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0: + logger.info( + "Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.epochs, step, step_num, + losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('val/loss', losses.avg, cur_step) + writer.add_scalar('val/top1', top1.avg, cur_step) + writer.add_scalar('val/top5', top5.avg, cur_step) + + logger.info("Valid: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.epochs, top1.avg)) + + return top1.avg, top5.avg + + +def sample_train(train_loader, model, optimizer, epoch, writer, logger, config): + device = torch.device("cuda") + if config.label_smooth > 0: + criterion = CrossEntropyLabelSmooth(config.n_classes, config.label_smooth).to(device) + else: + criterion = nn.CrossEntropyLoss().to(device) + + step_num = len(train_loader) + cur_step = epoch*step_num + cur_lr = optimizer.param_groups[0]['lr'] + if config.local_rank == 0: + logger.info("Train Epoch {} LR {}".format(epoch, cur_lr)) + writer.add_scalar('train/lr', cur_lr, cur_step) + + model.train() + + for step, (X, y) in enumerate(train_loader): + + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + X, target_a, target_b, lam = data_utils.mixup_data(X, y, config.mixup_alpha, use_cuda=True) + + optimizer.zero_grad() + + all_losses = [] + all_logits = [] + for i in range(config.sample_archs): + ### sample new arch ### + model.module.init_arch_params(layer_idx=0) + genotypes = [] + for i in range(config.layer_num): + genotype, connect = model.module.generate_genotype(i) + genotypes.append(genotype) + + 
model.module.genotypes[i] = genotype + model.module.connects[i] = connect + + logits, logits_aux = model(X, layer_idx=0, super_flag=True, pretrain_flag=True, is_slim=True) + all_logits.append(logits) + loss = data_utils.mixup_criterion(criterion, logits, target_a, target_b, lam) + if config.aux_weight > 0: + # loss_aux = criterion(logits_aux, y) + loss_aux = data_utils.mixup_criterion(criterion, logits_aux, target_a, target_b, lam) + loss = loss + config.aux_weight * loss_aux + + all_losses.append(loss) + + ''' + for j, genotype in enumerate(genotypes): + if config.local_rank == 0: + logger.info("Random stage: {} layer: {} genotype = {}".format(i, j, genotype)) + ''' + + loss = torch.sum(torch.stack(all_losses)) + + if config.use_amp: + from apex import amp + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + + # tricks + for p in model.module.parameters(): + if p.grad is not None and p.grad.sum() == 0: + p.grad = None + + # gradient clipping + nn.utils.clip_grad_norm_(model.module.parameters(), config.grad_clip) + optimizer.step() + + for i, logits in enumerate(all_logits): + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + if config.distributed: + reduced_loss = utils.reduce_tensor(all_losses[i].data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = all_losses[i].data + + + torch.cuda.synchronize() + if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num): + logger.info( + "Train: Epoch {:2d}/{} Step {:03d}/{:03d} Sample idx {} Loss {:.3f} " + "Prec@(1,5) ({:.1%}, {:.1%})".format( + epoch+1, config.epochs, step, step_num, i, + reduced_loss.item(), prec1.item(), prec5.item())) + + if config.local_rank == 0: + writer.add_scalar('train/loss', reduced_loss.item(), cur_step) + writer.add_scalar('train/top1', prec1.item(), cur_step) + writer.add_scalar('train/top5', prec5.item(), cur_step) + cur_step += 1 + + + +def sample_validate(valid_loader, model, epoch, cur_step, writer, logger, config): + + model.eval() + device = torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + + with torch.no_grad(): + for step, (X, y) in enumerate(valid_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + for i in range(config.sample_archs): + ### sample new arch ### + model.module.init_arch_params(layer_idx=0) + genotypes = [] + for i in range(config.layer_num): + genotype, connect = model.module.generate_genotype(i) + genotypes.append(genotype) + + model.module.genotypes[i] = genotype + model.module.connects[i] = connect + + logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True, is_slim=True) + loss = criterion(logits, y) + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = loss.data + + torch.cuda.synchronize() + step_num = len(valid_loader) + + if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0: + logger.info( + "Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Sample_index {} Loss {:.3f} " + "Prec@(1,5) ({:.1%}, {:.1%})".format( + epoch+1, config.epochs, step, step_num, i, + reduced_loss.item(), prec1.item(), prec5.item())) + + if config.local_rank == 0: + 
writer.add_scalar('val/loss', reduced_loss.item(), cur_step) + writer.add_scalar('val/top1', prec1.item(), cur_step) + writer.add_scalar('val/top5', prec5.item(), cur_step) + + return prec1.item(), prec5.item() + + +def test_sample(valid_loader, model, epoch, cur_step, writer, logger, config): + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + + model.eval() + device = torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + + + model.module.init_arch_params(layer_idx=0) + genotypes = [] + + for i in range(config.layer_num): + genotype, connect = model.module.generate_genotype(i) + genotypes.append(genotype) + + model.module.genotypes[i] = genotype + model.module.connects[i] = connect + + with torch.no_grad(): + for step, (X, y) in enumerate(valid_loader): + X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True) + N = X.size(0) + + # logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True) + logits, _ = model(X, layer_idx=0, super_flag=True, pretrain_flag=True, is_slim=True) + loss = criterion(logits, y) + + prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5)) + + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + else: + reduced_loss = loss.data + + losses.update(reduced_loss.item(), N) + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + step_num = len(valid_loader) + + if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0: + logger.info( + "Valid: Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + epoch+1, config.epochs, step, step_num, + losses=losses, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('val/loss', losses.avg, cur_step) + writer.add_scalar('val/top1', top1.avg, cur_step) + writer.add_scalar('val/top5', top5.avg, cur_step) + + logger.info("Valid: Epoch {:2d}/{} Final Prec@1 {:.4%}".format( + epoch+1, config.epochs, top1.avg)) + + return top1.avg, top5.avg \ No newline at end of file diff --git a/lib/core/search_function.py b/lib/core/search_function.py new file mode 100644 index 0000000..b479264 --- /dev/null +++ b/lib/core/search_function.py @@ -0,0 +1,254 @@ +import torch +import torch.nn as nn +from lib.utils import utils +from lib.models.loss import Loss_interactive + +def search(train_loader, valid_loader, model, optimizer, w_optim, alpha_optim, layer_idx, epoch, writer, logger, config): + # interactive retrain and kl + + device = torch.device("cuda") + criterion = nn.CrossEntropyLoss().to(device) + top1 = utils.AverageMeter() + top5 = utils.AverageMeter() + losses = utils.AverageMeter() + losses_interactive = utils.AverageMeter() + losses_cls = utils.AverageMeter() + losses_reg = utils.AverageMeter() + + step_num = len(train_loader) + step_num = int(step_num * config.sample_ratio) + + cur_step = epoch*step_num + cur_lr_search = w_optim.param_groups[0]['lr'] + cur_lr_main = optimizer.param_groups[0]['lr'] + if config.local_rank == 0: + logger.info("Train Epoch {} Search LR {}".format(epoch, cur_lr_search)) + logger.info("Train Epoch {} Main LR {}".format(epoch, cur_lr_main)) + writer.add_scalar('retrain/lr', cur_lr_search, cur_step) + + model.train() + + for step, ((trn_X, trn_y), (val_X, val_y)) in enumerate(zip(train_loader, valid_loader)): + if step > step_num: + break + 
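# --- Editor's note (illustration only, not part of this patch) -----------
# Each iteration below is CDARTS's two-phase alternating update:
#   phase 1 (a validation batch): update the architecture parameters and
#     the evaluation network together, minimizing
#         loss = (CE_search + CE_eval) / loss_alpha
#                + Loss_interactive(...) * loss_alpha  [+ alpha regularization]
#     via `optimizer` and `alpha_optim`;
#   phase 2 (a training batch): update only the supernet weights with a
#     plain cross-entropy loss via `w_optim`.
# --------------------------------------------------------------------------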
+ trn_X, trn_y = trn_X.to(device, non_blocking=True), trn_y.to(device, non_blocking=True) + val_X, val_y = val_X.to(device, non_blocking=True), val_y.to(device, non_blocking=True) + N = trn_X.size(0) + + #use valid data + alpha_optim.zero_grad() + optimizer.zero_grad() + + logits_search, emsemble_logits_search = model(val_X, layer_idx, super_flag=True) + logits_main, emsemble_logits_main= model(val_X, layer_idx, super_flag=False) + + loss_cls = (criterion(logits_search, val_y) + criterion(logits_main, val_y)) / config.loss_alpha + loss_interactive = Loss_interactive(emsemble_logits_search, emsemble_logits_main, config.loss_T, config.interactive_type) * config.loss_alpha + + loss_regular = 0 * loss_cls + if config.regular: + reg_decay = max(config.regular_coeff * (1 - float(epoch-config.pretrain_epochs)/((config.search_iter-config.pretrain_epochs)*config.search_iter_epochs*config.regular_ratio)), 0) + # normal cell + op_opt = ['max_pool_3x3', 'avg_pool_3x3', 'skip_connect'] + op_groups = [] + for idx in range(layer_idx, 3): + for op_dx in op_opt: + op_groups.append((idx - layer_idx, op_dx)) + loss_regular = loss_regular + model.module.add_alpha_regularization(op_groups, weight_decay=reg_decay, method='L1', reduce=False) + + # reduction cell + # op_opt = [] + op_opt = ['max_pool_3x3', 'avg_pool_3x3', 'skip_connect'] + op_groups = [] + for i in range(layer_idx, 3): + for op_dx in op_opt: + op_groups.append((i - layer_idx, op_dx)) + loss_regular = loss_regular + model.module.add_alpha_regularization(op_groups, weight_decay=reg_decay, method='L1', normal=False) + + + loss = loss_cls + loss_interactive + loss_regular + loss.backward() + nn.utils.clip_grad_norm_(model.module.parameters(), config.w_grad_clip) + optimizer.step() + alpha_optim.step() + + prec1, prec5 = utils.accuracy(logits_main, val_y, topk=(1, 5)) + if config.distributed: + reduced_loss = utils.reduce_tensor(loss.data, config.world_size) + reduced_loss_interactive = utils.reduce_tensor(loss_interactive.data, config.world_size) + reduced_loss_cls = utils.reduce_tensor(loss_cls.data, config.world_size) + reduced_loss_reg = utils.reduce_tensor(loss_regular.data, config.world_size) + prec1 = utils.reduce_tensor(prec1, config.world_size) + prec5 = utils.reduce_tensor(prec5, config.world_size) + + else: + reduced_loss = loss.data + reduced_loss_interactive = loss_interactive.data + reduced_loss_cls = loss_cls.data + reduced_loss_reg = loss_regular.data + + losses.update(reduced_loss.item(), N) + losses_interactive.update(reduced_loss_interactive.item(), N) + losses_cls.update(reduced_loss_cls.item(), N) + losses_reg.update(reduced_loss_reg.item(), N) + + top1.update(prec1.item(), N) + top5.update(prec5.item(), N) + + torch.cuda.synchronize() + if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num): + logger.info( + "Train_2: Layer {}/{} Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} " + "Loss_interactive {losses_interactive.avg:.3f} Losses_cls {losses_cls.avg:.3f} Losses_reg {losses_reg.avg:.3f} " + "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format( + layer_idx+1, config.layer_num, epoch+1, config.search_iter*config.search_iter_epochs, step, + step_num, losses=losses, losses_interactive=losses_interactive, losses_cls=losses_cls, + losses_reg=losses_reg, top1=top1, top5=top5)) + + if config.local_rank == 0: + writer.add_scalar('retrain/loss', reduced_loss.item(), cur_step) + writer.add_scalar('retrain/top1', prec1.item(), cur_step) + writer.add_scalar('retrain/top5', prec5.item(), cur_step) + 
cur_step += 1
+
+        w_optim.zero_grad()
+        logits_search_train, _ = model(trn_X, layer_idx, super_flag=True)
+        loss_cls_train = criterion(logits_search_train, trn_y)
+        loss_train = loss_cls_train
+        loss_train.backward()
+        # gradient clipping
+        nn.utils.clip_grad_norm_(model.module.parameters(), config.w_grad_clip)
+        # only update w
+        w_optim.step()
+
+        # alpha_optim.step()
+        if config.distributed:
+            reduced_loss_cls_train = utils.reduce_tensor(loss_cls_train.data, config.world_size)
+            reduced_loss_train = utils.reduce_tensor(loss_train.data, config.world_size)
+        else:
+            reduced_loss_cls_train = loss_cls_train.data
+            reduced_loss_train = loss_train.data
+
+        if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num-1):
+            logger.info(
+                "Train_1: Loss_cls: {:.3f} Loss: {:.3f}".format(
+                    reduced_loss_cls_train.item(), reduced_loss_train.item())
+            )
+
+    if config.local_rank == 0:
+        logger.info("Train_2: Layer {}/{} Epoch {:2d}/{} Final Prec@1 {:.4%}".format(
+            layer_idx+1, config.layer_num, epoch+1, config.search_iter*config.search_iter_epochs, top1.avg))
+
+
+def retrain_warmup(valid_loader, model, optimizer, layer_idx, epoch, writer, logger, super_flag, retrain_epochs, config):
+    device = torch.device("cuda")
+    criterion = nn.CrossEntropyLoss().to(device)
+    top1 = utils.AverageMeter()
+    top5 = utils.AverageMeter()
+    losses = utils.AverageMeter()
+
+    step_num = len(valid_loader)
+    step_num = int(step_num * config.sample_ratio)
+
+    cur_step = epoch*step_num
+    cur_lr = optimizer.param_groups[0]['lr']
+    if config.local_rank == 0:
+        logger.info("Warmup Epoch {} LR {:.3f}".format(epoch+1, cur_lr))
+        writer.add_scalar('warmup/lr', cur_lr, cur_step)
+
+    model.train()
+
+    for step, (val_X, val_y) in enumerate(valid_loader):
+        if step > step_num:
+            break
+
+        val_X, val_y = val_X.to(device, non_blocking=True), val_y.to(device, non_blocking=True)
+        N = val_X.size(0)
+
+        optimizer.zero_grad()
+        logits_main, _ = model(val_X, layer_idx, super_flag=super_flag)
+        loss = criterion(logits_main, val_y)
+        loss.backward()
+
+        nn.utils.clip_grad_norm_(model.module.parameters(), config.w_grad_clip)
+        optimizer.step()
+
+        prec1, prec5 = utils.accuracy(logits_main, val_y, topk=(1, 5))
+        if config.distributed:
+            reduced_loss = utils.reduce_tensor(loss.data, config.world_size)
+            prec1 = utils.reduce_tensor(prec1, config.world_size)
+            prec5 = utils.reduce_tensor(prec5, config.world_size)
+        else:
+            reduced_loss = loss.data
+
+        losses.update(reduced_loss.item(), N)
+        top1.update(prec1.item(), N)
+        top5.update(prec5.item(), N)
+
+        torch.cuda.synchronize()
+        if config.local_rank == 0 and (step % config.print_freq == 0 or step == step_num):
+            logger.info(
+                "Warmup: Layer {}/{} Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} "
+                "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
+                    layer_idx+1, config.layer_num, epoch+1, retrain_epochs, step,
+                    step_num, losses=losses, top1=top1, top5=top5))
+
+        if config.local_rank == 0:
+            writer.add_scalar('retrain/loss', reduced_loss.item(), cur_step)
+            writer.add_scalar('retrain/top1', prec1.item(), cur_step)
+            writer.add_scalar('retrain/top5', prec5.item(), cur_step)
+            cur_step += 1
+
+    if config.local_rank == 0:
+        logger.info("Warmup: Layer {}/{} Epoch {:2d}/{} Final Prec@1 {:.4%}".format(
+            layer_idx+1, config.layer_num, epoch+1, retrain_epochs, top1.avg))
+
+def validate(valid_loader, model, layer_idx, epoch, cur_step, writer, logger, super_flag, config):
+    top1 = utils.AverageMeter()
+    top5 = utils.AverageMeter()
+    losses = utils.AverageMeter()
+    model.eval()
+    device = torch.device("cuda")
+    criterion = nn.CrossEntropyLoss().to(device)
+
+    with torch.no_grad():
+        for step, (X, y) in enumerate(valid_loader):
+            X, y = X.to(device, non_blocking=True), y.to(device, non_blocking=True)
+            N = X.size(0)
+
+            logits, _ = model(X, layer_idx, super_flag=False)
+            loss = criterion(logits, y)
+
+            prec1, prec5 = utils.accuracy(logits, y, topk=(1, 5))
+
+            reduced_loss = loss.data
+
+            losses.update(reduced_loss.item(), N)
+            top1.update(prec1.item(), N)
+            top5.update(prec5.item(), N)
+
+            torch.cuda.synchronize()
+            step_num = len(valid_loader)
+
+            if (step % config.print_freq == 0 or step == step_num-1) and config.local_rank == 0:
+                logger.info(
+                    "Valid: Layer {}/{} Epoch {:2d}/{} Step {:03d}/{:03d} Loss {losses.avg:.3f} "
+                    "Prec@(1,5) ({top1.avg:.1%}, {top5.avg:.1%})".format(
+                        layer_idx+1, config.layer_num, epoch+1, config.search_iter*config.search_iter_epochs, step, step_num,
+                        losses=losses, top1=top1, top5=top5))
+
+    if config.local_rank == 0:
+        writer.add_scalar('val/loss', losses.avg, cur_step)
+        writer.add_scalar('val/top1', top1.avg, cur_step)
+        writer.add_scalar('val/top5', top5.avg, cur_step)
+
+        logger.info("Valid: Layer {}/{} Epoch {:2d}/{} Final Prec@1 {:.4%}".format(
+            layer_idx+1, config.layer_num, epoch+1, config.search_iter*config.search_iter_epochs, top1.avg))
+
+    return top1.avg
\ No newline at end of file
diff --git a/lib/datasets/cifar.py b/lib/datasets/cifar.py
new file mode 100644
index 0000000..db7bf38
--- /dev/null
+++ b/lib/datasets/cifar.py
@@ -0,0 +1,102 @@
+import torch
+import numpy as np
+import torchvision.datasets as dset
+import torchvision.transforms as transforms
+from lib.datasets.data_utils import SubsetDistributedSampler
+from lib.datasets.data_utils import CIFAR10Policy, Cutout
+
+
+def data_transforms_cifar(config, cutout=False):
+    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
+    CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
+
+    if config.use_aa:
+        train_transform = transforms.Compose([
+            transforms.RandomCrop(32, padding=4, fill=128),
+            transforms.RandomHorizontalFlip(), CIFAR10Policy(),
+            transforms.ToTensor(),
+            transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
+        ])
+    else:
+        train_transform = transforms.Compose([
+            transforms.RandomCrop(32, padding=4),
+            transforms.RandomHorizontalFlip(),
+            transforms.ToTensor(),
+            transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
+        ])
+
+    if cutout:
+        train_transform.transforms.append(Cutout(config.cutout_length))
+
+    valid_transform = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
+    ])
+    return train_transform, valid_transform
+
+def get_search_datasets(config):
+    dataset = config.dataset.lower()
+    if dataset == 'cifar10':
+        dset_cls = dset.CIFAR10
+        n_classes = 10
+    elif dataset == 'cifar100':
+        dset_cls = dset.CIFAR100
+        n_classes = 100
+    else:
+        raise Exception("Unsupported dataset!")
+
+    train_transform, valid_transform = data_transforms_cifar(config, cutout=False)
+    train_data = dset_cls(root=config.data_dir, train=True, download=True, transform=train_transform)
+    test_data = dset_cls(root=config.data_dir, train=False, download=True, transform=valid_transform)
+
+    num_train = len(train_data)
+    indices = list(range(num_train))
+    split_mid = int(np.floor(0.5 * num_train))
+
+    train_sampler = SubsetDistributedSampler(train_data, indices[:split_mid])
+    valid_sampler = SubsetDistributedSampler(train_data, indices[split_mid:num_train])
+
+    train_loader = torch.utils.data.DataLoader(
+        train_data, batch_size=config.batch_size,
+        sampler=train_sampler,
+        pin_memory=True, num_workers=config.workers)
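# --- Editor's note (illustration only, not part of this patch) -----------
# The 50/50 split above gives the bilevel search two disjoint halves of the
# CIFAR training set: the first half trains the supernet weights, the second
# trains the architecture parameters. SubsetDistributedSampler (defined in
# data_utils.py below) then shards each half across ranks: with 50,000
# images and 4 processes, split_mid = 25,000, each sampler pads its half to
# a multiple of 4, and rank r iterates indices[r::4] of its shuffled half,
# so every process sees a disjoint 6,250-image shard per epoch.
# --------------------------------------------------------------------------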
+    valid_loader = torch.utils.data.DataLoader(
+        train_data, batch_size=config.batch_size,
+        sampler=valid_sampler,
+        pin_memory=True, num_workers=config.workers)
+
+    return [train_loader, valid_loader], [train_sampler, valid_sampler]
+
+def get_augment_datasets(config):
+    dataset = config.dataset.lower()
+    if dataset == 'cifar10':
+        dset_cls = dset.CIFAR10
+    elif dataset == 'cifar100':
+        dset_cls = dset.CIFAR100
+    else:
+        raise Exception("Unsupported dataset!")
+
+    train_transform, valid_transform = data_transforms_cifar(config, cutout=True)
+    train_data = dset_cls(root=config.data_dir, train=True, download=True, transform=train_transform)
+    test_data = dset_cls(root=config.data_dir, train=False, download=True, transform=valid_transform)
+
+    train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
+    test_sampler = torch.utils.data.distributed.DistributedSampler(test_data)
+
+    train_loader = torch.utils.data.DataLoader(
+        train_data, batch_size=config.batch_size,
+        sampler=train_sampler,
+        pin_memory=True, num_workers=config.workers)
+
+    test_loader = torch.utils.data.DataLoader(
+        test_data, batch_size=config.batch_size,
+        sampler=test_sampler,
+        pin_memory=True, num_workers=config.workers)
+
+    return [train_loader, test_loader], [train_sampler, test_sampler]
+
diff --git a/lib/datasets/data_utils.py b/lib/datasets/data_utils.py
new file mode 100644
index 0000000..eef2575
--- /dev/null
+++ b/lib/datasets/data_utils.py
@@ -0,0 +1,393 @@
+import math
+import torch
+import random
+import numpy as np
+import torch.distributed as dist
+from torch.utils.data import Sampler
+from PIL import Image, ImageEnhance, ImageOps
+
+class SubsetDistributedSampler(Sampler):
+    """Sampler that restricts data loading to a subset of the dataset.
+
+    It is especially useful in conjunction with
+    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
+    process can pass a DistributedSampler instance as a DataLoader sampler,
+    and load a subset of the original dataset that is exclusive to it.
+
+    .. note::
+        Dataset is assumed to be of constant size.
+
+    Arguments:
+        dataset: Dataset used for sampling.
+        num_replicas (optional): Number of processes participating in
+            distributed training.
+        rank (optional): Rank of the current process within num_replicas. 
+ shuffle (optional): If true (default), sampler will shuffle the indices + """ + + def __init__(self, dataset, indices, num_replicas=None, rank=None, shuffle=True): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.indices = indices + self.num_samples = int(math.ceil(len(self.indices) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + if self.shuffle: + # indices = torch.randperm(len(self.dataset), generator=g).tolist() + indices = list(self.indices[i] for i in torch.randperm(len(self.indices))) + else: + # indices = list(range(len(self.dataset))) + indices = self.indices + + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch + + +class data_prefetcher(): + def __init__(self, loader): + self.loader = iter(loader) + self.stream = torch.cuda.Stream() + self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1) + self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1) + # With Amp, it isn't necessary to manually convert data to half. + # if args.fp16: + # self.mean = self.mean.half() + # self.std = self.std.half() + self.preload() + + def preload(self): + try: + self.next_input, self.next_target = next(self.loader) + except StopIteration: + self.next_input = None + self.next_target = None + return + with torch.cuda.stream(self.stream): + self.next_input = self.next_input.cuda(non_blocking=True) + self.next_target = self.next_target.cuda(non_blocking=True) + # With Amp, it isn't necessary to manually convert data to half. + # if args.fp16: + # self.next_input = self.next_input.half() + # else: + self.next_input = self.next_input.float() + self.next_input = self.next_input.sub_(self.mean).div_(self.std) + + def next(self): + torch.cuda.current_stream().wait_stream(self.stream) + input = self.next_input + target = self.next_target + self.preload() + return input, target + +class Cutout(object): + def __init__(self, length): + self.length = length + + def __call__(self, img): + h, w = img.size(1), img.size(2) + mask = np.ones((h, w), np.float32) + y = np.random.randint(h) + x = np.random.randint(w) + + y1 = np.clip(y - self.length // 2, 0, h) + y2 = np.clip(y + self.length // 2, 0, h) + x1 = np.clip(x - self.length // 2, 0, w) + x2 = np.clip(x + self.length // 2, 0, w) + + mask[y1: y2, x1: x2] = 0. + mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img *= mask + + return img + +class ImageNetPolicy(object): + """ Randomly choose one of the best 24 Sub-policies on ImageNet. 
+ Example: + >>> policy = ImageNetPolicy() + >>> transformed = policy(image) + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> ImageNetPolicy(), + >>> transforms.ToTensor()]) + """ + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.4, "posterize", 8, 0.6, "rotate", 9, fillcolor), + SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor), + SubPolicy(0.6, "posterize", 7, 0.6, "posterize", 6, fillcolor), + SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor), + + SubPolicy(0.4, "equalize", 4, 0.8, "rotate", 8, fillcolor), + SubPolicy(0.6, "solarize", 3, 0.6, "equalize", 7, fillcolor), + SubPolicy(0.8, "posterize", 5, 1.0, "equalize", 2, fillcolor), + SubPolicy(0.2, "rotate", 3, 0.6, "solarize", 8, fillcolor), + SubPolicy(0.6, "equalize", 8, 0.4, "posterize", 6, fillcolor), + + SubPolicy(0.8, "rotate", 8, 0.4, "color", 0, fillcolor), + SubPolicy(0.4, "rotate", 9, 0.6, "equalize", 2, fillcolor), + SubPolicy(0.0, "equalize", 7, 0.8, "equalize", 8, fillcolor), + SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), + SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor), + + SubPolicy(0.8, "rotate", 8, 1.0, "color", 2, fillcolor), + SubPolicy(0.8, "color", 8, 0.8, "solarize", 7, fillcolor), + SubPolicy(0.4, "sharpness", 7, 0.6, "invert", 8, fillcolor), + SubPolicy(0.6, "shearX", 5, 1.0, "equalize", 9, fillcolor), + SubPolicy(0.4, "color", 0, 0.6, "equalize", 3, fillcolor), + + SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor), + SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor), + SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor), + SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor) + ] + + + def __call__(self, img): + policy_idx = random.randint(0, len(self.policies) - 1) + return self.policies[policy_idx](img) + + def __repr__(self): + return "AutoAugment ImageNet Policy" + + +class CIFAR10Policy(object): + """ Randomly choose one of the best 25 Sub-policies on CIFAR10. 
+ Example: + >>> policy = CIFAR10Policy() + >>> transformed = policy(image) + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> CIFAR10Policy(), + >>> transforms.ToTensor()]) + """ + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.1, "invert", 7, 0.2, "contrast", 6, fillcolor), + SubPolicy(0.7, "rotate", 2, 0.3, "translateX", 9, fillcolor), + SubPolicy(0.8, "sharpness", 1, 0.9, "sharpness", 3, fillcolor), + SubPolicy(0.5, "shearY", 8, 0.7, "translateY", 9, fillcolor), + SubPolicy(0.5, "autocontrast", 8, 0.9, "equalize", 2, fillcolor), + + SubPolicy(0.2, "shearY", 7, 0.3, "posterize", 7, fillcolor), + SubPolicy(0.4, "color", 3, 0.6, "brightness", 7, fillcolor), + SubPolicy(0.3, "sharpness", 9, 0.7, "brightness", 9, fillcolor), + SubPolicy(0.6, "equalize", 5, 0.5, "equalize", 1, fillcolor), + SubPolicy(0.6, "contrast", 7, 0.6, "sharpness", 5, fillcolor), + + SubPolicy(0.7, "color", 7, 0.5, "translateX", 8, fillcolor), + SubPolicy(0.3, "equalize", 7, 0.4, "autocontrast", 8, fillcolor), + SubPolicy(0.4, "translateY", 3, 0.2, "sharpness", 6, fillcolor), + SubPolicy(0.9, "brightness", 6, 0.2, "color", 8, fillcolor), + SubPolicy(0.5, "solarize", 2, 0.0, "invert", 3, fillcolor), + + SubPolicy(0.2, "equalize", 0, 0.6, "autocontrast", 0, fillcolor), + SubPolicy(0.2, "equalize", 8, 0.6, "equalize", 4, fillcolor), + SubPolicy(0.9, "color", 9, 0.6, "equalize", 6, fillcolor), + SubPolicy(0.8, "autocontrast", 4, 0.2, "solarize", 8, fillcolor), + SubPolicy(0.1, "brightness", 3, 0.7, "color", 0, fillcolor), + + SubPolicy(0.4, "solarize", 5, 0.9, "autocontrast", 3, fillcolor), + SubPolicy(0.9, "translateY", 9, 0.7, "translateY", 9, fillcolor), + SubPolicy(0.9, "autocontrast", 2, 0.8, "solarize", 3, fillcolor), + SubPolicy(0.8, "equalize", 8, 0.1, "invert", 3, fillcolor), + SubPolicy(0.7, "translateY", 9, 0.9, "autocontrast", 1, fillcolor) + ] + + + def __call__(self, img): + policy_idx = random.randint(0, len(self.policies) - 1) + return self.policies[policy_idx](img) + + def __repr__(self): + return "AutoAugment CIFAR10 Policy" + + +class SVHNPolicy(object): + """ Randomly choose one of the best 25 Sub-policies on SVHN. 
+ Example: + >>> policy = SVHNPolicy() + >>> transformed = policy(image) + Example as a PyTorch Transform: + >>> transform=transforms.Compose([ + >>> transforms.Resize(256), + >>> SVHNPolicy(), + >>> transforms.ToTensor()]) + """ + def __init__(self, fillcolor=(128, 128, 128)): + self.policies = [ + SubPolicy(0.9, "shearX", 4, 0.2, "invert", 3, fillcolor), + SubPolicy(0.9, "shearY", 8, 0.7, "invert", 5, fillcolor), + SubPolicy(0.6, "equalize", 5, 0.6, "solarize", 6, fillcolor), + SubPolicy(0.9, "invert", 3, 0.6, "equalize", 3, fillcolor), + SubPolicy(0.6, "equalize", 1, 0.9, "rotate", 3, fillcolor), + + SubPolicy(0.9, "shearX", 4, 0.8, "autocontrast", 3, fillcolor), + SubPolicy(0.9, "shearY", 8, 0.4, "invert", 5, fillcolor), + SubPolicy(0.9, "shearY", 5, 0.2, "solarize", 6, fillcolor), + SubPolicy(0.9, "invert", 6, 0.8, "autocontrast", 1, fillcolor), + SubPolicy(0.6, "equalize", 3, 0.9, "rotate", 3, fillcolor), + + SubPolicy(0.9, "shearX", 4, 0.3, "solarize", 3, fillcolor), + SubPolicy(0.8, "shearY", 8, 0.7, "invert", 4, fillcolor), + SubPolicy(0.9, "equalize", 5, 0.6, "translateY", 6, fillcolor), + SubPolicy(0.9, "invert", 4, 0.6, "equalize", 7, fillcolor), + SubPolicy(0.3, "contrast", 3, 0.8, "rotate", 4, fillcolor), + + SubPolicy(0.8, "invert", 5, 0.0, "translateY", 2, fillcolor), + SubPolicy(0.7, "shearY", 6, 0.4, "solarize", 8, fillcolor), + SubPolicy(0.6, "invert", 4, 0.8, "rotate", 4, fillcolor), + SubPolicy(0.3, "shearY", 7, 0.9, "translateX", 3, fillcolor), + SubPolicy(0.1, "shearX", 6, 0.6, "invert", 5, fillcolor), + + SubPolicy(0.7, "solarize", 2, 0.6, "translateY", 7, fillcolor), + SubPolicy(0.8, "shearY", 4, 0.8, "invert", 8, fillcolor), + SubPolicy(0.7, "shearX", 9, 0.8, "translateY", 3, fillcolor), + SubPolicy(0.8, "shearY", 5, 0.7, "autocontrast", 3, fillcolor), + SubPolicy(0.7, "shearX", 2, 0.1, "invert", 5, fillcolor) + ] + + + def __call__(self, img): + policy_idx = random.randint(0, len(self.policies) - 1) + return self.policies[policy_idx](img) + + def __repr__(self): + return "AutoAugment SVHN Policy" + + +class SubPolicy(object): + def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)): + ranges = { + "shearX": np.linspace(0, 0.3, 10), + "shearY": np.linspace(0, 0.3, 10), + "translateX": np.linspace(0, 150 / 331, 10), + "translateY": np.linspace(0, 150 / 331, 10), + "rotate": np.linspace(0, 30, 10), + "color": np.linspace(0.0, 0.9, 10), + "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int), + "solarize": np.linspace(256, 0, 10), + "contrast": np.linspace(0.0, 0.9, 10), + "sharpness": np.linspace(0.0, 0.9, 10), + "brightness": np.linspace(0.0, 0.9, 10), + "autocontrast": [0] * 10, + "equalize": [0] * 10, + "invert": [0] * 10 + } + + # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand + def rotate_with_fill(img, magnitude): + rot = img.convert("RGBA").rotate(magnitude) + return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode) + + func = { + "shearX": lambda img, magnitude: img.transform( + img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0), + Image.BICUBIC, fillcolor=fillcolor), + "shearY": lambda img, magnitude: img.transform( + img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0), + Image.BICUBIC, fillcolor=fillcolor), + "translateX": lambda img, magnitude: img.transform( + img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * 
random.choice([-1, 1]), 0, 1, 0),
+                fillcolor=fillcolor),
+            "translateY": lambda img, magnitude: img.transform(
+                img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
+                fillcolor=fillcolor),
+            "rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
+            "color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
+            "posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
+            "solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
+            "contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
+                1 + magnitude * random.choice([-1, 1])),
+            "sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
+                1 + magnitude * random.choice([-1, 1])),
+            "brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
+                1 + magnitude * random.choice([-1, 1])),
+            "autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
+            "equalize": lambda img, magnitude: ImageOps.equalize(img),
+            "invert": lambda img, magnitude: ImageOps.invert(img)
+        }
+
+        self.p1 = p1
+        self.operation1 = func[operation1]
+        self.magnitude1 = ranges[operation1][magnitude_idx1]
+        self.p2 = p2
+        self.operation2 = func[operation2]
+        self.magnitude2 = ranges[operation2][magnitude_idx2]
+
+    def __call__(self, img):
+        if random.random() < self.p1:
+            img = self.operation1(img, self.magnitude1)
+        if random.random() < self.p2:
+            img = self.operation2(img, self.magnitude2)
+        return img
+
+def fast_collate(batch):
+    imgs = [img[0] for img in batch]
+    targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
+    w = imgs[0].size[0]
+    h = imgs[0].size[1]
+    tensor = torch.zeros((len(imgs), 3, h, w), dtype=torch.uint8)
+    for i, img in enumerate(imgs):
+        nump_array = np.asarray(img, dtype=np.uint8)
+        if nump_array.ndim < 3:
+            nump_array = np.expand_dims(nump_array, axis=-1)
+        nump_array = np.rollaxis(nump_array, 2)
+
+        tensor[i] += torch.from_numpy(nump_array)
+
+    return tensor, targets
+
+def mixup_data(x, y, alpha=1.0, use_cuda=True):
+    '''Returns mixed inputs, pairs of targets, and lambda'''
+    if alpha > 0:
+        lam = np.random.beta(alpha, alpha)
+    else:
+        lam = 1
+
+    batch_size = x.size()[0]
+    if use_cuda:
+        index = torch.randperm(batch_size).cuda()
+    else:
+        index = torch.randperm(batch_size)
+
+    mixed_x = lam * x + (1 - lam) * x[index, :]
+    y_a, y_b = y, y[index]
+    return mixed_x, y_a, y_b, lam
+
+
+def mixup_criterion(criterion, pred, y_a, y_b, lam):
+    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
+
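# --- Editor's sketch (illustration only, not part of this patch) ---------
# mixup_data above draws lam ~ Beta(alpha, alpha) and blends each image
# with a randomly permuted partner (mixed = lam*x + (1-lam)*x[perm]);
# mixup_criterion weights the two labels the same way. A toy demo using
# the two helpers defined above (all values are made up):
import torch
import torch.nn as nn

x = torch.randn(4, 3, 32, 32)                  # toy image batch
y = torch.tensor([0, 1, 2, 3])                 # toy labels
mixed, y_a, y_b, lam = mixup_data(x, y, alpha=1.0, use_cuda=False)
logits = torch.randn(4, 10)                    # stand-in network output
loss = mixup_criterion(nn.CrossEntropyLoss(), logits, y_a, y_b, lam)
# e.g. lam = 0.3 means each input is 30% its own image and 70% its
# partner's, and the loss mixes CE against y_a and y_b with those weights.
# --------------------------------------------------------------------------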
diff --git a/lib/datasets/imagenet.py b/lib/datasets/imagenet.py
new file mode 100644
index 0000000..d5ddd40
--- /dev/null
+++ b/lib/datasets/imagenet.py
@@ -0,0 +1,102 @@
+import torch
+import numpy as np
+import torchvision.datasets as dset
+import torchvision.transforms as transforms
+from lib.datasets.data_utils import SubsetDistributedSampler
+from lib.datasets.data_utils import ImageNetPolicy
+
+def get_search_datasets(config):
+    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+    train_data = dset.ImageFolder(
+        config.train_dir,
+        transforms.Compose([
+            transforms.RandomResizedCrop(224),
+            transforms.RandomHorizontalFlip(),
+            transforms.ColorJitter(
+                brightness=0.4,
+                contrast=0.4,
+                saturation=0.4,
+                hue=0.2),
+            transforms.ToTensor(),
+            normalize,
+        ]))
+
+    test_data = dset.ImageFolder(
+        config.test_dir,
+        transforms.Compose([
+            transforms.Resize(256),
+            transforms.CenterCrop(224),
+            transforms.ToTensor(),
+            normalize,
+        ]))
+
+    num_train = len(train_data)
+    indices = list(range(num_train))
+    split_mid = int(np.floor(0.5 * num_train))
+
+    train_sampler = SubsetDistributedSampler(train_data, indices[:split_mid])
+    valid_sampler = SubsetDistributedSampler(train_data, indices[split_mid:num_train])
+
+    train_loader = torch.utils.data.DataLoader(
+        train_data, batch_size=config.batch_size,
+        sampler=train_sampler,
+        pin_memory=True, num_workers=config.workers)
+
+    valid_loader = torch.utils.data.DataLoader(
+        train_data, batch_size=config.batch_size,
+        sampler=valid_sampler,
+        pin_memory=True, num_workers=config.workers)
+
+    return [train_loader, valid_loader], [train_sampler, valid_sampler]
+
+def get_augment_datasets(config):
+    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+    if config.use_aa:
+        train_data = dset.ImageFolder(
+            config.train_dir,
+            transforms.Compose([
+                transforms.RandomResizedCrop(224),
+                transforms.RandomHorizontalFlip(),
+                ImageNetPolicy(),
+                transforms.ToTensor(),
+                normalize,
+            ]))
+    else:
+        train_data = dset.ImageFolder(
+            config.train_dir,
+            transforms.Compose([
+                transforms.RandomResizedCrop(224),
+                transforms.RandomHorizontalFlip(),
+                transforms.ColorJitter(
+                    brightness=0.4,
+                    contrast=0.4,
+                    saturation=0.4,
+                    hue=0.2),
+                transforms.ToTensor(),
+                normalize,
+            ]))
+
+    test_data = dset.ImageFolder(
+        config.test_dir,
+        transforms.Compose([
+            transforms.Resize(256),
+            transforms.CenterCrop(224),
+            transforms.ToTensor(),
+            normalize,
+        ]))
+
+    train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
+    test_sampler = torch.utils.data.distributed.DistributedSampler(test_data)
+
+    train_loader = torch.utils.data.DataLoader(
+        train_data, batch_size=config.batch_size,
+        sampler=train_sampler,
+        pin_memory=True, num_workers=config.workers)
+
+    test_loader = torch.utils.data.DataLoader(
+        test_data, batch_size=config.batch_size,
+        sampler=test_sampler,
+        pin_memory=True, num_workers=config.workers)
+
+    return [train_loader, test_loader], [train_sampler, test_sampler]
+
diff --git a/lib/models/augment_cells.py b/lib/models/augment_cells.py
new file mode 100644
index 0000000..7deab0f
--- /dev/null
+++ b/lib/models/augment_cells.py
@@ -0,0 +1,44 @@
+""" CNN cell for network augmentation """
+import torch
+import torch.nn as nn
+from lib.models import ops
+import lib.utils.genotypes as gt
+
+
+class AugmentCell(nn.Module):
+    """ Cell for augmentation
+    Each edge is discrete.
+ """ + def __init__(self, genotype, C_pp, C_p, C, reduction_p, reduction, bn_affine=True): + super().__init__() + self.reduction = reduction + self.n_nodes = len(genotype.normal) + + if reduction_p: + self.preproc0 = ops.FactorizedReduce(C_pp, C, affine=bn_affine) + else: + self.preproc0 = ops.StdConv(C_pp, C, 1, 1, 0, affine=bn_affine) + self.preproc1 = ops.StdConv(C_p, C, 1, 1, 0, affine=bn_affine) + + # generate dag + if reduction: + gene = genotype.reduce + self.concat = genotype.reduce_concat + else: + gene = genotype.normal + self.concat = genotype.normal_concat + + self.dag = gt.to_dag(C, gene, reduction, bn_affine) + + def forward(self, s0, s1): + s0 = self.preproc0(s0) + s1 = self.preproc1(s1) + + states = [s0, s1] + for edges in self.dag: + s_cur = sum(op(states[op.s_idx]) for op in edges) + states.append(s_cur) + + s_out = torch.cat([states[i] for i in self.concat], dim=1) + + return s_out diff --git a/lib/models/aux_head.py b/lib/models/aux_head.py new file mode 100644 index 0000000..88750f2 --- /dev/null +++ b/lib/models/aux_head.py @@ -0,0 +1,99 @@ +import torch +import torch.nn as nn + + +class DistillHeadCIFAR(nn.Module): + + def __init__(self, C, size, num_classes, bn_affine=True): + """assuming input size 8x8 or 16x16""" + super(DistillHeadCIFAR, self).__init__() + self.features = nn.Sequential( + nn.ReLU(), + nn.AvgPool2d(size, stride=2, padding=0, count_include_pad=False), # image size = 2 x 2 / 6 x 6 + nn.Conv2d(C, 128, 1, bias=False), + # nn.BatchNorm2d(128, affine=bn_affine, track_running_stats=False), + nn.BatchNorm2d(128, affine=bn_affine), + nn.ReLU(), + nn.Conv2d(128, 768, 2, bias=False), + nn.BatchNorm2d(768, affine=bn_affine), + nn.ReLU() + ) + self.classifier = nn.Linear(768, num_classes) + self.gap = nn.AdaptiveAvgPool2d(1) + + def forward(self, x): + x = self.features(x) + x = self.gap(x) + x = self.classifier(x.view(x.size(0),-1)) + return x + +class DistillHeadImagenet(nn.Module): + + def __init__(self, C, size, num_classes, bn_affine=True): + """assuming input size 7x7 or 14x14""" + super(DistillHeadImagenet, self).__init__() + self.features = nn.Sequential( + nn.ReLU(), + nn.AvgPool2d(size, stride=2, padding=0, count_include_pad=False), # image size = 2 x 2 / 6 x 6 + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128, affine=bn_affine), + nn.ReLU(), + nn.Conv2d(128, 768, 2, bias=False), + nn.BatchNorm2d(768, affine=bn_affine), + nn.ReLU() + ) + self.classifier = nn.Linear(768, num_classes) + self.gap = nn.AdaptiveAvgPool2d(1) + + def forward(self, x): + x = self.features(x) + x = self.gap(x) + x = self.classifier(x.view(x.size(0),-1)) + return x + +class AuxiliaryHeadCIFAR(nn.Module): + + def __init__(self, C, size=5, num_classes=10): + """assuming input size 8x8""" + super(AuxiliaryHeadCIFAR, self).__init__() + self.features = nn.Sequential( + nn.ReLU(inplace=True), + nn.AvgPool2d(size, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2 + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True), + nn.Conv2d(128, 768, 2, bias=False), + nn.BatchNorm2d(768), + nn.ReLU(inplace=True) + ) + self.classifier = nn.Linear(768, num_classes) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0),-1)) + return x + + +class AuxiliaryHeadImageNet(nn.Module): + + def __init__(self, C, size=5, num_classes=1000): + """assuming input size 7x7""" + super(AuxiliaryHeadImageNet, self).__init__() + self.features = nn.Sequential( + nn.ReLU(inplace=True), + nn.AvgPool2d(size, stride=2, padding=0, 
count_include_pad=False), + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True), + nn.Conv2d(128, 768, 2, bias=False), + # NOTE: This batchnorm was omitted in my earlier implementation due to a typo. + # Commenting it out for consistency with the experiments in the paper. + # nn.BatchNorm2d(768), + nn.ReLU(inplace=True) + ) + self.classifier = nn.Linear(768, num_classes) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0),-1)) + return x diff --git a/lib/models/cdarts_controller.py b/lib/models/cdarts_controller.py new file mode 100644 index 0000000..bff7df0 --- /dev/null +++ b/lib/models/cdarts_controller.py @@ -0,0 +1,807 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import lib.utils.genotypes as gt +import logging +import copy + +from lib.models import ops +from lib.models.search_cells import SearchCell +from lib.models.augment_cells import AugmentCell +from lib.models.aux_head import AuxiliaryHeadCIFAR, AuxiliaryHeadImageNet, DistillHeadCIFAR, DistillHeadImagenet +from lib.models.model_augment import ModelAug + +class CDARTSController(nn.Module): + """ CDARTS Controller""" + def __init__(self, config, criterion, n_nodes=4, stem_multiplier=3, genotypes={}): + """ + args: + + """ + super(CDARTSController, self).__init__() + + # some settings + self.n_nodes = n_nodes + self.n_ops = len(gt.PRIMITIVES) + self.criterion = criterion + self.layer_num = config.layer_num + self.c_in = config.input_channels + self.num_classes = config.n_classes + # cifar10 or imagenet + self.model_type = config.model_type + self.stem_multiplier = stem_multiplier + self.init_channel = config.init_channels + self.res_stem = config.res_stem + self.ensemble_sum = config.ensemble_sum + self.use_ensemble_param = config.ensemble_param + self.use_beta = config.use_beta + self.bn_affine = config.bn_affine + self.repeat_cell = config.repeat_cell + self.fix_head = config.fix_head + self.share_fc = config.share_fc + self.sample_pretrain = config.sample_pretrain + + if self.model_type == 'cifar': + self.layers = [3, 3, 2] + self.layers_reduction = [True, True, False] + self.augment_layers = [7, 7, 6] + self.nas_layers = nn.ModuleList([None, None, None]) + + elif self.model_type == 'imagenet': + if self.res_stem: + self.layers = [2, 2, 2, 2] + self.nas_layers = nn.ModuleList([None, None, None, None]) + self.layers_reduction = [False, True, True, True] + self.augment_layers = [3, 4, 3, 4] + else: + self.layers = [3, 3, 2] + self.nas_layers = nn.ModuleList([None, None, None]) + self.layers_reduction = [True, True, False] + self.augment_layers = [5, 5, 4] + else: + raise Exception("Wrong model type!") + + # use genotypes to generate search layers + self.genotypes = genotypes + self.connects = {} + self.fc_super = None + self.fc_nas = None + self.distill_aux_c1 = None + self.distill_aux_c2 = None + self.feature_extractor = None + self.gap = nn.AdaptiveAvgPool2d(1) + + self.super_layers = nn.ModuleList() + self.super_layers_arch = nn.ModuleList() + + self.super_layers_pool = nn.ModuleList() + self.super_layers_pool_arch = nn.ModuleList() + self.model_main = None + + self.build_init_model() + + ######################## ---------------------------- ######################## + ######################## Functions for update modules ######################## + ######################## ---------------------------- ######################## + def build_init_model(self): + self.extractor_grad = True + if self.model_type == 'cifar': + 
self.feature_extractor = self.cifar_stem(self.init_channel * self.stem_multiplier) + reduction_p = False + elif self.model_type == 'imagenet': + if self.res_stem: + self.feature_extractor = self.resnet_stem(self.init_channel * self.stem_multiplier) + reduction_p = False + else: + self.feature_extractor = self.imagenet_stem(self.init_channel * self.stem_multiplier) + reduction_p = True + else: + raise Exception("error! not support now!") + + c_p = self.init_channel * self.stem_multiplier + c_pp = self.init_channel * self.stem_multiplier + c_cur = self.init_channel + self.super_layers_pool_arch.append(self.pretrain_architecture_params(self.n_ops)) + + if self.repeat_cell: + self.super_layers_arch.append(self.add_architecture_params(self.n_ops)) + + for layer_idx in range(self.layer_num): + reduction = self.layers_reduction[layer_idx] + + super_layer = self.add_super_layer(c_cur, c_p, c_pp, reduction_p, reduction, self.layers[layer_idx]) + super_layer_pool = self.add_super_layer(c_cur, c_p, c_pp, reduction_p, reduction, self.augment_layers[layer_idx], is_slim=self.sample_pretrain) + super_layer_arch = self.add_architecture_params(self.n_ops) + + self.freeze_unused_params(super_layer_arch, reduction, self.layers[layer_idx]) + self.super_layers.append(super_layer) + self.super_layers_pool.append(super_layer_pool) + if not self.repeat_cell: + self.super_layers_arch.append(super_layer_arch) + + if reduction: + c_p = c_cur * 2 * self.n_nodes + else: + c_p = c_cur * self.n_nodes + + if self.res_stem: + c_pp = c_p + reduction_p = False + else: + c_pp = c_cur * self.n_nodes + reduction_p = reduction + + if layer_idx == self.layer_num-3: + self.distill_aux_c1 = c_p + if layer_idx == self.layer_num-2: + self.distill_aux_c2 = c_p + + if reduction: + c_cur = c_cur * 2 + else: + c_cur = c_cur + + self.fc_super = nn.Linear(c_p, self.num_classes) + if self.share_fc: + self.fc_nas = self.fc_super + else: + self.fc_nas = nn.Linear(c_p, self.num_classes) + + if self.use_ensemble_param: + self.ensemble_param = nn.Parameter(0.333*torch.rand(3), requires_grad=True) + else: + self.ensemble_param = nn.Parameter(0.333*torch.ones(3), requires_grad=False) + if self.model_type == 'cifar': + self.distill_aux_head1 = DistillHeadCIFAR(self.distill_aux_c1, 6, self.num_classes, bn_affine=False) + self.distill_aux_head2 = DistillHeadCIFAR(self.distill_aux_c2, 6, self.num_classes, bn_affine=False) + elif self.model_type == 'imagenet': + if self.res_stem: + self.distill_aux_head1 = DistillHeadImagenet(self.distill_aux_c1, 14, self.num_classes, bn_affine=False) + self.distill_aux_head2 = DistillHeadImagenet(self.distill_aux_c2, 6, self.num_classes, bn_affine=False) + else: + self.distill_aux_head1 = DistillHeadImagenet(self.distill_aux_c1, 6, self.num_classes, bn_affine=False) + self.distill_aux_head2 = DistillHeadImagenet(self.distill_aux_c2, 5, self.num_classes, bn_affine=False) + else: + raise Exception("error! 
not support now!") + + + self.fix_structure() + + def fix_structure(self): + if self.fix_head: + for n, p in self.distill_aux_head1.named_parameters(): + p.requires_grad = False + for n, p in self.distill_aux_head2.named_parameters(): + p.requires_grad = False + + def fix_pre_layers(self, layer_idx=0): + for i in range(layer_idx): + for name, param in self.super_layers_arch[i].named_parameters(): + param.requires_grad=False + + def build_nas_layers(self, layer_idx, best_genotype, same_structure=False): + c_p = self.init_channel * self.stem_multiplier + c_pp = self.init_channel * self.stem_multiplier + c_cur = self.init_channel + if self.model_type == 'cifar': + reduction_p = False + elif self.model_type == 'imagenet': + if self.res_stem: + reduction_p = False + else: + reduction_p = True + else: + raise Exception("error! not support now!") + + for i in range(self.layer_num): + reduction = self.layers_reduction[i] + + if i == layer_idx: + break + + if reduction: + c_p = c_cur * 2 * self.n_nodes + else: + c_p = c_cur * self.n_nodes + + if self.res_stem: + c_pp = c_p + reduction_p = False + else: + c_pp = c_cur * self.n_nodes + reduction_p = reduction + + if reduction: + c_cur = c_cur * 2 + else: + c_cur = c_cur + + # once model search is well trained, transfor model params from model_search to model_main + # genotype = self.generate_genotype(self.model_search.arch_params) + if same_structure: + nas_layer = self.generate_nas_layer(c_cur, c_p, c_pp, reduction_p, reduction, best_genotype, self.layers[layer_idx], bn_affine=self.bn_affine) + else: + nas_layer = self.generate_nas_layer(c_cur, c_p, c_pp, reduction_p, reduction, best_genotype, self.augment_layers[layer_idx], bn_affine=self.bn_affine) + self.genotypes[layer_idx] = best_genotype + self.nas_layers[layer_idx] = nas_layer + + def build_augment_model(self, init_channel, genotypes_dict): + if len(genotypes_dict.keys()) == 0: + raise Exception("error! genotypes is empty!") + else: + self.extractor_grad = True + if self.model_type == 'cifar': + feature_extractor = self.cifar_stem(self.init_channel * self.stem_multiplier) + reduction_p = False + elif self.model_type == 'imagenet': + if self.res_stem: + feature_extractor = self.resnet_stem(self.init_channel * self.stem_multiplier) + reduction_p = False + else: + feature_extractor = self.imagenet_stem(self.init_channel * self.stem_multiplier) + reduction_p = True + else: + raise Exception("error! 
not support now!") + + c_p = self.init_channel * self.stem_multiplier + c_pp = self.init_channel * self.stem_multiplier + c_cur = self.init_channel + + for layer_idx, genotype in genotypes_dict.items(): + reduction = self.layers_reduction[layer_idx] + nas_layer = self.generate_nas_layer(c_cur, c_p, c_pp, reduction_p, reduction, genotype, self.augment_layers[layer_idx]) + self.nas_layers[layer_idx] = nas_layer + + if reduction: + c_p = c_cur * 2 * self.n_nodes + else: + c_p = c_cur * self.n_nodes + + if self.res_stem: + c_pp = c_p + reduction_p = False + else: + c_pp = c_cur * self.n_nodes + reduction_p = reduction + + if reduction: + c_cur = c_cur * 2 + else: + c_cur = c_cur + + if layer_idx == self.layer_num-2: + c_aux = c_p + + if self.model_type == 'cifar': + aux_head = AuxiliaryHeadCIFAR(c_aux, 5, self.num_classes) + elif self.model_type == 'imagenet': + if self.res_stem: + aux_head = AuxiliaryHeadImageNet(c_aux, 12, self.num_classes) + else: + aux_head = AuxiliaryHeadImageNet(c_aux, 5, self.num_classes) + else: + aux_head = None + + # super_layers = copy.deepcopy(self.super_layers) + # super_layers_arch = copy.deepcopy(self.super_layers_arch) + nas_layers = copy.deepcopy(self.nas_layers) + fc = copy.deepcopy(self.fc_nas) + self.model_main = ModelAug(feature_extractor, nas_layers, fc, n_nodes=self.n_nodes, aux_head=aux_head) + + def freeze_unused_params(self, super_layer_arch, reduction, cell_num): + if not reduction: + for name, param in super_layer_arch.named_parameters(): + if name.startswith('1') or name.startswith('3'): + param.requires_grad=False + elif cell_num == 1 and reduction: + for name, param in super_layer_arch.named_parameters(): + if name.startswith('0') or name.startswith('2'): + param.requires_grad=False + else: + pass + + def param_copy(self, target_model, model): + if model: + for target_param, param in zip(target_model.parameters(), model.parameters()): + target_param.data.copy_(param.data) + + def param_copy_plus(self, target_model, model): + model_dict_keys = model.state_dict().keys() + for n, p in target_model.named_parameters(): + if n in model_dict_keys: + p.data.copy_(model.state_dict()[n]) + + def copy_params_from_super_layer(self, layer_idx): + super_layer = self.super_layers_pool[layer_idx] + nas_layer = self.nas_layers[layer_idx] + connect_dict = self.connects[layer_idx] + normal_cell_connect = connect_dict['normal'] + reduce_cell_connect = connect_dict['reduce'] + + for super_cell, nas_cell in zip(super_layer, nas_layer): + # copy preproc0 and preproc1 + self.param_copy_plus(nas_cell.preproc0, super_cell.preproc0) + self.param_copy_plus(nas_cell.preproc1, super_cell.preproc1) + + if super_cell.reduction: + cell_connect = reduce_cell_connect + else: + cell_connect = normal_cell_connect + + for i, (super_hidden, nas_hidden) in enumerate(zip(super_cell.dag, nas_cell.dag)): + hidden_connect = cell_connect[i] + # k = 2 + for j in range(len(hidden_connect)): + connect = hidden_connect[j] + super_edge = super_hidden[connect[0]] + super_op = super_edge._ops[connect[1]] + nas_edge = nas_hidden[j] + if isinstance(nas_edge, ops.Identity): + break + nas_op = nas_edge[0] + # copy params + self.param_copy_plus(nas_op, super_op) + # self.param_copy(super_op, nas_op) + + def copy_params_from_nas_layer(self, layer_idx): + super_layer = self.super_layers_pool[layer_idx] + nas_layer = self.nas_layers[layer_idx] + connect_dict = self.connects[layer_idx] + normal_cell_connect = connect_dict['normal'] + reduce_cell_connect = connect_dict['reduce'] + + for super_cell, nas_cell 
in zip(super_layer, nas_layer): + # copy preproc0 and preproc1 + self.param_copy_plus(super_cell.preproc0, nas_cell.preproc0) + self.param_copy_plus(super_cell.preproc1, nas_cell.preproc1) + + if super_cell.reduction: + cell_connect = reduce_cell_connect + else: + cell_connect = normal_cell_connect + + for i, (super_hidden, nas_hidden) in enumerate(zip(super_cell.dag, nas_cell.dag)): + hidden_connect = cell_connect[i] + # k = 2 + for j in range(len(hidden_connect)): + connect = hidden_connect[j] + super_edge = super_hidden[connect[0]] + super_op = super_edge._ops[connect[1]] + nas_edge = nas_hidden[j] + if isinstance(nas_edge, ops.Identity): + break + nas_op = nas_edge[0] + # copy params + self.param_copy_plus(super_op, nas_op) + # self.param_copy(super_op, nas_op) + + ######################## -------------------------- ######################## + ######################## Functions for layer search ######################## + ######################## -------------------------- ######################## + + def add_super_layer(self, C_cur, C_p, C_pp, reduction_p=False, reduction_cur=False, cell_num=3, is_slim=False): + cells = nn.ModuleList() + # reduction_idx = (cell_num + 1) // 2 - 1 + # the first cell(block) is downsample + # reduction_idx = 0 + if self.res_stem: + reduction_idx = 0 + else: + reduction_idx = cell_num - 1 + + for i in range(cell_num): + if i == reduction_idx and reduction_cur: + C_cur *= 2 + reduction = True + else: + reduction = False + cell = SearchCell(self.n_nodes, C_pp, C_p, C_cur, reduction_p, reduction, is_slim) + reduction_p = reduction + cells.append(cell) + C_cur_out = C_cur * self.n_nodes + C_pp, C_p = C_p, C_cur_out + + return cells + + def add_architecture_params(self, n_ops): + arch_params = nn.ModuleList() + + alpha_normal = nn.ParameterList() + alpha_reduce = nn.ParameterList() + beta_normal = nn.ParameterList() + beta_reduce = nn.ParameterList() + + for i in range(self.n_nodes): + alpha_normal.append(nn.Parameter(1e-3*torch.randn(i+2, n_ops))) + alpha_reduce.append(nn.Parameter(1e-3*torch.randn(i+2, n_ops))) + if self.use_beta: + beta_normal.append(nn.Parameter(1e-3*torch.randn(i+2))) + beta_reduce.append(nn.Parameter(1e-3*torch.randn(i+2))) + else: + beta_normal.append(nn.Parameter(1e-1*torch.ones(i+2), requires_grad=False)) + beta_reduce.append(nn.Parameter(1e-1*torch.ones(i+2), requires_grad=False)) + + arch_params.append(alpha_normal) + arch_params.append(alpha_reduce) + arch_params.append(beta_normal) + arch_params.append(beta_reduce) + + return arch_params + + def pretrain_architecture_params(self, n_ops): + arch_params = nn.ModuleList() + + alpha_normal = nn.ParameterList() + alpha_reduce = nn.ParameterList() + beta_normal = nn.ParameterList() + beta_reduce = nn.ParameterList() + + for i in range(self.n_nodes): + alpha_normal.append(nn.Parameter(1e-3*torch.ones(i+2, n_ops), requires_grad=False)) + alpha_reduce.append(nn.Parameter(1e-3*torch.ones(i+2, n_ops), requires_grad=False)) + beta_normal.append(nn.Parameter(1e-1*torch.ones(i+2), requires_grad=False)) + beta_reduce.append(nn.Parameter(1e-1*torch.ones(i+2), requires_grad=False)) + + arch_params.append(alpha_normal) + arch_params.append(alpha_reduce) + arch_params.append(beta_normal) + arch_params.append(beta_reduce) + + return arch_params + + ######################## ---------------------------- ######################## + ######################## Functions for layer generate ######################## + ######################## ---------------------------- ######################## + + def 
generate_nas_layer(self, C_cur, C_p, C_pp, reduction_p, reduction_cur, genotype, cell_num=3, bn_affine=True):
+        cells = nn.ModuleList()
+        # reduction_idx = (cell_num + 1) // 2 - 1
+        # the first cell (block) is the downsample
+        # reduction_idx = 0
+        if self.res_stem:
+            reduction_idx = 0
+        else:
+            reduction_idx = cell_num - 1
+
+        for i in range(cell_num):
+            if i == reduction_idx and reduction_cur:
+                C_cur *= 2
+                reduction = True
+            else:
+                reduction = False
+
+            cell = AugmentCell(genotype, C_pp, C_p, C_cur, reduction_p, reduction, bn_affine)
+            reduction_p = reduction
+            cells.append(cell)
+            C_cur_out = C_cur * len(cell.concat)
+            C_pp, C_p = C_p, C_cur_out
+
+        return cells
+
+    ######################## ---------------------------- ########################
+    ######################## Functions for stem ########################
+    ######################## ---------------------------- ########################
+    def resnet_stem(self, inplanes=64):
+        C_in = self.c_in
+        feature_extractor = nn.ModuleList()
+        stem = nn.Sequential(
+            nn.Conv2d(C_in, inplanes, kernel_size=7, stride=2, padding=3, bias=False),
+            nn.BatchNorm2d(inplanes),
+            nn.ReLU(inplace=True),
+            # layer1 is concatenated with the maxpool
+            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        )
+        feature_extractor.append(stem)
+        return feature_extractor
+
+    def cifar_stem(self, init_channel):
+        C_in = self.c_in
+        C_cur = init_channel
+        feature_extractor = nn.ModuleList()
+        stem = nn.Sequential(
+            nn.Conv2d(C_in, C_cur, 3, 1, 1, bias=False),
+            nn.BatchNorm2d(C_cur)
+        )
+        feature_extractor.append(stem)
+        return feature_extractor
+
+    def imagenet_stem(self, init_channel):
+        C_in = self.c_in
+        C_cur = init_channel
+        feature_extractor = nn.ModuleList()
+        stem0 = nn.Sequential(
+            nn.Conv2d(C_in, C_cur // 2, kernel_size=3, stride=2, padding=1, bias=False),
+            nn.BatchNorm2d(C_cur // 2),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(C_cur // 2, C_cur, 3, stride=2, padding=1, bias=False),
+            nn.BatchNorm2d(C_cur),
+        )
+
+        stem1 = nn.Sequential(
+            nn.ReLU(inplace=True),
+            nn.Conv2d(C_cur, C_cur, 3, stride=2, padding=1, bias=False),
+            nn.BatchNorm2d(C_cur),
+        )
+        feature_extractor.append(stem0)
+        feature_extractor.append(stem1)
+        return feature_extractor
+
+    ######################## ---------------------------- ########################
+    ######################## Functions for forward ########################
+    ######################## ---------------------------- ########################
+
+    def extract_features(self, im):
+        # feature_extractor is nn.ModuleList()
+        if len(self.feature_extractor) == 1:
+            s0 = self.feature_extractor[0](im)
+            s1 = s0
+            return [s0, s1]
+        elif len(self.feature_extractor) == 2:
+            s0 = self.feature_extractor[0](im)
+            s1 = self.feature_extractor[1](s0)
+            return [s0, s1]
+        else:
+            raise NotImplementedError
+
+    def init_arch_params(self, layer_idx):
+        init_arch_params = self.add_architecture_params(n_ops=len(ops.PRIMITIVES))
+        for i in range(layer_idx, len(self.super_layers_arch)):
+            target_arch = self.super_layers_arch[i]
+            self.param_copy(target_arch, init_arch_params)
+
+        for i in range(layer_idx, len(self.super_layers_pool_arch)):
+            target_arch = self.super_layers_pool_arch[i]
+            self.param_copy(target_arch, init_arch_params)
+
+        del init_arch_params
+
+    def freeze_arch_params(self, layer_idx=0):
+        # freeze the arch params of every stage except layer_idx
+        for i in range(len(self.super_layers_arch)):
+            if i != layer_idx:
+                for name, param in self.super_layers_arch[i].named_parameters():
+                    param.requires_grad=False
+            else:
+                for name, param in self.super_layers_arch[i].named_parameters():
param.requires_grad=True + + def print_arch_params(self, logger, layer_idx=0): + # remove formats + if self.repeat_cell: + alpha_normal, alpha_reduce, beta_normal, beta_reduce = self.super_layers_arch[0] + else: + alpha_normal, alpha_reduce, beta_normal, beta_reduce = self.super_layers_arch[layer_idx] + org_formatters = [] + for handler in logger.handlers: + org_formatters.append(handler.formatter) + handler.setFormatter(logging.Formatter("%(message)s")) + + logger.info("####### ALPHA #######") + logger.info("# Alpha - normal") + for alpha in alpha_normal: + logger.info(F.softmax(alpha, dim=-1)) + + logger.info("\n# Alpha - reduce") + for alpha in alpha_reduce: + logger.info(F.softmax(alpha, dim=-1)) + logger.info("#####################") + + if self.use_beta: + logger.info("####### BETA #######") + logger.info("# Beta - normal") + for beta in beta_normal: + logger.info(F.softmax(beta, dim=-1)) + + logger.info("\n# Beta - reduce") + for beta in beta_reduce: + logger.info(F.softmax(beta, dim=-1)) + logger.info("#####################") + + def generate_genotype(self, layer_idx=0): + # arch_params list + if self.repeat_cell: + alpha_normal, alpha_reduce, beta_normal, beta_reduce = self.super_layers_arch[0] + else: + alpha_normal, alpha_reduce, beta_normal, beta_reduce = self.super_layers_arch[layer_idx] + + weights_normal = [F.softmax(alpha, dim=-1) for alpha in alpha_normal] + weights_reduce = [F.softmax(alpha, dim=-1) for alpha in alpha_reduce] + weights_edge_normal = [F.softmax(beta, dim=0) for beta in beta_normal] + weights_edge_reduce = [F.softmax(beta, dim=0) for beta in beta_reduce] + + gene_normal, connect_normal = gt.parse(weights_normal, weights_edge_normal, k=2) + gene_reduce, connect_reduce = gt.parse(weights_reduce, weights_edge_reduce, k=2) + connect_dict = {"normal": connect_normal, "reduce": connect_reduce} + concat = range(2, 2+self.n_nodes) # concat all intermediate nodes + + return gt.Genotype(normal=gene_normal, normal_concat=concat, reduce=gene_reduce, reduce_concat=concat), connect_dict + + def generate_genotype_gumbel(self, layer_idx=0): + # arch_params list + if self.repeat_cell: + alpha_normal, alpha_reduce, beta_normal, beta_reduce = self.super_layers_arch[0] + else: + alpha_normal, alpha_reduce, beta_normal, beta_reduce = self.super_layers_arch[layer_idx] + + weights_normal = [F.softmax(alpha, dim=-1) for alpha in alpha_normal] + weights_reduce = [F.softmax(alpha, dim=-1) for alpha in alpha_reduce] + weights_edge_normal = [F.softmax(beta, dim=0) for beta in beta_normal] + weights_edge_reduce = [F.softmax(beta, dim=0) for beta in beta_reduce] + + gene_normal, connect_normal = gt.parse_gumbel(weights_normal, weights_edge_normal, k=2) + gene_reduce, connect_reduce = gt.parse_gumbel(weights_reduce, weights_edge_reduce, k=2) + connect_dict = {"normal": connect_normal, "reduce": connect_reduce} + concat = range(2, 2+self.n_nodes) # concat all intermediate nodes + + return gt.Genotype(normal=gene_normal, normal_concat=concat, reduce=gene_reduce, reduce_concat=concat), connect_dict + + def get_aux_logits(self, idx, s1): + if idx == self.layer_num-3: + return self.distill_aux_head1(s1) + if idx == self.layer_num-2: + return self.distill_aux_head2(s1) + return None + + def forward(self, x, layer_idx, super_flag=True, pretrain_flag=False, is_slim=False): + # layer_idx, which stage we are + # if super_flag, forward supernetwork else forward nas network + # if pretrain_flag, foward supernetwork pool + if pretrain_flag: + super_layers_num = len(self.super_layers) + nas_layers_num 
= 0 + super_layers = self.super_layers_pool + super_layers_arch = self.super_layers_pool_arch + else: + if super_flag: + super_layers = self.super_layers + super_layers_arch = self.super_layers_arch + nas_layers = self.nas_layers + nas_layers_num = len(self.nas_layers[:layer_idx]) + super_layers_num = len(self.super_layers[layer_idx:]) + else: + nas_layers = self.nas_layers + nas_layers_num = len(self.nas_layers) + super_layers_num = 0 + + outputs = [] + s0, s1 = self.extract_features(x) + + for i in range(nas_layers_num): + s0, s1 = self.forward_nas_layer(s0, s1, nas_layers[i]) + logit = self.get_aux_logits(i, s1) + if logit is not None: + outputs.append(logit) + + aux_logits = None + for j in range(super_layers_num): + k = nas_layers_num + j + if self.repeat_cell or pretrain_flag: + s0, s1 = self.forward_super_layer(s0, s1, super_layers[k], super_layers_arch[0], is_slim) + if k == self.layer_num-2: + aux_logits = self.distill_aux_head2(s1) + else: + s0, s1 = self.forward_super_layer(s0, s1, super_layers[k], super_layers_arch[k], is_slim) + + if not pretrain_flag: + logit = self.get_aux_logits(k, s1) + if logit is not None: + outputs.append(logit) + + out = self.gap(s1) + out = out.view(out.size(0), -1) # flatten + if super_flag: + logits = self.fc_super(out) + else: + logits = self.fc_nas(out) + + if pretrain_flag: + return logits, aux_logits + + outputs.append(logits) + logits_output = logits + + ensemble_param = F.softmax(self.ensemble_param, dim=0) + if self.ensemble_sum: + em_output = ensemble_param[0] * outputs[0] + ensemble_param[1] * outputs[1] + ensemble_param[2] * outputs[2] + else: + em_output = torch.cat((ensemble_param[0] * outputs[0], ensemble_param[1] * outputs[1], ensemble_param[2] * outputs[2]), 0) + + return logits_output, em_output + # return em_output, em_output + + def process_alpha(self, alpha_param, beta_param): + weights_normal = [F.softmax(alpha, dim=-1) for alpha in alpha_param] + weights_edge_normal = [F.softmax(beta, dim=0) for beta in beta_param] + output_alpha = nn.ParameterList() + for alpha in weights_normal: + output_alpha.append(nn.Parameter(torch.zeros_like(alpha), requires_grad=False)) + + connect_idx = [] + k = 2 + for idx, (edges, w) in enumerate(zip(weights_normal, weights_edge_normal)): + # edges: Tensor(n_edges, n_ops) + edge_max, primitive_indices = torch.topk((w.view(-1, 1) * edges)[:, :-1], 1) # ignore 'none' + topk_edge_values, topk_edge_indices = torch.topk(edge_max.view(-1), k) + node_idx = [] + for edge_idx in topk_edge_indices: + prim_idx = primitive_indices[edge_idx] + node_idx.append((edge_idx.item(), prim_idx.item())) + output_alpha[idx][edge_idx.item(), prim_idx.item()] = 1. 
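+                # the chosen (edge, op) entries get weight 1.0, so the soft
+                # alphas become one-hot masks; with is_slim=True, MixedOp then
+                # runs only this argmax op instead of the weighted sum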
+ + connect_idx.append(node_idx) + + return output_alpha + + def forward_super_layer(self, s0, s1, super_layer, arch_params, is_slim=False): + # arch_params: list + # super_layer: cells (2 / 3) + + alpha_normal, alpha_reduce, beta_normal, beta_reduce = arch_params + if is_slim: + weights_normal = self.process_alpha(alpha_normal, beta_normal) + weights_edge_normal = [F.softmax(beta, dim=0) for beta in beta_normal] + weights_reduce = self.process_alpha(alpha_reduce, beta_reduce) + weights_edge_reduce = [F.softmax(beta, dim=0) for beta in beta_reduce] + else: + weights_normal = [F.softmax(alpha, dim=-1) for alpha in alpha_normal] + weights_edge_normal = [F.softmax(beta, dim=0) for beta in beta_normal] + weights_reduce = [F.softmax(alpha, dim=-1) for alpha in alpha_reduce] + weights_edge_reduce = [F.softmax(beta, dim=0) for beta in beta_reduce] + + for cell in super_layer: + weights = weights_reduce if cell.reduction else weights_normal + weights_edge = weights_edge_reduce if cell.reduction else weights_edge_normal + s0, s1 = s1, cell(s0, s1, weights, weights_edge) + + return s0, s1 + + def forward_nas_layer(self, s0, s1, nas_layer): + + for cell in nas_layer: + s0, s1 = s1, cell(s0, s1) + + return s0, s1 + + def loss(self, X, y): + logits = self.forward(X) + return self.criterion(logits, y) + + def add_alpha_regularization(self, operations, weight_decay=0.0005, method='L2', normal=True, reduce=True): + if method == 'L2': + reg_loss = torch.tensor(0.).to(torch.device("cuda")) + for operation in operations: + if self.repeat_cell: + stage, operation = operation + stage = 0 + else: + stage, operation = operation + if normal: + for node in self.super_layers_arch[stage][0]: + for connection in node: + reg_loss += connection[ops.PRIMITIVES.index(operation)] * \ + connection[ops.PRIMITIVES.index(operation)] + if reduce: + for node in self.super_layers_arch[stage][1]: + for connection in node: + reg_loss += connection[ops.PRIMITIVES.index(operation)] * \ + connection[ops.PRIMITIVES.index(operation)] + return reg_loss * weight_decay + elif method == 'L1': + reg_loss = torch.tensor(0.).cuda() + for operation in operations: + if self.repeat_cell: + stage, operation = operation + stage = 0 + else: + stage, operation = operation + + if normal: + for node in self.super_layers_arch[stage][0]: + for connection in node: + reg_loss += abs(connection[ops.PRIMITIVES.index(operation)]) + if reduce: + for node in self.super_layers_arch[stage][1]: + for connection in node: + reg_loss += abs(connection[ops.PRIMITIVES.index(operation)]) + return reg_loss * weight_decay + else: + raise ValueError('Method isn\'t supported') \ No newline at end of file diff --git a/lib/models/loss.py b/lib/models/loss.py new file mode 100644 index 0000000..5dfbc91 --- /dev/null +++ b/lib/models/loss.py @@ -0,0 +1,36 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +cos = nn.CosineSimilarity(dim=1, eps=1e-6) +mse = nn.MSELoss() +smooth_l1 = nn.SmoothL1Loss() + +class CrossEntropyLabelSmooth(nn.Module): + + def __init__(self, num_classes, epsilon): + super(CrossEntropyLabelSmooth, self).__init__() + self.num_classes = num_classes + self.epsilon = epsilon + self.logsoftmax = nn.LogSoftmax(dim=1) + + def forward(self, inputs, targets): + log_probs = self.logsoftmax(inputs) + targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1) + targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes + loss = (-targets * log_probs).mean(0).sum() + return loss + +def Loss_interactive(outputs, 
teacher_outputs, T=2, interactive_type=0): + if interactive_type==0: + loss = nn.KLDivLoss()(F.log_softmax(outputs/T, dim=1), F.softmax(teacher_outputs/T, dim=1)) + elif interactive_type==1: + # Cosine distance + loss = -torch.mean(cos(outputs, teacher_outputs)) + elif interactive_type==2: + loss = mse(outputs, teacher_outputs) + elif interactive_type == 3: + loss = smooth_l1(outputs, teacher_outputs) + else: + raise Exception("Wrong interactive type!") + return loss * (T * T) diff --git a/lib/models/model_augment.py b/lib/models/model_augment.py new file mode 100644 index 0000000..47160d2 --- /dev/null +++ b/lib/models/model_augment.py @@ -0,0 +1,57 @@ +import torch.nn as nn +from lib.models import ops + + +class ModelAug(nn.Module): + + def __init__(self, feature_extractor, nas_layers, fc_layer, n_nodes=4, aux_head=None): + """ + args: + + """ + super(ModelAug, self).__init__() + self.feature_extractor = feature_extractor + + self.nas_layers = nas_layers + self.nas_layers_num = len(nas_layers) + self.fc = fc_layer + self.aux_head = aux_head + self.gap = nn.AdaptiveAvgPool2d(1) + + def forward(self, x): + logits_aux = None + if len(self.feature_extractor) == 1: + s0 = self.feature_extractor[0](x) + s1 = s0 + elif len(self.feature_extractor) == 2: + s0 = self.feature_extractor[0](x) + s1 = self.feature_extractor[1](s0) + else: + raise NotImplementedError + + + sp = s1 + for i in range(self.nas_layers_num): + s0, s1 = self.forward_nas_layer(s0, s1, self.nas_layers[i]) + # if i == (self.nas_layers_num * 2 // 3 - 1): + if i == (self.nas_layers_num - 2): + if self.training: + logits_aux = self.aux_head(s1) + + out = self.gap(s1) + out = out.view(out.size(0), -1) # flatten + logits = self.fc(out) + return logits, logits_aux + + def forward_nas_layer(self, s0, s1, nas_layer): + + for cell in nas_layer: + s0, s1 = s1, cell(s0, s1) + + return s0, s1 + + def drop_path_prob(self, p): + """ Set drop path probability """ + for module in self.modules(): + if isinstance(module, ops.DropPath_): + module.p = p diff --git a/lib/models/model_test.py b/lib/models/model_test.py new file mode 100644 index 0000000..ad4ef24 --- /dev/null +++ b/lib/models/model_test.py @@ -0,0 +1,167 @@ +import torch.nn as nn +from lib.models.augment_cells import AugmentCell + +class ModelTest(nn.Module): + + def __init__(self, genotypes_dict, model_type, res_stem=False, init_channel=96, stem_multiplier=3, n_nodes=4, num_classes=1000): + """ + args: + + """ + super(ModelTest, self).__init__() + self.c_in = 3 + self.init_channel = init_channel + self.stem_multiplier = stem_multiplier + self.num_classes = num_classes + self.n_nodes = n_nodes + self.model_type = model_type + self.res_stem = res_stem + + if self.model_type == 'cifar': + reduction_p = False + self.layers_reduction = [True, True, False] + self.augment_layers = [7, 7, 6] + self.nas_layers = nn.ModuleList([None, None, None]) + self.feature_extractor = self.cifar_stem(self.init_channel * self.stem_multiplier) + + elif self.model_type == 'imagenet': + if self.res_stem: + reduction_p = False + self.nas_layers = nn.ModuleList([None, None, None, None]) + self.layers_reduction = [False, True, True, True] + self.augment_layers = [3, 4, 3, 4] + self.feature_extractor = self.resnet_stem(self.init_channel * self.stem_multiplier) + else: + reduction_p = True + self.nas_layers = nn.ModuleList([None, None, None]) + self.layers_reduction = [True, True, False] + self.augment_layers = [5, 5, 4] + self.feature_extractor = self.imagenet_stem(self.init_channel * self.stem_multiplier) + 
else: + raise Exception("Wrong model type!") + + self.nas_layers_num = len(self.nas_layers) + c_p = self.init_channel * self.stem_multiplier + c_pp = self.init_channel * self.stem_multiplier + c_cur = self.init_channel + + for layer_idx, genotype in genotypes_dict.items(): + reduction = self.layers_reduction[layer_idx] + nas_layer = self.generate_nas_layer(c_cur, c_p, c_pp, reduction_p, reduction, genotype, self.augment_layers[layer_idx]) + self.nas_layers[layer_idx] = nas_layer + + if reduction: + c_p = c_cur * 2 * self.n_nodes + else: + c_p = c_cur * self.n_nodes + + if self.res_stem: + c_pp = c_p + reduction_p = False + else: + c_pp = c_cur * self.n_nodes + reduction_p = reduction + + if reduction: + c_cur = c_cur * 2 + else: + c_cur = c_cur + + self.fc = nn.Linear(c_p, self.num_classes) + self.gap = nn.AdaptiveAvgPool2d(1) + + def generate_nas_layer(self, C_cur, C_p, C_pp, reduction_p, reduction_cur, genotype, cell_num=3, bn_affine=True): + cells = nn.ModuleList() + if self.res_stem: + reduction_idx = 0 + else: + reduction_idx = cell_num - 1 + + for i in range(cell_num): + if i == reduction_idx and reduction_cur: + C_cur *= 2 + reduction = True + else: + reduction = False + + cell = AugmentCell(genotype, C_pp, C_p, C_cur, reduction_p, reduction, bn_affine) + reduction_p = reduction + cells.append(cell) + C_cur_out = C_cur * len(cell.concat) + C_pp, C_p = C_p, C_cur_out + + return cells + + def forward(self, x): + s0, s1 = self.extract_features(x) + for i in range(self.nas_layers_num): + s0, s1 = self.forward_nas_layer(s0, s1, self.nas_layers[i]) + + out = self.gap(s1) + out = out.view(out.size(0), -1) # flatten + logits = self.fc(out) + return logits, logits + + def forward_nas_layer(self, s0, s1, nas_layer): + for cell in nas_layer: + s0, s1 = s1, cell(s0, s1) + return s0, s1 + + def extract_features(self, im): + # feature_extractor is nn.ModuleList() + if len(self.feature_extractor) == 1: + s0 = self.feature_extractor[0](im) + s1 = s0 + return [s0, s1] + elif len(self.feature_extractor) == 2: + s0 = self.feature_extractor[0](im) + s1 = self.feature_extractor[1](s0) + return [s0, s1] + else: + raise NotImplementedError + + def resnet_stem(self, inplanes=64): + C_in = self.c_in + feature_extractor = nn.ModuleList() + stem = nn.Sequential( + nn.Conv2d(C_in, inplanes, kernel_size=7, stride=2, padding=3, bias=False), + nn.BatchNorm2d(inplanes), + nn.ReLU(inplace=True), + # the layer1 is concated with maxpool + nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + ) + feature_extractor.append(stem) + return feature_extractor + + def cifar_stem(self, init_channel): + C_in = self.c_in + C_cur = init_channel + feature_extractor = nn.ModuleList() + stem = nn.Sequential( + nn.Conv2d(C_in, C_cur, 3, 1, 1, bias=False), + nn.BatchNorm2d(C_cur) + ) + feature_extractor.append(stem) + return feature_extractor + + def imagenet_stem(self, init_channel): + C_in = self.c_in + C_cur = init_channel + feature_extractor = nn.ModuleList() + stem0 = nn.Sequential( + nn.Conv2d(C_in, C_cur // 2, kernel_size=3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(C_cur // 2), + nn.ReLU(inplace=True), + nn.Conv2d(C_cur // 2, C_cur, 3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(C_cur), + ) + + stem1 = nn.Sequential( + nn.ReLU(inplace=True), + nn.Conv2d(C_cur, C_cur, 3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(C_cur), + ) + feature_extractor.append(stem0) + feature_extractor.append(stem1) + return feature_extractor + diff --git a/lib/models/ops.py b/lib/models/ops.py new file mode 100644 index 
0000000..2edfac8 --- /dev/null +++ b/lib/models/ops.py @@ -0,0 +1,272 @@ +""" Operations """ +import torch +import torch.nn as nn +# from models.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d +# BatchNorm2d = SynchronizedBatchNorm2d +# from apex.parallel import SyncBatchNorm +# BatchNorm2d = SyncBatchNorm +BatchNorm2d = nn.BatchNorm2d + +OPS = { + 'none': lambda C, stride, affine: Zero(stride), + 'avg_pool_3x3': lambda C, stride, affine: Pool('avg', C, 3, stride, 1, affine=affine), + 'max_pool_3x3': lambda C, stride, affine: Pool('max', C, 3, stride, 1, affine=affine), + 'skip_connect': lambda C, stride, affine: \ + Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine), + 'sep_conv_3x3': lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine), + 'sep_conv_5x5': lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine), + 'sep_conv_7x7': lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine), + 'dil_conv_3x3': lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine), # 5x5 + 'dil_conv_5x5': lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine), # 9x9 + 'conv_7x1_1x7': lambda C, stride, affine: FacConv(C, C, 7, stride, 3, affine=affine) +} + +PRIMITIVES = [ + 'max_pool_3x3', + 'avg_pool_3x3', + 'skip_connect', # identity + 'sep_conv_3x3', + 'sep_conv_5x5', + 'dil_conv_3x3', + 'dil_conv_5x5', + 'none' +] + +def channel_shuffle(x, groups): + batchsize, num_channels, height, width = x.data.size() + + channels_per_group = num_channels // groups + + # reshape + x = x.view(batchsize, groups, + channels_per_group, height, width) + + x = torch.transpose(x, 1, 2).contiguous() + + # flatten + x = x.view(batchsize, -1, height, width) + + return x + +def drop_path_(x, drop_prob, training): + if training and drop_prob > 0.: + keep_prob = 1. - drop_prob + # per data point mask; assuming x in cuda. + mask = torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob) + x.div_(keep_prob).mul_(mask) + + return x + + +class DropPath_(nn.Module): + def __init__(self, p=0.): + """ [!] DropPath is inplace module + Args: + p: probability of an path to be zeroed. 
+ """ + super().__init__() + self.p = p + + def extra_repr(self): + return 'p={}, inplace'.format(self.p) + + def forward(self, x): + drop_path_(x, self.p, self.training) + + return x + + +class PoolBN(nn.Module): + """ + AvgPool or MaxPool - BN + """ + def __init__(self, pool_type, C, kernel_size, stride, padding, affine=True): + """ + Args: + pool_type: 'max' or 'avg' + """ + super().__init__() + if pool_type.lower() == 'max': + self.pool = nn.MaxPool2d(kernel_size, stride, padding) + elif pool_type.lower() == 'avg': + self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False) + else: + raise ValueError() + + self.bn = BatchNorm2d(C, affine=affine) + + def forward(self, x): + out = self.pool(x) + out = self.bn(out) + return out + +class Pool(nn.Module): + """ + AvgPool or MaxPool + """ + def __init__(self, pool_type, C, kernel_size, stride, padding, affine=True): + """ + Args: + pool_type: 'max' or 'avg' + """ + super().__init__() + if pool_type.lower() == 'max': + self.pool = nn.MaxPool2d(kernel_size, stride, padding) + elif pool_type.lower() == 'avg': + self.pool = nn.AvgPool2d(kernel_size, stride, padding, count_include_pad=False) + else: + raise ValueError() + + def forward(self, x): + out = self.pool(x) + return out + +class StdConv(nn.Module): + """ Standard conv + ReLU - Conv - BN + """ + def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True): + super().__init__() + self.net = nn.Sequential( + nn.ReLU(), + nn.Conv2d(C_in, C_out, kernel_size, stride, padding, bias=False), + BatchNorm2d(C_out, affine=affine) + ) + + def forward(self, x): + return self.net(x) + + +class FacConv(nn.Module): + """ Factorized conv + ReLU - Conv(Kx1) - Conv(1xK) - BN + """ + def __init__(self, C_in, C_out, kernel_length, stride, padding, affine=True): + super().__init__() + self.net = nn.Sequential( + nn.ReLU(), + nn.Conv2d(C_in, C_in, (kernel_length, 1), stride, padding, bias=False), + nn.Conv2d(C_in, C_out, (1, kernel_length), stride, padding, bias=False), + BatchNorm2d(C_out, affine=affine) + ) + + def forward(self, x): + return self.net(x) + + +class DilConv(nn.Module): + """ (Dilated) depthwise separable conv + ReLU - (Dilated) depthwise separable - Pointwise - BN + + If dilation == 2, 3x3 conv => 5x5 receptive field + 5x5 conv => 9x9 receptive field + """ + def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True): + super().__init__() + self.net = nn.Sequential( + nn.ReLU(), + nn.Conv2d(C_in, C_in, kernel_size, stride, padding, dilation=dilation, groups=C_in, + bias=False), + nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False), + BatchNorm2d(C_out, affine=affine) + ) + + def forward(self, x): + return self.net(x) + + +class SepConv(nn.Module): + """ Depthwise separable conv + DilConv(dilation=1) * 2 + """ + def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True): + super().__init__() + self.net = nn.Sequential( + DilConv(C_in, C_in, kernel_size, stride, padding, dilation=1, affine=affine), + DilConv(C_in, C_out, kernel_size, 1, padding, dilation=1, affine=affine) + ) + + def forward(self, x): + return self.net(x) + + +class Identity(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return x + + +class Zero(nn.Module): + def __init__(self, stride): + super().__init__() + self.stride = stride + + def forward(self, x): + if self.stride == 1: + return x * 0. + + # re-sizing by stride + return x[:, :, ::self.stride, ::self.stride] * 0. 
+
+
+class FactorizedReduce(nn.Module):
+    """
+    Reduce feature map size by factorized pointwise convolutions (stride=2).
+    """
+    def __init__(self, C_in, C_out, affine=True):
+        super().__init__()
+        self.relu = nn.ReLU()
+        self.conv1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
+        self.conv2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False)
+        self.bn = BatchNorm2d(C_out, affine=affine)
+
+    def forward(self, x):
+        x = self.relu(x)
+        # offset the second conv by one pixel so the two stride-2 branches
+        # together cover all spatial positions
+        out = torch.cat([self.conv1(x), self.conv2(x[:, :, 1:, 1:])], dim=1)
+        out = self.bn(out)
+        return out
+
+
+class MixedOp(nn.Module):
+    """ Mixed operation """
+    def __init__(self, C, stride, is_slim=False, k=4):
+        # NOTE: `k` is currently unused; all candidate ops in PRIMITIVES are built
+        super().__init__()
+        self._ops = nn.ModuleList()
+        self.is_slim = is_slim
+
+        for primitive in PRIMITIVES:
+            op = OPS[primitive](C, stride, False)
+            self._ops.append(op)
+
+    def forward(self, x, weights):
+        if self.is_slim:
+            # in slim mode the weights are one-hot masks (see
+            # CDARTSController.process_alpha); run only the selected op
+            if torch.sum(weights) == 0:
+                return 0.
+            else:
+                index = torch.argmax(weights).item()
+                return self._ops[index](x)
+        else:
+            return sum(w * op(x) for w, op in zip(weights, self._ops))
diff --git a/lib/models/search_cells.py b/lib/models/search_cells.py
new file mode 100644
index 0000000..d627ded
--- /dev/null
+++ b/lib/models/search_cells.py
@@ -0,0 +1,53 @@
+""" CNN cell for architecture search """
+import torch
+import torch.nn as nn
+from lib.models import ops
+
+
+class SearchCell(nn.Module):
+    """ Cell for search
+    Each edge is mixed and continuous relaxed.
+    """
+    def __init__(self, n_nodes, C_pp, C_p, C, reduction_p, reduction, is_slim=False):
+        """
+        Args:
+            n_nodes: # of intermediate n_nodes
+            C_pp: C_out[k-2]
+            C_p : C_out[k-1]
+            C   : C_in[k] (current)
+            reduction_p: flag for whether the previous cell is reduction cell or not
+            reduction: flag for whether the current cell is reduction cell or not
+        """
+        super().__init__()
+        self.reduction = reduction
+        self.n_nodes = n_nodes
+
+        # If previous cell is reduction cell, current input size does not match with
+        # output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing.
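+        # preproc0 uses FactorizedReduce (two offset stride-2 1x1 convs) to
+        # halve H/W while producing C channels; otherwise a 1x1 StdConv only
+        # adjusts the channel count. preproc1 always just remaps channels.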
+ if reduction_p: + self.preproc0 = ops.FactorizedReduce(C_pp, C, affine=False) + else: + self.preproc0 = ops.StdConv(C_pp, C, 1, 1, 0, affine=False) + self.preproc1 = ops.StdConv(C_p, C, 1, 1, 0, affine=False) + + # generate dag + self.dag = nn.ModuleList() + for i in range(self.n_nodes): + self.dag.append(nn.ModuleList()) + for j in range(2+i): # include 2 input nodes + # reduction should be used only for input node + stride = 2 if reduction and j < 2 else 1 + op = ops.MixedOp(C, stride, is_slim) + self.dag[i].append(op) + + def forward(self, s0, s1, w_dag, w_edge): + s0 = self.preproc0(s0) + s1 = self.preproc1(s1) + + states = [s0, s1] + for edges, w_list, w_edge_list in zip(self.dag, w_dag, w_edge): + s_cur = sum(w_edge_list[i] * edges[i](s, w) for i, (s, w) in enumerate(zip(states, w_list))) + states.append(s_cur) + + s_out = torch.cat(states[2:], dim=1) + return s_out diff --git a/lib/utils/count_flops.py b/lib/utils/count_flops.py new file mode 100644 index 0000000..d4dd71a --- /dev/null +++ b/lib/utils/count_flops.py @@ -0,0 +1,46 @@ +""" Search cell """ +import json +import lib.utils.genotypes as gt + +from torchscope import scope +from lib.models.model_test import ModelTest + +# config +stem_multiplier = 1 +n_classes = 1000 +init_channels = 48 +model_type = 'imagenet' +cell_file = './genotypes.json' + + +#stem_multiplier = 3 +#n_classes = 10 +#init_channels = 36 +#model_type = 'cifar' +#cell_file = './genotypes.json' + +def main(): + file = open(cell_file, 'r') + js = file.read() + r_dict = json.loads(js) + + file.close() + genotypes_dict = {} + for layer_idx, genotype in r_dict.items(): + genotypes_dict[int(layer_idx)] = gt.from_str(genotype) + + model_main = ModelTest(genotypes_dict, model_type, res_stem=False, init_channel=init_channels, \ + stem_multiplier=stem_multiplier, n_nodes=4, num_classes=n_classes) + + if 'cifar' in model_type: + input_x = (3, 32, 32) + elif 'imagenet' in model_type: + input_x = (3, 224, 224) + else: + raise Exception("Not support dataset!") + + scope(model_main, input_size=input_x) + + +if __name__ == "__main__": + main() diff --git a/lib/utils/genotypes.py b/lib/utils/genotypes.py new file mode 100644 index 0000000..e527efb --- /dev/null +++ b/lib/utils/genotypes.py @@ -0,0 +1,159 @@ +""" Genotypes + - Genotype: normal/reduce gene + normal/reduce cell output connection (concat) + - gene: discrete ops information (w/o output connection) + - dag: real ops (can be mixed or discrete, but Genotype has only discrete information itself) +""" +from collections import namedtuple +import torch +import torch.nn as nn +import torch.nn.functional as F +from lib.models import ops +from lib.models.ops import PRIMITIVES + +Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') + +def to_dag(C_in, gene, reduction, bn_affine=True): + """ generate discrete ops from gene """ + dag = nn.ModuleList() + for edges in gene: + row = nn.ModuleList() + for op_name, s_idx in edges: + # reduction cell & from input nodes => stride = 2 + stride = 2 if reduction and s_idx < 2 else 1 + op = ops.OPS[op_name](C_in, stride, bn_affine) + if not isinstance(op, ops.Identity): # Identity does not use drop path + op = nn.Sequential( + op, + ops.DropPath_() + ) + op.s_idx = s_idx + row.append(op) + dag.append(row) + + return dag + + +def from_str(s): + """ generate genotype from string + e.g. 
"Genotype( + normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)], + [('sep_conv_3x3', 1), ('dil_conv_3x3', 2)], + [('sep_conv_3x3', 1), ('sep_conv_3x3', 2)], + [('sep_conv_3x3', 1), ('dil_conv_3x3', 4)]], + normal_concat=range(2, 6), + reduce=[[('max_pool_3x3', 0), ('max_pool_3x3', 1)], + [('max_pool_3x3', 0), ('skip_connect', 2)], + [('max_pool_3x3', 0), ('skip_connect', 2)], + [('max_pool_3x3', 0), ('skip_connect', 2)]], + reduce_concat=range(2, 6))" + """ + + genotype = eval(s) + + return genotype + + +def parse(alpha, beta, k): + """ + parse continuous alpha to discrete gene. + alpha is ParameterList: + ParameterList [ + Parameter(n_edges1, n_ops), + Parameter(n_edges2, n_ops), + ... + ] + + beta is ParameterList: + ParameterList [ + Parameter(n_edges1), + Parameter(n_edges2), + ... + ] + + gene is list: + [ + [('node1_ops_1', node_idx), ..., ('node1_ops_k', node_idx)], + [('node2_ops_1', node_idx), ..., ('node2_ops_k', node_idx)], + ... + ] + each node has two edges (k=2) in CNN. + """ + + gene = [] + assert PRIMITIVES[-1] == 'none' # assume last PRIMITIVE is 'none' + + # 1) Convert the mixed op to discrete edge (single op) by choosing top-1 weight edge + # 2) Choose top-k edges per node by edge score (top-1 weight in edge) + # output the connect idx[(node_idx, connect_idx, op_idx).... () ()] + connect_idx = [] + for edges, w in zip(alpha, beta): + # edges: Tensor(n_edges, n_ops) + edge_max, primitive_indices = torch.topk((w.view(-1, 1) * edges)[:, :-1], 1) # ignore 'none' + topk_edge_values, topk_edge_indices = torch.topk(edge_max.view(-1), k) + node_gene = [] + node_idx = [] + for edge_idx in topk_edge_indices: + prim_idx = primitive_indices[edge_idx] + prim = PRIMITIVES[prim_idx] + node_gene.append((prim, edge_idx.item())) + node_idx.append((edge_idx.item(), prim_idx.item())) + + gene.append(node_gene) + connect_idx.append(node_idx) + + return gene, connect_idx + +def parse_gumbel(alpha, beta, k): + """ + parse continuous alpha to discrete gene. + alpha is ParameterList: + ParameterList [ + Parameter(n_edges1, n_ops), + Parameter(n_edges2, n_ops), + ... + ] + + beta is ParameterList: + ParameterList [ + Parameter(n_edges1), + Parameter(n_edges2), + ... + ] + + gene is list: + [ + [('node1_ops_1', node_idx), ..., ('node1_ops_k', node_idx)], + [('node2_ops_1', node_idx), ..., ('node2_ops_k', node_idx)], + ... + ] + each node has two edges (k=2) in CNN. + """ + + gene = [] + assert PRIMITIVES[-1] == 'none' # assume last PRIMITIVE is 'none' + + # 1) Convert the mixed op to discrete edge (single op) by choosing top-1 weight edge + # 2) Choose top-k edges per node by edge score (top-1 weight in edge) + # output the connect idx[(node_idx, connect_idx, op_idx).... 
() ()]
+    connect_idx = []
+    for edges, w in zip(alpha, beta):
+        # edges: Tensor(n_edges, n_ops)
+        # draw k hard (one-hot) Gumbel samples over all (edge, op) pairs,
+        # excluding the trailing 'none' op
+        discrete_a = F.gumbel_softmax(edges[:, :-1].reshape(-1), tau=1, hard=True)
+        for i in range(k-1):
+            discrete_a = discrete_a + F.gumbel_softmax(edges[:, :-1].reshape(-1), tau=1, hard=True)
+        discrete_a = discrete_a.reshape(-1, len(PRIMITIVES)-1)
+        reserved_edge = (discrete_a > 0).nonzero()
+
+        node_gene = []
+        node_idx = []
+        for i in range(reserved_edge.shape[0]):
+            edge_idx = reserved_edge[i][0].item()
+            prim_idx = reserved_edge[i][1].item()
+            prim = PRIMITIVES[prim_idx]
+            node_gene.append((prim, edge_idx))
+            node_idx.append((edge_idx, prim_idx))
+
+        gene.append(node_gene)
+        connect_idx.append(node_idx)
+
+    return gene, connect_idx
\ No newline at end of file
diff --git a/lib/utils/utils.py b/lib/utils/utils.py
new file mode 100644
index 0000000..b7e7323
--- /dev/null
+++ b/lib/utils/utils.py
@@ -0,0 +1,117 @@
+""" Utilities """
+import os
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import logging
+import shutil
+import torch.distributed as dist
+import numpy as np
+
+
+class AverageMeter():
+    """ Computes and stores the average and current value """
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        """ Reset all statistics """
+        self.val = 0
+        self.avg = 0
+        self.sum = 0
+        self.count = 0
+
+    def update(self, val, n=1):
+        """ Update statistics """
+        self.val = val
+        self.sum += val * n
+        self.count += n
+        self.avg = self.sum / self.count
+
+def get_logger(file_path):
+    """ Make python logger """
+    logger = logging.getLogger('cdarts')
+    log_format = '%(asctime)s | %(message)s'
+    formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p')
+    file_handler = logging.FileHandler(file_path)
+    file_handler.setFormatter(formatter)
+    stream_handler = logging.StreamHandler()
+    stream_handler.setFormatter(formatter)
+
+    logger.addHandler(file_handler)
+    logger.addHandler(stream_handler)
+    logger.setLevel(logging.INFO)
+
+    return logger
+
+
+def param_size(model):
+    """ Compute parameter size in MB """
+    n_params = sum(
+        np.prod(v.size()) for k, v in model.named_parameters() if not k.startswith('aux_head'))
+    return n_params / 1e6
+
+def print_speed(i, i_time, n, logger):
+    """print_speed(index, index_time, total_iteration)"""
+    average_time = i_time
+    remaining_time = (n - i) * average_time
+    remaining_day = math.floor(remaining_time / 86400)
+    remaining_hour = math.floor(remaining_time / 3600 - remaining_day * 24)
+    remaining_min = math.floor(remaining_time / 60 - remaining_day * 1440 - remaining_hour * 60)
+    logger.info('Progress: %d / %d [%d%%], Speed: %.3f s/iter, ETA %d:%02d:%02d (D:H:M)\n' % (i, n, i/n*100, average_time, remaining_day, remaining_hour, remaining_min))
+    logger.info('\nPROGRESS: {:.2f}%\n'.format(100 * i / n))
+
+
+def accuracy(output, target, topk=(1,)):
+    """ Computes the precision@k for the specified values of k """
+    maxk = max(topk)
+    batch_size = target.size(0)
+
+    _, pred = output.topk(maxk, 1, True, True)
+    pred = pred.t()
+    # one-hot case
+    if target.ndimension() > 1:
+        target = target.max(1)[1]
+
+    correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+    res = []
+    for k in topk:
+        correct_k = correct[:k].view(-1).float().sum(0)
+        res.append(correct_k.mul_(1.0 / batch_size))
+
+    return res
+
+def save_checkpoint(state, ckpt_dir, is_best=False):
+    filename = os.path.join(ckpt_dir, 'checkpoint.pth.tar')
+    torch.save(state, filename)
+    if is_best:
+        best_filename = os.path.join(ckpt_dir, 'best.pth.tar')
torch.save(state, best_filename) + # shutil.copyfile(filename, best_filename) + +def reduce_tensor(tensor, world_size): + rt = tensor.clone() + dist.all_reduce(rt, op=dist.ReduceOp.SUM) + rt /= world_size + return rt + +def drop_path_(x, drop_prob, training): + if training and drop_prob > 0.: + keep_prob = 1. - drop_prob + # per data point mask; assuming x in cuda. + mask = torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob) + x.div_(keep_prob).mul_(mask) + + return x + +def adjust_lr(optimizer, epoch, config): + # Smaller slope for the last 5 epochs because lr * 1/250 is relatively large + if config.epochs - epoch > 5: + lr = config.lr * (config.epochs - 5 - epoch) / (config.epochs - 5) + else: + lr = config.lr * (config.epochs - epoch) / ((config.epochs - 5) * 5) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + return lr diff --git a/lib/utils/visualize.py b/lib/utils/visualize.py new file mode 100644 index 0000000..773ea18 --- /dev/null +++ b/lib/utils/visualize.py @@ -0,0 +1,74 @@ +""" Network architecture visualizer using graphviz """ +import sys +from graphviz import Digraph +import lib.utils.genotypes as gt + + +def plot(genotype, file_path, caption=None): + """ make DAG plot and save to file_path as .png """ + edge_attr = { + 'fontsize': '20', + 'fontname': 'times' + } + node_attr = { + 'style': 'filled', + 'shape': 'rect', + 'align': 'center', + 'fontsize': '20', + 'height': '0.5', + 'width': '0.5', + 'penwidth': '2', + 'fontname': 'times' + } + g = Digraph( + format='png', + edge_attr=edge_attr, + node_attr=node_attr, + engine='dot') + g.body.extend(['rankdir=LR']) + + # input nodes + g.node("c_{k-2}", fillcolor='darkseagreen2') + g.node("c_{k-1}", fillcolor='darkseagreen2') + + # intermediate nodes + n_nodes = len(genotype) + for i in range(n_nodes): + g.node(str(i), fillcolor='lightblue') + + for i, edges in enumerate(genotype): + for op, j in edges: + if j == 0: + u = "c_{k-2}" + elif j == 1: + u = "c_{k-1}" + else: + u = str(j-2) + + v = str(i) + g.edge(u, v, label=op, fillcolor="gray") + + # output node + g.node("c_{k}", fillcolor='palegoldenrod') + for i in range(n_nodes): + g.edge(str(i), "c_{k}", fillcolor="gray") + + # add image caption + if caption: + g.attr(label=caption, overlap='false', fontsize='20', fontname='times') + + g.render(file_path, view=False) + + +if __name__ == '__main__': + if len(sys.argv) != 2: + raise ValueError("usage:\n python {} GENOTYPE".format(sys.argv[0])) + + genotype_str = sys.argv[1] + try: + genotype = gt.from_str(genotype_str) + except AttributeError: + raise ValueError("Cannot parse {}".format(genotype_str)) + + plot(genotype.normal, "normal") + plot(genotype.reduce, "reduction") diff --git a/requirements b/requirements new file mode 100644 index 0000000..92c94a1 --- /dev/null +++ b/requirements @@ -0,0 +1,5 @@ +graphviz +torch==1.2 +torchvision==0.2 +tensorboard +tensorboardX \ No newline at end of file
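
A minimal usage sketch of the modules added above (illustrative only, with
assumed paths): it reads a ./genotypes.json file mapping layer index to a
genotype string, in the format consumed by lib/utils/count_flops.py, and
builds the ImageNet test model from it.

    import json
    import torch
    import lib.utils.genotypes as gt
    from lib.models.model_test import ModelTest

    # keys are layer indices, values are Genotype strings (see gt.from_str)
    with open('./genotypes.json') as f:
        genotypes_dict = {int(k): gt.from_str(v) for k, v in json.load(f).items()}

    # ImageNet config mirroring lib/utils/count_flops.py
    model = ModelTest(genotypes_dict, 'imagenet', res_stem=False,
                      init_channel=48, stem_multiplier=1, n_nodes=4,
                      num_classes=1000)
    logits, _ = model(torch.randn(1, 3, 224, 224))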