From f9e5639a7557ff5ef1792303ed45480749f2a020 Mon Sep 17 00:00:00 2001 From: SanghyukChun Date: Fri, 26 Jun 2020 19:36:29 +0900 Subject: [PATCH] initial commit --- .gitignore | 122 +++ LICENSE | 19 + NOTICE | 167 ++++ README.md | 408 ++++++++++ clusters/cluster_label_1.pth | Bin 0 -> 17138 bytes clusters/cluster_label_2.pth | Bin 0 -> 17138 bytes clusters/cluster_label_3.pth | Bin 0 -> 17138 bytes criterions/__init__.py | 22 + criterions/comparison_methods.py | 97 +++ criterions/dist.py | 31 + criterions/hsic.py | 127 ++++ criterions/sigma_utils.py | 66 ++ datasets/__init__.py | 25 + datasets/colour_mnist.py | 190 +++++ datasets/imagenet.py | 166 ++++ datasets/kinetics.py | 24 + datasets/kinetics_tools/__init__.py | 3 + datasets/kinetics_tools/decoder.py | 236 ++++++ datasets/kinetics_tools/kinetics.py | 429 +++++++++++ datasets/kinetics_tools/loader.py | 79 ++ datasets/kinetics_tools/meters.py | 413 ++++++++++ datasets/kinetics_tools/transform.py | 121 +++ datasets/kinetics_tools/video_container.py | 16 + datasets/mimetics/download.py | 225 ++++++ datasets/mimetics/mimetics_v1.0.csv | 714 ++++++++++++++++++ datasets/mimetics/mimetics_v1.0_clsannot.txt | 51 ++ .../mimetics_v1.0_objectsceneannot.txt | 714 ++++++++++++++++++ evaluator.py | 291 +++++++ logger.py | 80 ++ main_action.py | 185 +++++ main_biased_mnist.py | 138 ++++ main_imagenet.py | 157 ++++ make_clusters.py | 154 ++++ models/__init__.py | 24 + models/action_models/ResNet3D.py | 255 +++++++ models/action_models/__init__.py | 3 + models/action_models/head_helper.py | 135 ++++ models/action_models/nonlocal_helper.py | 163 ++++ models/action_models/resnet_helper.py | 507 +++++++++++++ models/action_models/stem_helper.py | 171 +++++ models/action_models/weight_init_helper.py | 41 + models/imagenet_models.py | 385 ++++++++++ models/mnist_models.py | 60 ++ models/rebias_models.py | 65 ++ optims/__init__.py | 37 + requirements.txt | 5 + resources/Table1.png | Bin 0 -> 62858 bytes trainer.py | 480 ++++++++++++ 48 files changed, 7801 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 NOTICE create mode 100644 README.md create mode 100644 clusters/cluster_label_1.pth create mode 100644 clusters/cluster_label_2.pth create mode 100644 clusters/cluster_label_3.pth create mode 100644 criterions/__init__.py create mode 100644 criterions/comparison_methods.py create mode 100644 criterions/dist.py create mode 100644 criterions/hsic.py create mode 100644 criterions/sigma_utils.py create mode 100644 datasets/__init__.py create mode 100644 datasets/colour_mnist.py create mode 100644 datasets/imagenet.py create mode 100644 datasets/kinetics.py create mode 100644 datasets/kinetics_tools/__init__.py create mode 100644 datasets/kinetics_tools/decoder.py create mode 100644 datasets/kinetics_tools/kinetics.py create mode 100644 datasets/kinetics_tools/loader.py create mode 100644 datasets/kinetics_tools/meters.py create mode 100644 datasets/kinetics_tools/transform.py create mode 100644 datasets/kinetics_tools/video_container.py create mode 100644 datasets/mimetics/download.py create mode 100644 datasets/mimetics/mimetics_v1.0.csv create mode 100644 datasets/mimetics/mimetics_v1.0_clsannot.txt create mode 100644 datasets/mimetics/mimetics_v1.0_objectsceneannot.txt create mode 100644 evaluator.py create mode 100644 logger.py create mode 100644 main_action.py create mode 100644 main_biased_mnist.py create mode 100644 main_imagenet.py create mode 100644 make_clusters.py create mode 100644 models/__init__.py create mode 
100644 models/action_models/ResNet3D.py create mode 100644 models/action_models/__init__.py create mode 100644 models/action_models/head_helper.py create mode 100644 models/action_models/nonlocal_helper.py create mode 100644 models/action_models/resnet_helper.py create mode 100644 models/action_models/stem_helper.py create mode 100644 models/action_models/weight_init_helper.py create mode 100644 models/imagenet_models.py create mode 100644 models/mnist_models.py create mode 100644 models/rebias_models.py create mode 100644 optims/__init__.py create mode 100644 requirements.txt create mode 100644 resources/Table1.png create mode 100644 trainer.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0d480f2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,122 @@ +*.swp +original/ +checkpoints*/ +scripts/ +*.sh +*_old.* +*_temp.* + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +rebis_baselines + +checkpoints/ + +nsml_run_gsheet.py +main_toy_3d_m_2d.py + +tmp +root diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..dfe5c28 --- /dev/null +++ b/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2020-present NAVER Corp. + + Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..4f0f18b --- /dev/null +++ b/NOTICE @@ -0,0 +1,167 @@ +ReBias +Copyright (c) 2020-present NAVER Corp. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +-------------------------------------------------------------------------------------- + +This project contains subcomponents with separate copyright notices and license terms. +Your use of the source code for these subcomponents is subject to the terms and conditions of the following licenses. + +===== + +facebookresearch/SlowFast +https://github.com/facebookresearch/SlowFast + + +Copyright 2019, Facebook, Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +===== + +pytorch/vision +https://github.com/pytorch/vision + + +BSD 3-Clause License + +Copyright (c) Soumith Chintala 2016, +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +===== + +wielandbrendel/bag-of-local-features-models +https://github.com/wielandbrendel/bag-of-local-features-models + + +Copyright (c) 2019 Wieland Brendel + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +===== + +cdancette/rubi.bootstrap.pytorch +https://github.com/cdancette/rubi.bootstrap.pytorch + + +BSD 3-Clause License + +Copyright (c) 2019+, Remi Cadene, Corentin Dancette +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +===== + +chrisc36/debias +https://github.com/chrisc36/debias + + +"Don’t Take the Easy Way Out: Ensemble Based Methods for Avoiding Known Dataset Biases". 
Christopher Clark, Mark Yatskar, Luke Zettlemoyer. In EMNLP 2019. + +--- + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +===== diff --git a/README.md b/README.md new file mode 100644 index 0000000..f32ce8c --- /dev/null +++ b/README.md @@ -0,0 +1,408 @@ +## Learning De-biased Representations with Biased Representations (ICML 2020) + +Official Pytorch implementation of **ReBias** | [Paper](https://arxiv.org/abs/1910.02806) + +Hyojin Bahng1, Sanghyuk Chun2, Sangdoo Yun2, Jaegul Choo3, Seong Joon Oh2 + +1 Korea university +2 Clova AI Research, NAVER Corp. +3 KAIST + +Many machine learning algorithms are trained and evaluated by splitting data from a single source into training and test sets. While such focus on *in-distribution* learning scenarios has led to interesting advancement, it has not been able to tell if models are relying on dataset biases as shortcuts for successful prediction (*e.g.*, using snow cues for recognising snowmobiles). Such biased models fail to generalise when the bias shifts to a different class. The *cross-bias generalisation* problem has been addressed by de-biasing training data through augmentation or re-sampling, which are often prohibitive due to the data collection cost (*e.g.*, collecting images of a snowmobile on a desert) and the difficulty of quantifying or expressing biases in the first place. In this work, we propose a novel framework to train a de-biased representation by encouraging it to be *different* from a set of representations that are biased by design. This tactic is feasible in many scenarios where it is much easier to define a set of biased representations than to define and quantify bias. We demonstrate the efficacy of our method across a variety of synthetic and real-world biases. Our experiments and analyses show that the method discourages models from taking bias shortcuts, resulting in improved generalisation. + +## Updates + +- **TBD Jun, 2020**: Initial upload. + +## Summary of code contributions + +The code repository contains the implementations of our method (**ReBias**) as well as prior de-biasing methods empirically compared in the paper. Specifically, we provide codes for: + +- **ReBias** (ours): Hilbert Schmidt Independence Criterion (HSIC) based minimax optimization. See [criterions/hsic.py](criterions/hsic.py) +- **Vanilla** and **Biased** architectures. See [models/mnist\_models.py](models/mnist_models.py), [models/imagenet\_models.py](models/imagenet_models.py), and [models/action\_models/ResNet3D.py](models/action_models/ResNet3D.py). +- **Learned Mixin** ([1] Clark, et al. 2019): [criterions/comparison\_methods.py](criterions/comparison_methods.py) +- **RUBi** ([2] Cadene, et al. 2019): [criterions/comparison\_methods.py](criterions/comparison_methods.py) + +We support training and evaluation of above methods on the three diverse datasets and tasks. See [trainer.py](trainer.py) and [evaluator.py](evaluator.py) for the unified framework. 
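To make the independence objective concrete, below is a minimal sketch of a biased empirical HSIC estimator with Gaussian (RBF) kernels between the de-biased representation f(x) and a biased representation g(x). It is illustrative only and is not the exact code in [criterions/hsic.py](criterions/hsic.py) (which may differ, e.g., in the estimator variant or normalisation); the `sigma` arguments play the role of the kernel radii controlled by the `rbf_sigma_x`/`rbf_sigma_y` options of the training scripts.

```python
# Sketch only (not the repository implementation): biased empirical HSIC
# estimator, HSIC_b(f, g) = trace(K H L H) / (n - 1)^2, with RBF kernels.
import torch


def _rbf_kernel(feats, sigma):
    # feats: (n, d) feature matrix -> (n, n) Gaussian kernel matrix.
    sq_norms = (feats ** 2).sum(dim=1)
    sq_dists = (sq_norms[:, None] + sq_norms[None, :] - 2.0 * feats @ feats.t()).clamp(min=0)
    return torch.exp(-sq_dists / (2 * sigma ** 2))


def hsic(f_feats, g_feats, sigma_f=1.0, sigma_g=1.0):
    n = f_feats.size(0)
    k_f = _rbf_kernel(f_feats, sigma_f)  # kernel matrix of the f features
    k_g = _rbf_kernel(g_feats, sigma_g)  # kernel matrix of the g features
    h = torch.eye(n, device=k_f.device) - torch.ones(n, n, device=k_f.device) / n  # centering matrix
    return torch.trace(k_f @ h @ k_g @ h) / ((n - 1) ** 2)
```

During training, f is updated to decrease this dependence term (weighted by `f_lambda_outer`) on top of its classification loss, while each g is updated in the opposite direction (weighted by `g_lambda_inner`); as noted below, setting both weights to 0 disables the minimax coupling and recovers the separate Vanilla and Biased baselines.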
Supported datasets and tasks are: + +- **Biased MNIST** (Section 4.2): [main\_biased\_mnist.py](main_biased_mnist.py) and [datasets/colour\_mnist.py](datasets/colour_mnist.py) +- **ImageNet** (Section 4.3): [main\_imagenet.py](main_imagenet.py), [datasets/imagenet.py](datasets/imagenet.py) and [make\_clusters.py](make_clusters.py) +- **Action recognition** (Section 4.4): [main\_action.py](main_action.py) and [datasets/kinetics.py](datasets/kinetics.py) + +In this implementation, we set `Adam` as the default optimiser for reproducibility. However, we strongly recommend using the better optimiser `AdamP` [3] via `--optim AdamP` for future research. We refer interested users to [the official repository of AdamP](https://github.com/clovaai/adamp). + +## Installation + +### MNIST and ImageNet experiments + +Our implementations are tested on the following libraries with Python 3.7. + +``` +fire +munch +torch==1.1.0 +torchvision==0.2.2.post3 +adamp +``` + +Install dependencies using the following command. + +```bash +pip install -r requirements.txt +``` + +### Action recognition experiments + +For action recognition tasks, we implement the baselines on top of the [official implementation of SlowFast](https://github.com/facebookresearch/SlowFast). + +**NOTE: We will not handle the issues from action recognition experiments.** + +Please follow the official SlowFast instructions: +https://github.com/facebookresearch/SlowFast/blob/master/INSTALL.md + +## Dataset preparation + +### Biased MNIST + +Biased MNIST is a colour-biased version of the original MNIST. [datasets/colour\_mnist.py](datasets/colour_mnist.py) downloads the original MNIST and applies the colour biases to the images by itself. No extra preparation is needed on the user side. + +### ImageNet + +We do not provide detailed instructions for collecting the ImageNet (ILSVRC2015) dataset. +Please follow the usual practice. + +#### ImageNet-A and ImageNet-C + +To further measure the generalisability of de-biasing methods, we perform evaluations on ImageNet-A ([4] Hendrycks, et al. 2019) and ImageNet-C ([5] Hendrycks, et al. 2019) as well. The datasets are available at https://github.com/hendrycks/natural-adv-examples and https://github.com/hendrycks/robustness, respectively. + +NOTE: We implement the ImageNet-C evaluator separately from this implementation, and do not provide the implementation here. Please refer to [5] for details. + +### Kinetics and Mimetics + +We use two datasets for action recognition: Kinetics and Mimetics ([6] Weinzaepfel, et al. 2019). + +Kinetics and Mimetics datasets are available at: + +- Kinetics: https://github.com/facebookresearch/SlowFast/blob/master/slowfast/datasets/DATASET.md +- Mimetics: https://europe.naverlabs.com/research/computer-vision/mimetics/ + +**NOTE: We will not handle the issues from action recognition experiments.** + +## How to run + +### Biased MNIST results in Table 1 + +![Table1](./resources/Table1.png) + +Main experiments for the Biased MNIST are configured in [main\_biased\_mnist.py](main_biased_mnist.py). +Note that we have reported the average of three runs in the main paper; the standard deviations are reported in the appendix. + +NOTE: We do not provide the HEX [7] implementation, which is significantly different from the other baselines. It does not require any biased model, but instead relies on a pre-defined handcrafted feature extractor named NGLCM. Thus, instead of providing HEX under the unified framework, we have implemented it separately.
Please refer to [official HEX implementation](https://github.com/HaohanWang/HEX) for details. + +#### ReBias (ours) + +For the better results with AdamP +```bash +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.999 --optim AdamP + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.997 --optim AdamP + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.995 --optim AdamP + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.99 --optim AdamP +``` + +For the original numbers, +```bash +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.999 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.997 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.995 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.99 +``` + +#### Vanilla & Biased + +By setting `f_lambda_outer` and `g_lambda_inner` to 0, f and g are trained separately without minimax optimization. + +```bash +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.999 + --f_lambda_outer 0 + --g_lambda_inner 0 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.997 + --f_lambda_outer 0 + --g_lambda_inner 0 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.995 + --f_lambda_outer 0 + --g_lambda_inner 0 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.99 + --f_lambda_outer 0 + --g_lambda_inner 0 +``` + +#### Learned Mixin + +In our experiments, we first pretrain g networks for the Learned Mixin and optimize F using the fixed g. +Hence, `n_g_pretrain_epochs` and `n_g_update` are set to 5 and 0, respectively. + +```bash +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.999 + --outer_criterion LearnedMixin + --g_lambda_inner 0 + --n_g_pretrain_epochs 5 + --n_g_update 0 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.997 + --outer_criterion LearnedMixin + --g_lambda_inner 0 + --n_g_pretrain_epochs 5 + --n_g_update 0 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.995 + --outer_criterion LearnedMixin + --g_lambda_inner 0 + --n_g_pretrain_epochs 5 + --n_g_update 0 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.99 + --outer_criterion LearnedMixin + --g_lambda_inner 0 + --n_g_pretrain_epochs 5 + --n_g_update 0 +``` + +#### RUBi + +RUBi updates F and g simultaneously but separately. +We set `g_lambda_inner` to 0 for only updating g network using the classification loss. 
+ +```bash +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.999 + --outer_criterion RUBi + --g_lambda_inner 0 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.997 + --outer_criterion RUBi + --g_lambda_inner 0 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.995 + --outer_criterion RUBi + --g_lambda_inner 0 + +python main_biased_mnist.py --root /path/to/your/dataset + --train_correlation 0.99 + --outer_criterion RUBi + --g_lambda_inner 0 +``` + +### ImageNet results in Table 2 + +| Model | Biased (Standard acc) | Unbiased (Texture clustering) | ImageNet-A [4] | +|-----------------------|-----------------------|-------------------------------|----------------| +| Vanilla (ResNet18) | 90.8 | 88.8 | 24.9 | +| Biased (BagNet18) | 67.7 | 65.9 | 19.5 | +| Stylised ImageNet [8] | 88.4 | 86.6 | 24.6 | +| LearnedMixin [1] | 67.9 | 65.9 | 18.8 | +| RUBi [2] | 90.5 | 88.6 | 27.7 | +| ReBias (ours) | **91.9** | **90.5** | **29.6** | + +Main experiments for ImageNet are configured in `main_imagenet.py`. +Note that we have reported the average of three runs in the main paper; the standard deviations are reported in the appendix. + +#### ReBias (ours) + +Our default setting for the HSIC kernel radius is the median, which requires computing pairwise distances over the training dataset. +We set the kernel radius to the median of these distances. Thus, `rbf_sigma_x` and `rbf_sigma_y` are set to `median`, and we recompute the kernel radius every epoch. + +To avoid unnecessary computation, we set `rbf_sigma_x` and `rbf_sigma_y` to 1 for the other methods. + +```bash +python main_imagenet.py --train_root /path/to/your/imagenet/train + --val_root /path/to/your/imagenet/val + --imageneta_root /path/to/your/imagenet_a + --optim AdamP +``` + +For the original numbers, +```bash +python main_imagenet.py --train_root /path/to/your/imagenet/train + --val_root /path/to/your/imagenet/val + --imageneta_root /path/to/your/imagenet_a +``` + +#### Vanilla & Biased + +```bash +python main_imagenet.py --train_root /path/to/your/imagenet/train + --val_root /path/to/your/imagenet/val + --imageneta_root /path/to/your/imagenet_a + --f_lambda_outer 0 + --g_lambda_inner 0 + --rbf_sigma_x 1 + --rbf_sigma_y 1 +``` + +#### Learned Mixin + +```bash +python main_imagenet.py --train_root /path/to/your/imagenet/train + --val_root /path/to/your/imagenet/val + --imageneta_root /path/to/your/imagenet_a + --outer_criterion LearnedMixin + --n_g_pretrain_epochs 30 + --n_g_update 0 + --rbf_sigma_x 1 + --rbf_sigma_y 1 +``` + +#### RUBi + +```bash +python main_imagenet.py --train_root /path/to/your/imagenet/train + --val_root /path/to/your/imagenet/val + --imageneta_root /path/to/your/imagenet_a + --outer_criterion RUBi + --g_lambda_inner 0 + --rbf_sigma_x 1 + --rbf_sigma_y 1 +``` + +#### Stylised ImageNet + +We train Stylised ImageNet by training the vanilla model on the original ImageNet train dataset combined with the stylised train dataset (IN + SIN in [8]). +Please follow the instructions in the following repositories to build the stylised ImageNet training dataset.
+ +- https://github.com/rgeirhos/Stylized-ImageNet +- https://github.com/bethgelab/stylize-datasets + +Run the following command + +```bash +python main_imagenet.py --train_root /path/to/your/imagenet_and_stylised_imagenet/train + --val_root /path/to/your/imagenet/val + --imageneta_root /path/to/your/imagenet_a + --f_lambda_outer 0 + --g_lambda_inner 0 +``` + +Note that the command is identical to the **Vanilla & Biased** case, except that the training set is now replaced by the Stylized ImageNet. + +### Action recognition results in Table 3 + +**NOTE: We will not handle the issues from action recognition experiments.** + +| Model | Biased (Kinetics) | Unbiased (Mimetics) [6] | +|---------------------|-------------------|-------------------------| +| Vanilla (3D-ResNet) | 54.5 | 18.9 | +| Biased (2D-ResNet) | 50.7 | 18.4 | +| LearnedMixin [1] | 11.4 | 12.3 | +| RUBi [2] | 23.6 | 13.4 | +| ReBias (ours) | **55.8** | **22.4** | + +#### ReBias (ours) + +```bash +python main_action.py --train_root /path/to/your/kinetics/train + --train_annotation_file /path/to/your/kinetics/train_annotion + --eval_root /path/to/your/mimetics/train + --eval_annotation_file /path/to/your/kinetics/train_annotion +``` + +#### Learned Mixin + +```bash +python main_action.py --train_root /path/to/your/kinetics/train + --train_annotation_file /path/to/your/kinetics/train_annotion + --eval_root /path/to/your/mimetics/train + --eval_annotation_file /path/to/your/kinetics/train_annotion + --outer_criterion LearnedMixin + --n_g_pretrain_epochs 30 + --n_g_update 0 + --rbf_sigma_x 1 + --rbf_sigma_y 1 +``` + +#### RUBi + +```bash +python main_action.py --train_root /path/to/your/kinetics/train + --train_annotation_file /path/to/your/kinetics/train_annotion + --eval_root /path/to/your/mimetics/train + --eval_annotation_file /path/to/your/kinetics/train_annotion + --outer_criterion RUBi + --g_lambda_inner 0 + --rbf_sigma_x 1 + --rbf_sigma_y 1 +``` + +## License + +This project is distributed under [MIT license](LICENSE). + +``` +Copyright (c) 2020-present NAVER Corp. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +``` + +## How to cite + +``` +@inproceedings{bahng2019rebias, + title={Learning De-biased Representations with Biased Representations}, + author={Bahng, Hyojin and Chun, Sanghyuk and Yun, Sangdoo and Choo, Jaegul and Oh, Seong Joon}, + year={2020}, + booktitle={International Conference on Machine Learning (ICML)}, +} +``` + +## References + +- [1] Clark, C., Yatskar, M., and Zettlemoyer, L. 
Don’t take the easy way out: Ensemble based methods for avoiding known dataset biases. EMNLP, 2019. +- [2] Cadene, R., Dancette, C., Cord, M., Parikh, D., et al. Rubi: Reducing unimodal biases for visual question answering. In Advances in Neural Information Processing Systems, 2019. +- [3] Heo, B., Chun, S., Oh, S. J., Han, D., Yun, S., Uh, Y., and Ha, J. W. Slowing Down the Weight Norm Increase in Momentum-based Optimizers. arXiv preprint arXiv:2006.08217, 2020. +- [4] Hendrycks, D., Zhao, K., Basart, S., Steinhardt, J., and Song, D. Natural adversarial examples. arXiv preprint arXiv:1907.07174, 2019. +- [5] Hendrycks, D., Dietterich, T. Benchmarking Neural Network Robustness to Common Corruptions and Perturbations. ICLR, 2019. +- [6] Weinzaepfel, P. and Rogez, G. Mimetics: Towards understanding human actions out of context. arXiv preprint arXiv:1912.07249, 2019. +- [7] Wang, H., He, Z., and Xing, E. P. Learning robust representations by projecting superficial statistics out. In International Conference on Learning Representations, 2019. +- [8] Geirhos, R., Rubisch, P., Michaelis, C., Bethge, M., Wichmann, F. A., and Brendel, W. Imagenet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness. In International Conference on Learning Representations, 2019. diff --git a/clusters/cluster_label_1.pth b/clusters/cluster_label_1.pth new file mode 100644 index 0000000000000000000000000000000000000000..d510ac94d681b00653d6873ec431478d24286dac GIT binary patch literal 17138 zcmbu<&u(3J6~%GeahqTSV#a4KRP%#czbibyFS0ZJo|ieb9-@pwLM%6r$1OKmlwO;<>qX2_2%OB)%NHw zrzeZJ`_tRa+3m%jH@DkIUu=(`p1!w;x1U|#?6&W{Iy!x{u#2nR_Wf7Kr^ow@>#I+< zk6#_%-7PcEch@)PpFKOf+g)7Vo}Arm{&073`Q~i5xw^f+Is5$i$+FYa)5rV3#rgWv z&GzuV<>Z%3%2QPoNef00q=Wp+}#~;6V@~dT= z-@ZCn{`b|g%Hdbr$A3Lv&Um>w-z`VHJ^A&`o6XJU&Cf5+cPHB?KmBC;z1{ZYlkNAH zC4av?_+R;lub;hmvW#8+4)5VV|Gxk3fBT!*|3SXuz3bb5`6>Kz@nZdQ_>SHg_7}SU zQ-}BJ!2w_U@b=^C6Yk%I{h#_h`*sgHv=_dAfA)X&yJ!FXFQ0j9D8EUa!$*hBr_0ml zJalMZ`0lkjklVa+K67yMAYH#X-J?ExwU5pZHr9hxXS`FQ-HGsoy!EKBymu&EqSs<<^gP4&FN5 zy}4I9KONFxb$l(yTs~aCd^+U!eef6c^S3^6b6WR2?``Y)_#nM#pGlwY zNiOVpC*AuC>vJx7?hP;MpYr&7K7FlMSGR{b-IK5Ae)ZeK9H>t|4n1dibU5YEp?rJb z>hkDoygoTaIp(?#I@E_3^-sF~B0n9se|2@(xt_z+Pw#s|hjM&pp0hqax;@nCe9(F7 za~{agM~BU?ZXY=l%IO?_b=(|Wy}CU2hwFpxqu6@;&E=a=ZtMCXpL5VVS59?(a`f|; zPrgZ)1LZ+|aMGuK{)eJ_@VlXZZO&SK%IO^Q`6pbP*Lrz#&Fwzk_xc~o&4>ESt9KuM z=ywe-%7f0y=RG$E=jUr59rCxIUY~i@r{6ib{Jskuc2DympFDfWqeJ%#`Q`CVsLx#Q z6Ti89&QIr~n@=ycZclnqy?fcW`Q3{Ts+;3n6Y}YkLx<|Hx<32z;jNcX_bZ1VR>$oH z=|y|d%O`!BUtRx%o!5T#qP*(vugDL5pM2&vj}Nx4AFkEqbe?`F2iJ#tt~md9MSVs6 zboVvwtF8~)uXVY;a~#$$PrZG4y@HbDL&L;;qS1u0ey%)KB(B8N?a`e$*^XX8% z*g5i{dbu1rJj z%j+-KXFjy2I_^C5qPp{%2kWn{&pdv5{gY3hee^;1B-gpyhfgR^4qrLlzR>$DhtJ*< z@&Nx+75V9SQI6dDar^Q?^>Vs->dp`OAYZv0?}@$nAYXI*9?_w` zo==WB?bqi$#d|Jwx;gUb_AwXAh1J`~2kSSV4yQfjbdEmtiPO#Hcm63?ALP?-U!2eL z!u8?yphG#(ep4U4D331BUXTvyMd$P$@ymyF{kU`B>g}`lnomEUz2rf8IG;XPA6=h1 z-F>J-^>X{t*RVN#Umn`e?;JSvd*(UO`5}Ee4G_qVps+Ih_DeU`7ykyEs1^(k-iJ2#ZKc0ToK z-ju@+&|b?>w1DVGlAx(7&?+j~$i%GIad{?>aQ=g<%J@zaaWVJ^RZIMvP9 z$5-9`(s6!!;p*n=GskmM$Mx~kdrmsPTqxH*P#?~Zs~0_Qd+6h%GPsiPx9Op1cU5E?Sc=nuuIzxy|LZQuGQZcfifcTQYRIUTC&r$ag)9n$&exH`SO=c%v$ z)^YE(_XVoUnb5xV{V;{Ww3QL;c==K3E^U_rxa$=XZbJ z2f7@6bbG<-{H^oJwFlJ4S6*MazMg|m-FesY`HSsqed23<>aaOIN9(xnV9IfRzxQ&X z9LO(MAI?|ggX(xu9^G7A|C&!9KcqvsIA75`x_hnOJh^mza@5^B^nG}b^+R*@^U39t zheQ4NLs7rE`pv7ZuFv`C(EYToZ_4F2UtYQU@w>za-6x-Zd_unBn!k1Z)t#TtS5$}2 zg+qP#gnUpQ4)x=n6F#9nSfA&r-duHi@yYQ%wJxVRE>|8uln>3pq4&x2lWUHAx<2*h V;;rk4>d<`q@R`@|fv%q){}*T=e<%O| literal 0 HcmV?d00001 diff --git 
a/clusters/cluster_label_2.pth b/clusters/cluster_label_2.pth new file mode 100644 index 0000000000000000000000000000000000000000..f0a2d82737734f932533463b34a8ed219a7b344b GIT binary patch literal 17138 zcmcK5&u(3J6~*yu$8CZUh$%CMVG=bEiaJM3LitlBo**kYSFMp{=h`=ws8p$wO29Ml z0K6KHfW&}vaXuY<#nwry;z3u}-fOMBf4_6i%@@aiJH7h%-+%e9AN}bczy0BpFOEO_ z=Y!?HpYJx0&wjAkY+l{o>~Ah^uFgN*-ril_T#k)^#Zuh%)o*$n*SlH$De)sP4le3dU#?AG| zyNAzDUcX*uUhHpfFFtv4{(66Tb$5DxyZz(q%d3~?`|b7J&F%T8KRI1?dVKcq5V$B` ze7xNq-CIt7wZ#1XzIM7hKKuS6`QX{l-hcMW7Ow{Negg z{=4_GO_8qtayj();axcV;gGHuLzZr;|3(c_?ow=>^(G}_KLobj1=#5aH9@0ZTeQ2I@%Hu0n=ZEs$ zKcBjM`u5f7amX(p(z_3w4~O;P^hM9te4MUa9m=Qcp8EEqsjd&FbE-ENw zkgr@Fwr{?=xz2;@LwWk;>h$I6MLAI4_k=^akk50)>7n~}{_6GB&7Y9o9(3-}9xvn>zB)ce9%1TyO9U^aVQ7pm&fNh(A$#^(&KV*=pLYa zx%SX6%H^ZSVZ!dGqcMf&uP^W`(zb@K$Er-tDI3EtZ zFSxzboeRpv`Jg%O0q29}@U5YJ)^z64;m~t|bQ9W>zdC*Q;oQ~f7ppUeZaOy|q+dhx zs*|H{Zgu+Ve0c9e{hOlaRKGpUuTGy2_x{nV)Ac)7uWpUI2WZbJ&tBEpvp%`%P~G#h zr#enwu5KSbb=W=Cp}swEefMlW)HkgLIG%hwAPV*SCN7 z(dR3-hdleL)5D&xIyu#;%kkXoh3iB4Q@?Yn!zYw)UQzzEKRul}a{1J&D|i2L*HCVK z>P7oNeeVwry-#qO!>`YeJBPVYo!))OD|fH-bme^V`EYvNKI%|?&2JCq#pSEx(46Yc zrGw3(lVeYHK1kPoefDvW=0Z98^m5EAc5c6axpL^`<8;k$z5B|clV@J_bZdP%^pFp4 zKA*bt*i)Z>LVo?pFCWtJO*!;%?OgKKP@Z$*(EH{02kOJ=Ju;up9Q#9k`Stal?A1Iu zkWLdb%Q?Iu6y>aMJg^X$~K3zxVFk&IkFXc~ic;$tSniygs+;dye+) zp>CfwAAS9}oZgdMeJHPb^`4uqetOvZX^wp%pWN2l#~~kHl;?ZVuWya>P3WAkI``N5 z8t;A5ul?TfIe+(3m-lU9@42~adw0+J%lq8gpYqh#_Fe0nClBxazHJUaoZg$xThr?o zryTq0!}@W3=f;~uU-TULpgel@zDK<${i!Z*4Sh%EPWtxiYhR9fcc!#(+3hwaOg zudgmgU)}S<_371d{UV*ZbKv&IJuldNdFnX7I&6+Qo9}&r z^5}|m>T>Yj4?UzScJJ!tw$A51+_SxKI@tMg`Ed6pM;+RWueti}mCnBQf%vGhmeEsUpQ#Y^ofYZ~NSCnV2z8rq%lFLV5)SvWn zCv?yH&9|>PbVWXO=a{&C^`V>z?L%Lb3+d%m@BZnE^!&bG_t81dO~+5CuWr6P&x0PP zx1St7bviyd`Z%Q5m!}TRldDe85A`9Pdodq}Q(Z1ykq@?y+n)~i9IC@1Ur`^*rBla? 
z`rb2r^`5`|az1+`Gk9QOA<(7!FprI&|8I(>WS|G%(v@uI!H=^pNH z%IWj=JUyrSExTMj?;U8raoPQKM$$&}x7OwW_woa(&q`jei{{U2;aug<_ zdOr6K>CMBTI<&v%OgEuDV0Ba7ys2-0x+#}VpI)v$?t6fIkRG;k2$&n_>f{n>E*(p)(|+icF)C+mwxXAdu?gP)F9v$*;8 z@%rTY?7Q{#bnE-+@b2+*vv~d0<<(|-?!m$Ft(l!&Y^LWQ93CHTGcGT_oNhljyt$cY zo^CF$PQSW$aIidwhEvI6EJHxt{i)w5;Br zV?KMLt)_$HmuJg6_uqQ^?Kh8(-hAWe=;;1*>+jL0k8h^K5ANUjaNg#V2Yd7X`*B`n z|HtX}&!_Vl&)27$`H0u6kFFlAuhx&=Jv-g3raP~HI=!%&R-aBU&P#qi?fqB&{=<9s z@62Q8f7>%||839r?|<|q)Cc*?WvdtK-@e55AK&&OxBnm=>Q^u4Q*XZd2%mp4{-1s1 z@0#=Uy8qcX#t1wit6^;&pmf8Kc750^v*c6 zpZAcXUX;UcPjjIh_d@3{@|oAVd2(>RQJ*@rzZ|IF-u!gv{_EqX^U?LY@A`1Q_Q`|l zP_BD%PX2OzP#yL?F~F=f|P_-4~so4(WW=@z&|S1IUN-xhFbQ_xq@> zZcltylv}@j$es$+^zxIUsQLReM!dTI=JG-3 z!i(zia6b2B&gfqFp}tWronL-+@7%rgb)V*_)8*iDVCRo~{7@fsesy!5hYt1OMg1dP zf04i6mviy+EulR|{qoF%{LuZl4_Lpy1NP#B@*uw)TtBp@?}M(d`Ra7s+!6B2ZJ#(@XMg?O!}+0pKDxZ>>U3xyb*PRP+t<16$J-}YeZ=V_)L$I=mvlMxd7ly1kNdsh zaC9H)yQ1%bzx#Qg)^X_E`sq-;sNOzvtM_|vfBohBqdn*&U-Rnscg_ClxLi4OzP>BY z&sXjp)y>h5!}^+s`~LW0_ipZzj~}`>sNeojy(otdx))p@zwe0;@;N^p(%qB%q8xqv zbf|7GT;AwD`0L}B1I=4P{!xxPl!xoH7k$L*=W}1U9H?)^^+Ud5^ULeU&4+#$OY`)@ z(RZkBANhFSpYHoCmj|22U*vT=v$>$pAjxmVnGhwIxF^%eEg@zK8aa({xKkQuo>ilxl>A3k&9`1giI$mFS{pHQ+eXHBo{nLx` z`Eh$eed@SA_f)<9^5(mLK6P^--Mpo9^2wD$FQ+>fG)K;eSChfXp z=;lGWu)2Nq^TGQ0=(xS*smpbU;yrJoOn_QjX5Irj4X zjr#4^e*RIvKJV0dxSXZ)jOOUKpIr0Rm-=^|FCQ-DF3p$Yp7`w}-}%(b<@7s}tFFJ; zdh^Tm>+9Usap+w7=|#Uoy1rdGU$Jxhz3{s~_4e@<^;MTs9bd{H-Lrn!oTa({w{C8+ zd#Lvv>CpF&%duy_3w8coQ67Kqw{<>fE~GbyKFS;Ea-p2j?@?VIq!*jR4_nvoeBO~y zpSr!hr#ijA6I`Bo?W03^P>#O(@Xo339Q5K+-5&PpeVMN=-`?`|@l|)fbevx%|<6Zdl+gmQ|e*DGGQEx6?Zud}!>Lb58vNyeoe%Pv2kDRw z>Cj&4(0u5gpuI+QzSi*~KWrVhH(p%oAJuyvxx4CeM}Bp=vTTf z0S@)y#pbk*^LbBPeF^!(Bi9ps}!y6=}RPn}*)FY>`ro$h@0g>*S|SX~|+ht=iF>-Wp= zKIGu7%cINHhpR(%xj5u&KR@hz{pv-z^~s_0`5l$p%Xfs!@ps*Pxv;u9t@FvX2h_)h ztHbL2P#xEYt3x_pb#=H@=PNGtsl)c$uX?%P!6>Ki+xL*iSCkL=q3=j;ebt?d51Ly( z(nmgf>4WybAwQ%;bKGxr9FF|*oxkWF^+UOkPadB<9NHUq4>%m@=0Se*Mmpap*WVdA z{PgavE>B;P&;9tjM2F_^$-zg+S6uS9uD`l-)A@?((78tad?ReW@2$Fiy7$LNs1MfX zUDZ2Z-8uQ>I5(fV{YHHwzg%_P9NhP1U-R0>U*yvVJEvS len(train_loader) * sigma_update_sampling_rate: + break + x = x.to(device) + n_data = len(x) + + _, f_feats = model.f_net(x) + f_feats = f_feats.view(n_data, -1) + _f_dist = _l2_dist(f_feats) + f_dist.extend(_f_dist) + + for g_net in model.g_nets: + _, g_feats = g_net(x) + g_feats = g_feats.view(n_data, -1) + _g_dist = _l2_dist(g_feats) + g_dist.extend(_g_dist) + + f_dist, g_dist = np.array(f_dist), np.array(g_dist) + + if func == 'median': + f_sigma, g_sigma = np.median(f_dist), np.median(g_dist) + else: + f_sigma, g_sigma = np.mean(f_dist), np.mean(g_dist) + + return np.sqrt(f_sigma), np.sqrt(g_sigma) + + +def feature_dimension(model, train_loader, device='cuda'): + model.train() + + for x, _ in train_loader: + x = x.to(device) + n_data = len(x) + + _, f_feats = model.f_net(x) + f_feats = f_feats.view(n_data, -1) + f_dim = f_feats.size()[1] + + for g_net in model.g_nets: + _, g_feats = g_net(x) + g_feats = g_feats.view(n_data, -1) + g_dim = g_feats.size()[1] + return np.sqrt(f_dim), np.sqrt(g_dim) diff --git a/datasets/__init__.py b/datasets/__init__.py new file mode 100644 index 0000000..a3e8b2d --- /dev/null +++ b/datasets/__init__.py @@ -0,0 +1,25 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. +MIT license + +Datasets used for the ``unbaised'' benchmarks +- Biased-MNIST: synthetic bias with background colours. +- 9-Class ImageNet: realistic bias where the unbiased performances are + computed by the proxy texture labels (by texture clustering). 
+- Kinetics-10: a subset of the Kinetics dataset, where the unbiased performances are + measured on the ``Mimetics'' dataset. + Weinzaepfel, Philippe, and Grégory Rogez. "Mimetics: Towards Understanding Human Actions Out of Context." arXiv preprint arXiv:1912.07249 (2019). + https://europe.naverlabs.com/research/computer-vision/mimetics/ +""" +from datasets.colour_mnist import get_biased_mnist_dataloader +try: + from datasets.kinetics import get_kinetics_dataloader +except ImportError: + print('failed to import kinetics, please install library from') + print('https://github.com/facebookresearch/SlowFast/blob/master/INSTALL.md') +from datasets.imagenet import get_imagenet_dataloader + + +__all__ = ['get_biased_mnist_dataloader', + 'get_kinetics_dataloader', + 'get_imagenet_dataloader'] diff --git a/datasets/colour_mnist.py b/datasets/colour_mnist.py new file mode 100644 index 0000000..276ceb8 --- /dev/null +++ b/datasets/colour_mnist.py @@ -0,0 +1,190 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. +MIT license + +Python implementation of Biased-MNIST. +""" +import os +import numpy as np +from PIL import Image + +import torch +from torch.utils import data + +from torchvision import transforms +from torchvision.datasets import MNIST + + +class BiasedMNIST(MNIST): + """A base class for Biased-MNIST. + We manually select ten colours to synthesise the colour bias. (See `COLOUR_MAP` for the colour configuration) + Usage is exactly the same as the torchvision MNIST dataset class. + + You have two parameters to control the level of bias. + + Parameters + ---------- + root : str + path to MNIST dataset. + data_label_correlation : float, default=1.0 + Here, each class has the pre-defined colour (bias). + data_label_correlation, or `rho`, controls the level of the dataset bias. + + A sample is coloured with + - the pre-defined colour with probability `rho`, + - one of the other colours with probability `1 - rho`. + The number of ``other colours'' is controlled by `n_confusing_labels` (default: 9). + Note that the colour is injected into the background of the image (see `_binary_to_colour`). + + Hence, we have + - a perfectly biased dataset with rho=1.0 + - a perfectly unbiased dataset with rho=0.1 (1/10) ==> our ``unbiased'' setting at test time. + In the paper, we explore high correlations with small hints, e.g., rho=0.999. + + n_confusing_labels : int, default=9 + In real-world cases, biases are not equally distributed, but highly unbalanced. + We mimic the unbalanced biases by changing the number of confusing colours for each class. + In the paper, we use n_confusing_labels=9, i.e., during training, the model can observe + all colours for each class. However, you can make the problem harder by setting a smaller n_confusing_labels, e.g., 2. + We suggest researchers consider this option when using this benchmark in future research.
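    Example
    -------
    A minimal usage sketch (the root path and batch size are illustrative);
    `get_biased_mnist_dataloader`, defined at the bottom of this file, wraps
    this class with the normalisation used in the paper:

        from datasets.colour_mnist import get_biased_mnist_dataloader

        train_loader = get_biased_mnist_dataloader(
            root='./data/MNIST', batch_size=256,
            data_label_correlation=0.999, n_confusing_labels=9, train=True)
        images, targets, bias_targets = next(iter(train_loader))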
+ """ + + COLOUR_MAP = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [225, 225, 0], [225, 0, 225], + [0, 255, 255], [255, 128, 0], [255, 0, 128], [128, 0, 255], [128, 128, 128]] + + def __init__(self, root, train=True, transform=None, target_transform=None, + download=False, data_label_correlation=1.0, n_confusing_labels=9): + super().__init__(root, train=train, transform=transform, + target_transform=target_transform, + download=download) + self.random = True + + self.data_label_correlation = data_label_correlation + self.n_confusing_labels = n_confusing_labels + self.data, self.targets, self.biased_targets = self.build_biased_mnist() + + indices = np.arange(len(self.data)) + self._shuffle(indices) + + self.data = self.data[indices].numpy() + self.targets = self.targets[indices] + self.biased_targets = self.biased_targets[indices] + + @property + def raw_folder(self): + return os.path.join(self.root, 'raw') + + @property + def processed_folder(self): + return os.path.join(self.root, 'processed') + + def _shuffle(self, iteratable): + if self.random: + np.random.shuffle(iteratable) + + def _make_biased_mnist(self, indices, label): + raise NotImplementedError + + def _update_bias_indices(self, bias_indices, label): + if self.n_confusing_labels > 9 or self.n_confusing_labels < 1: + raise ValueError(self.n_confusing_labels) + + indices = np.where((self.targets == label).numpy())[0] + self._shuffle(indices) + indices = torch.LongTensor(indices) + + n_samples = len(indices) + n_correlated_samples = int(n_samples * self.data_label_correlation) + n_decorrelated_per_class = int(np.ceil((n_samples - n_correlated_samples) / (self.n_confusing_labels))) + + correlated_indices = indices[:n_correlated_samples] + bias_indices[label] = torch.cat([bias_indices[label], correlated_indices]) + + decorrelated_indices = torch.split(indices[n_correlated_samples:], n_decorrelated_per_class) + + other_labels = [_label % 10 for _label in range(label + 1, label + 1 + self.n_confusing_labels)] + self._shuffle(other_labels) + + for idx, _indices in enumerate(decorrelated_indices): + _label = other_labels[idx] + bias_indices[_label] = torch.cat([bias_indices[_label], _indices]) + + def build_biased_mnist(self): + """Build biased MNIST. 
+ """ + n_labels = self.targets.max().item() + 1 + + bias_indices = {label: torch.LongTensor() for label in range(n_labels)} + for label in range(n_labels): + self._update_bias_indices(bias_indices, label) + + data = torch.ByteTensor() + targets = torch.LongTensor() + biased_targets = [] + + for bias_label, indices in bias_indices.items(): + _data, _targets = self._make_biased_mnist(indices, bias_label) + data = torch.cat([data, _data]) + targets = torch.cat([targets, _targets]) + biased_targets.extend([bias_label] * len(indices)) + + biased_targets = torch.LongTensor(biased_targets) + return data, targets, biased_targets + + def __getitem__(self, index): + img, target = self.data[index], int(self.targets[index]) + img = Image.fromarray(img.astype(np.uint8), mode='RGB') + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target, int(self.biased_targets[index]) + + +class ColourBiasedMNIST(BiasedMNIST): + def __init__(self, root, train=True, transform=None, target_transform=None, + download=False, data_label_correlation=1.0, n_confusing_labels=9): + super(ColourBiasedMNIST, self).__init__(root, train=train, transform=transform, + target_transform=target_transform, + download=download, + data_label_correlation=data_label_correlation, + n_confusing_labels=n_confusing_labels) + + def _binary_to_colour(self, data, colour): + fg_data = torch.zeros_like(data) + fg_data[data != 0] = 255 + fg_data[data == 0] = 0 + fg_data = torch.stack([fg_data, fg_data, fg_data], dim=1) + + bg_data = torch.zeros_like(data) + bg_data[data == 0] = 1 + bg_data[data != 0] = 0 + bg_data = torch.stack([bg_data, bg_data, bg_data], dim=3) + bg_data = bg_data * torch.ByteTensor(colour) + bg_data = bg_data.permute(0, 3, 1, 2) + + data = fg_data + bg_data + return data.permute(0, 2, 3, 1) + + def _make_biased_mnist(self, indices, label): + return self._binary_to_colour(self.data[indices], self.COLOUR_MAP[label]), self.targets[indices] + + +def get_biased_mnist_dataloader(root, batch_size, data_label_correlation, + n_confusing_labels=9, train=True, num_workers=8): + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5))]) + dataset = ColourBiasedMNIST(root, train=train, transform=transform, + download=True, data_label_correlation=data_label_correlation, + n_confusing_labels=n_confusing_labels) + dataloader = data.DataLoader(dataset=dataset, + batch_size=batch_size, + shuffle=True, + num_workers=num_workers, + pin_memory=True) + return dataloader diff --git a/datasets/imagenet.py b/datasets/imagenet.py new file mode 100644 index 0000000..24aaed2 --- /dev/null +++ b/datasets/imagenet.py @@ -0,0 +1,166 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. +MIT license + +9-Class ImageNet wrapper. Many codes are borrowed from the official torchvision dataset. 
+https://github.com/pytorch/vision/blob/master/torchvision/datasets/imagenet.py + +The following nine classes are selected to build the subset: + dog, cat, frog, turtle, bird, monkey, fish, crab, insect +""" +import os +from PIL import Image +from torchvision import transforms +import torch +import torch.utils.data + + +IMG_EXTENSIONS = [ + '.jpg', '.JPG', '.jpeg', '.JPEG', + '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', +] + +CLASS_TO_INDEX = {'n01641577': 2, 'n01644373': 2, 'n01644900': 2, 'n01664065': 3, 'n01665541': 3, + 'n01667114': 3, 'n01667778': 3, 'n01669191': 3, 'n01819313': 4, 'n01820546': 4, + 'n01833805': 4, 'n01843383': 4, 'n01847000': 4, 'n01978287': 7, 'n01978455': 7, + 'n01980166': 7, 'n01981276': 7, 'n02085620': 0, 'n02099601': 0, 'n02106550': 0, + 'n02106662': 0, 'n02110958': 0, 'n02123045': 1, 'n02123159': 1, 'n02123394': 1, + 'n02123597': 1, 'n02124075': 1, 'n02174001': 8, 'n02177972': 8, 'n02190166': 8, + 'n02206856': 8, 'n02219486': 8, 'n02486410': 5, 'n02487347': 5, 'n02488291': 5, + 'n02488702': 5, 'n02492035': 5, 'n02607072': 6, 'n02640242': 6, 'n02641379': 6, + 'n02643566': 6, 'n02655020': 6} + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def make_dataset(dir, class_to_idx, data='ImageNet'): + # dog, cat, frog, turtle, bird, monkey, fish, crab, insect + RESTRICTED_RANGES = [(151, 254), (281, 285), (30, 32), (33, 37), (89, 97), + (372, 378), (393, 397), (118, 121), (306, 310)] + range_sets = [set(range(s, e + 1)) for s, e in RESTRICTED_RANGES] + class_to_idx_ = {} + + if data == 'ImageNet-A': + for class_name, idx in class_to_idx.items(): + try: + class_to_idx_[class_name] = CLASS_TO_INDEX[class_name] + except Exception: + pass + elif data == 'ImageNet-C': + # TODO + pass + else: # ImageNet + for class_name, idx in class_to_idx.items(): + for new_idx, range_set in enumerate(range_sets): + if idx in range_set: + if new_idx == 0: # classes that overlap with ImageNet-A + if idx in [151, 207, 234, 235, 254]: + class_to_idx_[class_name] = new_idx + elif new_idx == 4: + if idx in [89, 90, 94, 96, 97]: + class_to_idx_[class_name] = new_idx + elif new_idx == 5: + if idx in [372, 373, 374, 375, 378]: + class_to_idx_[class_name] = new_idx + else: + class_to_idx_[class_name] = new_idx + images = [] + dir = os.path.expanduser(dir) + a = sorted(class_to_idx_.keys()) + for target in a: + d = os.path.join(dir, target) + if not os.path.isdir(d): + continue + for root, _, fnames in sorted(os.walk(d)): + for fname in fnames: + if is_image_file(fname): + path = os.path.join(root, fname) + item = (path, class_to_idx_[target]) + images.append(item) + + return images, class_to_idx_ + + +def find_classes(dir): + classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))] + classes.sort() + class_to_idx = {classes[i]: i for i in range(len(classes))} + return classes, class_to_idx + + +def pil_loader(path): + with open(path, 'rb') as f: + with Image.open(f) as img: + return img.convert('RGB') + + +class ImageFolder(torch.utils.data.Dataset): + def __init__(self, root, transform=None, target_transform=None, loader=pil_loader, + train=True, val_data='ImageNet'): + classes, class_to_idx = find_classes(root) + imgs, class_to_idx_ = make_dataset(root, class_to_idx, val_data) + if len(imgs) == 0: + raise (RuntimeError("Found 0 images in subfolders of: " + root + "\n" + "Supported image extensions are: " + ",".join( + IMG_EXTENSIONS))) + self.root = root + self.dataset = imgs + self.classes = classes + 
self.class_to_idx = class_to_idx_ + self.transform = transform + self.target_transform = target_transform + self.loader = loader + self.train = train + self.val_data = val_data + self.clusters = [] + for i in range(3): + self.clusters.append(torch.load('clusters/cluster_label_{}.pth'.format(i+1))) + + def __getitem__(self, index): + path, target = self.dataset[index] + img = self.loader(path) + if self.transform is not None: + img = self.transform(img) + if self.target_transform is not None: + target = self.target_transform(target) + + if not self.train and self.val_data == 'ImageNet': + bias_target = [self.clusters[0][index], + self.clusters[1][index], + self.clusters[2][index]] + return img, target, bias_target + + else: + return img, target, target + + def __len__(self): + return len(self.dataset) + + +def get_imagenet_dataloader(root, batch_size, train=True, num_workers=8, + load_size=256, image_size=224, val_data='ImageNet'): + if train: + transform = transforms.Compose([ + transforms.RandomResizedCrop(image_size), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]) + + else: + transform = transforms.Compose([ + transforms.Resize(load_size), + transforms.CenterCrop(image_size), + transforms.ToTensor(), + transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]) + + dataset = ImageFolder(root, transform=transform, train=train, val_data=val_data) + + dataloader = torch.utils.data.DataLoader(dataset=dataset, + batch_size=batch_size, + shuffle=train, + num_workers=num_workers, + pin_memory=True) + + return dataloader diff --git a/datasets/kinetics.py b/datasets/kinetics.py new file mode 100644 index 0000000..918b226 --- /dev/null +++ b/datasets/kinetics.py @@ -0,0 +1,24 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. +MIT license + +Dataset for the action recognition benchmarks. +We use the official implemenation of SlowFast by Facebook research. +https://github.com/facebookresearch/SlowFast +""" +import torch + +from datasets.kinetics_tools.loader import construct_loader + + +def get_kinetics_dataloader(root, + split='train', + logger=None, + anno_file=None, + dataset_name='kinetics50', + batch_size=16): + return construct_loader(root, split, logger, + anno_file=anno_file, + dataset_name=dataset_name, + num_gpus=torch.cuda.device_count(), + batch_size=batch_size) diff --git a/datasets/kinetics_tools/__init__.py b/datasets/kinetics_tools/__init__.py new file mode 100644 index 0000000..dd90032 --- /dev/null +++ b/datasets/kinetics_tools/__init__.py @@ -0,0 +1,3 @@ +"""Kinetics dataset implementations. +Original codes: https://github.com/facebookresearch/SlowFast +""" diff --git a/datasets/kinetics_tools/decoder.py b/datasets/kinetics_tools/decoder.py new file mode 100644 index 0000000..0edcc8a --- /dev/null +++ b/datasets/kinetics_tools/decoder.py @@ -0,0 +1,236 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +import math +import numpy as np +import random +import torch + + +def temporal_sampling(frames, start_idx, end_idx, num_samples): + """ + Given the start and end frame index, sample num_samples frames between + the start and end with equal interval. + Args: + frames (tensor): a tensor of video frames, dimension is + `num video frames` x `channel` x `height` x `width`. + start_idx (int): the index of the start frame. + end_idx (int): the index of the end frame. + num_samples (int): number of frames to sample. 
+ Returns: + frames (tersor): a tensor of temporal sampled video frames, dimension is + `num clip frames` x `channel` x `height` x `width`. + """ + index = torch.linspace(start_idx, end_idx, num_samples) + index = torch.clamp(index, 0, frames.shape[0] - 1).long() + frames = torch.index_select(frames, 0, index) + return frames + + +def get_start_end_idx(video_size, clip_size, clip_idx, num_clips): + """ + Sample a clip of size clip_size from a video of size video_size and + return the indices of the first and last frame of the clip. If clip_idx is + -1, the clip is randomly sampled, otherwise uniformly split the video to + num_clips clips, and select the start and end index of clip_idx-th video + clip. + Args: + video_size (int): number of overall frames. + clip_size (int): size of the clip to sample from the frames. + clip_idx (int): if clip_idx is -1, perform random jitter sampling. If + clip_idx is larger than -1, uniformly split the video to num_clips + clips, and select the start and end index of the clip_idx-th video + clip. + num_clips (int): overall number of clips to uniformly sample from the + given video for testing. + Returns: + start_idx (int): the start frame index. + end_idx (int): the end frame index. + """ + delta = max(video_size - clip_size, 0) + if clip_idx == -1: + # Random temporal sampling. + start_idx = random.uniform(0, delta) + else: + # Uniformly sample the clip with the given index. + start_idx = delta * clip_idx / num_clips + end_idx = start_idx + clip_size - 1 + return start_idx, end_idx + + +def pyav_decode_stream( + container, start_pts, end_pts, stream, stream_name, buffer_size=0 +): + """ + Decode the video with PyAV decoder. + Args: + container (container): PyAV container. + start_pts (int): the starting Presentation TimeStamp to fetch the + video frames. + end_pts (int): the ending Presentation TimeStamp of the decoded frames. + stream (stream): PyAV stream. + stream_name (dict): a dictionary of streams. For example, {"video": 0} + means video stream at stream index 0. + buffer_size (int): number of additional frames to decode beyond end_pts. + Returns: + result (list): list of frames decoded. + max_pts (int): max Presentation TimeStamp of the video sequence. + """ + # Seeking in the stream is imprecise. Thus, seek to an ealier PTS by a + # margin pts. + margin = 1024 + seek_offset = max(start_pts - margin, 0) + + container.seek(seek_offset, any_frame=False, backward=True, stream=stream) + frames = {} + buffer_count = 0 + max_pts = 0 + for frame in container.decode(**stream_name): + max_pts = max(max_pts, frame.pts) + if frame.pts < start_pts: + continue + if frame.pts <= end_pts: + frames[frame.pts] = frame + else: + buffer_count += 1 + frames[frame.pts] = frame + if buffer_count >= buffer_size: + break + result = [frames[pts] for pts in sorted(frames)] + return result, max_pts + + +def pyav_decode( + container, + sampling_rate, + num_frames, + clip_idx, + num_clips=10, + target_fps=30, +): + """ + Convert the video from its original fps to the target_fps. If the video + support selective decoding (contain decoding information in the video head), + the perform temporal selective decoding and sample a clip from the video + with the PyAV decoder. If the video does not support selective decoding, + decode the entire video. + + Args: + container (container): pyav container. + sampling_rate (int): frame sampling rate (interval between two sampled + frames. + num_frames (int): number of frames to sample. 
+ clip_idx (int): if clip_idx is -1, perform random temporal sampling. If + clip_idx is larger than -1, uniformly split the video to num_clips + clips, and select the clip_idx-th video clip. + num_clips (int): overall number of clips to uniformly sample from the + given video. + target_fps (int): the input video may has different fps, convert it to + the target video fps before frame sampling. + Returns: + frames (tensor): decoded frames from the video. Return None if the no + video stream was found. + fps (float): the number of frames per second of the video. + decode_all_video (bool): If True, the entire video was decoded. + """ + # Try to fetch the decoding information from the video head. Some of the + # videos does not support fetching the decoding information, for that case + # it will get None duration. + fps = float(container.streams.video[0].average_rate) + frames_length = container.streams.video[0].frames + duration = container.streams.video[0].duration + + if duration is None: + # If failed to fetch the decoding information, decode the entire video. + decode_all_video = True + video_start_pts, video_end_pts = 0, math.inf + else: + # Perform selective decoding. + decode_all_video = False + start_idx, end_idx = get_start_end_idx( + frames_length, + sampling_rate * num_frames / target_fps * fps, + clip_idx, + num_clips, + ) + timebase = duration / frames_length + video_start_pts = int(start_idx * timebase) + video_end_pts = int(end_idx * timebase) + + frames = None + # If video stream was found, fetch video frames from the video. + if container.streams.video: + video_frames, max_pts = pyav_decode_stream( + container, + video_start_pts, + video_end_pts, + container.streams.video[0], + {"video": 0}, + ) + container.close() + + frames = [frame.to_rgb().to_ndarray() for frame in video_frames] + frames = torch.as_tensor(np.stack(frames)) + return frames, fps, decode_all_video + + +def decode( + container, + sampling_rate, + num_frames, + clip_idx=-1, + num_clips=10, + video_meta=None, + target_fps=30, +): + """ + Decode the video and perform temporal sampling. + Args: + container (container): pyav container. + sampling_rate (int): frame sampling rate (interval between two sampled + frames). + num_frames (int): number of frames to sample. + clip_idx (int): if clip_idx is -1, perform random temporal + sampling. If clip_idx is larger than -1, uniformly split the + video to num_clips clips, and select the + clip_idx-th video clip. + num_clips (int): overall number of clips to uniformly + sample from the given video. + video_meta (dict): a dict contains "fps", "timebase", and + "max_pts": + `fps` is the frames per second of the given video. + `timebase` is the video timebase. + `max_pts` is the largest pts from the video. + target_fps (int): the input video may have different fps, convert it to + the target video fps before frame sampling. + Returns: + frames (tensor): decoded frames from the video. + """ + # Currently support two decoders: 1) PyAV, and 2) TorchVision. + assert clip_idx >= -1, "Not valied clip_idx {}".format(clip_idx) + try: + frames, fps, decode_all_video = pyav_decode( + container, + sampling_rate, + num_frames, + clip_idx, + num_clips, + target_fps, + ) + except Exception as e: + print("Failed to decode with pyav with exception: {}".format(e)) + return None + + # Return None if the frames was not decoded successfully. 
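+    # Illustrative note (not from the original SlowFast code): callers such as
+    # Kinetics.__getitem__ treat a None return as a decode failure and retry
+    # with another randomly chosen video.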
+ if frames is None: + return frames + + start_idx, end_idx = get_start_end_idx( + frames.shape[0], + num_frames * sampling_rate * fps / target_fps, + clip_idx if decode_all_video else 0, + num_clips if decode_all_video else 1, + ) + # Perform temporal sampling from the decoded video. + frames = temporal_sampling(frames, start_idx, end_idx, num_frames) + return frames diff --git a/datasets/kinetics_tools/kinetics.py b/datasets/kinetics_tools/kinetics.py new file mode 100644 index 0000000..c678a62 --- /dev/null +++ b/datasets/kinetics_tools/kinetics.py @@ -0,0 +1,429 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +import os +import json +import random +import torch +import torch.utils.data + +import datasets.kinetics_tools.decoder as decoder +import datasets.kinetics_tools.video_container as container +import datasets.kinetics_tools.transform as transform + +import tqdm + +DATA_MEAN = [0.45, 0.45, 0.45] +DATA_STD = [0.225, 0.225, 0.225] +TRAIN_JITTER_SCALES = [256, 320] +TRAIN_CROP_SIZE = 224 +TEST_CROP_SIZE = 256 +TEST_NUM_ENSEMBLE_VIEWS = 10 +TEST_NUM_SPATIAL_CROPS = 1 +DATA_SAMPLING_RATE = 8 +DATA_NUM_FRAMES = 8 + + +class Kinetics(torch.utils.data.Dataset): + """ + Kinetics video loader. Construct the Kinetics video loader, then sample + clips from the videos. For training and validation, a single clip is + randomly sampled from every video with random cropping, scaling, and + flipping. For testing, multiple clips are uniformaly sampled from every + video with uniform cropping. For uniform cropping, we take the left, center, + and right crop if the width is larger than height, or take top, center, and + bottom crop if the height is larger than the width. + """ + + def __init__(self, root, mode, logger, num_retries=10, + dataset_name="kinetics50", + anno_file="kinetics-400.json"): + """ + Construct the Kinetics video loader + """ + # Only support train, val, and test mode. + assert mode in [ + "train", + "val", + "test", + ], "Split '{}' not supported for Kinetics".format(mode) + self.mode = mode + self.root = root + self.anno_file = anno_file + self.dataset_name = dataset_name + + assert self.dataset_name in ['kinetics400', 'kinetics50', 'mimetics50', + 'kinetics10', 'mimetics10',] + + self.logger = logger + + self._video_meta = {} + self._num_retries = num_retries + # For training or validation mode, one single clip is sampled from every + # video. For testing, NUM_ENSEMBLE_VIEWS clips are sampled from every + # video. For every clip, NUM_SPATIAL_CROPS is cropped spatially from + # the frames. 
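+        # With the constants above (TEST_NUM_ENSEMBLE_VIEWS=10,
+        # TEST_NUM_SPATIAL_CROPS=1), each test video therefore contributes
+        # 10 loader entries, and entry i maps back to video i // 10.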
+ if self.mode in ["train", "val"]: + self._num_clips = 1 + elif self.mode in ["test"]: + self._num_clips = ( + TEST_NUM_ENSEMBLE_VIEWS * TEST_NUM_SPATIAL_CROPS + ) + + self.logger.log("Constructing Kinetics {}...".format(mode)) + self._construct_loader() + + def _parse_json(self, json_path, valid=False): + self.logger.log(json_path) + with open(json_path, 'r') as data_file: + self.json_data = json.load(data_file) + + if valid: + c_tr, c_te, c_v = 0, 0, 0 + for jd in self.json_data['database']: + if self.json_data['database'][jd]['subset'] == 'training': + c_tr += 1 + elif self.json_data['database'][jd]['subset'] == 'testing': + c_te += 1 + elif self.json_data['database'][jd]['subset'] == 'validation': + c_v += 1 + self.logger.log('Number of Training samples: %d' % c_tr) + self.logger.log('Number of Validation samples: %d' % c_v) + self.logger.log('Number of Testing samples: %d' % c_te) + + def _get_class_idx_map(self, classes): + self.class_labels_map = {} + for index, class_label in enumerate(classes): + self.class_labels_map[class_label] = index + + def _get_action_label(self, data): + if self.mode == 'train' and data['subset'] == 'training': + action_label = data['annotations']['label'] + elif self.mode == 'val' and data['subset'] == 'validation': + action_label = data['annotations']['label'] + elif self.mode == 'test' and data['subset'] == 'validation': + action_label = data['annotations']['label'] + elif self.mode == 'test' and data['subset'] == 'testing': + action_label = None + else: + action_label = None + return action_label + + def _set_path_prefix(self): + if self.mode == 'train': + self.PATH_PREFIX = 'train' + elif self.mode == 'val': + self.PATH_PREFIX = 'val' + elif self.mode == 'test': + self.PATH_PREFIX = 'val' + else: + raise NotImplementedError + + def _construct_loader(self): + """ + Construct the video loader. + """ + path_to_file = os.path.join(self.root, self.anno_file) + + self._set_path_prefix() + + if path_to_file.endswith('.json'): + self._parse_json(json_path=path_to_file) + + + self._path_to_videos = [] + self._labels = [] + self._spatial_temporal_idx = [] + clip_idx = 0 + num_missing_videos = 0 + subclasses = [] + if self.dataset_name == 'kinetics50': + with open('datasets/mimetics/mimetics_v1.0_clsannot.txt') as f_subclasses: + f_subclasses.readline() + for line in f_subclasses.readlines(): + subclasses.append(line.split()[0]) + self._get_class_idx_map(subclasses) + elif self.dataset_name == 'kinetics10': + with open('datasets/mimetics/mimetics_v1.0_clsannot.txt') as f_subclasses: + f_subclasses.readline() + line_idx = 0 + for line in f_subclasses.readlines(): + line_idx += 1 + if line_idx % 5 == 0: + subclasses.append(line.split()[0]) + print (subclasses) + self._get_class_idx_map(subclasses) + else: + self._get_class_idx_map(self.json_data['labels']) + + + for key in tqdm.tqdm(self.json_data['database']): + data = self.json_data['database'][key] + action_label = self._get_action_label(data) + if (action_label not in subclasses) and len(subclasses): + continue + + if action_label is None: + # when the json_data['subset'] is not matched with 'self.mode', skip this data. + # (for example, self.mode=='train' but data['subset']=='testing') + continue + + # path = os.path.join(root_path, self.PATH_PREFIX, action_label, key + '.mp4') + vid_name = key[:-14] + + # possible path lists (.mp4, .mkv, etc.) 
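+                # Expected layout, as implied by the joins below (illustrative):
+                #   <root>/<train|val>/<label with spaces as '_'>/<clip key or youtube id>.<ext>
+                # The first existing candidate is used; clips with no match are
+                # counted as missing and skipped.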
+ paths = [] + paths.append(os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), key + '.mp4')) + paths.append( + os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), vid_name + '.mp4')) + paths.append(os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), key + '.mkv')) + paths.append( + os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), vid_name + '.mkv')) + paths.append( + os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), key + '.mp4.mkv')) + paths.append( + os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), vid_name + '.mp4.mkv')) + paths.append( + os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), key + '.mp4.webm')) + paths.append( + os.path.join(self.root, self.PATH_PREFIX, action_label.replace(' ', '_'), vid_name + '.mp4.webm')) + + exist_path = [p for p in paths if os.path.exists(p)] + + label = self.class_labels_map[action_label] + if len(exist_path) > 0: + path = exist_path[0] + else: + # print(path) + num_missing_videos += 1 + continue + + for idx in range(self._num_clips): + self._path_to_videos.append(path) + self._labels.append(int(label)) + self._spatial_temporal_idx.append(idx) + self._video_meta[clip_idx * self._num_clips + idx] = {} + clip_idx += 1 + + self.logger.log('num_missing_videos: %d' % num_missing_videos) + # assert ( + # len(self._path_to_videos) > 0 + # ), "Failed to load Kinetics split {} from {}".format( + # self._split_idx, path_to_file + # ) + self.logger.log( + "Constructing kinetics_tools dataloader (size: {}) from {}".format( + len(self._path_to_videos), path_to_file + ) + ) + + else: + # path_to_file = os.path.join( + # self.root, "{}.csv".format(self.mode) + # ) + self._path_to_videos = [] + self._labels = [] + self._spatial_temporal_idx = [] + + label_strings = [] + with open(path_to_file, "r") as f: + f.readline() + for clip_idx, path_label in enumerate(f.read().splitlines()): + label_strings.append(path_label.split(',')[0]) + + label_strings = sorted(set(label_strings)) + + if self.dataset_name == 'mimetics10': + label_strings = label_strings[4::5] + print (label_strings) + + with open(path_to_file, "r") as f: + f.readline() + for clip_idx, path_label in enumerate(f.read().splitlines()): + # assert len(path_label.split()) == 2 + label_str, path, start_time, end_time, _, _ = path_label.split(',') + + if self.dataset_name == 'mimetics10' and label_str not in label_strings: + continue + + label = label_strings.index(label_str) + path = os.path.join(self.root, + 'data', + label_str, + '{0}_{1:06d}_{2:06d}.mp4'.format(path, int(start_time), int(end_time))) + if not os.path.exists(path): + self.logger.log('{} is not exists!'.format(path)) + continue + + for idx in range(self._num_clips): + self._path_to_videos.append(path) + self._labels.append(int(label)) + self._spatial_temporal_idx.append(idx) + self._video_meta[clip_idx * self._num_clips + idx] = {} + # assert ( + # len(self._path_to_videos) > 0 + # ), "Failed to load Kinetics split {} from {}".format( + # self._split_idx, path_to_file + # ) + self.logger.log( + "Constructing kinetics_tools dataloader (size: {}) from {}".format( + len(self._path_to_videos), path_to_file + ) + ) + + def __getitem__(self, index): + """ + Given the video index, return the list of frames, label, and video + index if the video can be fetched and decoded successfully, otherwise + repeatly find a random video that can be decoded as a replacement. 
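+        If no decodable video is found after `num_retries` attempts (10 by
+        default), a RuntimeError is raised.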
+ Args: + index (int): the video index provided by the pytorch sampler. + Returns: + frames (tensor): the frames of sampled from the video. The dimension + is `channel` x `num frames` x `height` x `width`. + label (int): the label of the current video. + index (int): if the video provided by pytorch sampler can be + decoded, then return the index of the video. If not, return the + index of the video replacement that can be decoded. + """ + if self.mode in ["train", "val"]: + # -1 indicates random sampling. + temporal_sample_index = -1 + spatial_sample_index = -1 + min_scale = TRAIN_JITTER_SCALES[0] + max_scale = TRAIN_JITTER_SCALES[1] + crop_size = TRAIN_CROP_SIZE + elif self.mode in ["test"]: + temporal_sample_index = ( + self._spatial_temporal_idx[index] + // TEST_NUM_SPATIAL_CROPS + ) + # spatial_sample_index is in [0, 1, 2]. Corresponding to left, + # center, or right if width is larger than height, and top, middle, + # or bottom if height is larger than width. + spatial_sample_index = ( + self._spatial_temporal_idx[index] + % TEST_NUM_SPATIAL_CROPS + ) + min_scale, max_scale, crop_size = [TEST_CROP_SIZE] * 3 + # The testing is deterministic and no jitter should be performed. + # min_scale, max_scale, and crop_size are expect to be the same. + assert len({min_scale, max_scale, crop_size}) == 1 + else: + raise NotImplementedError( + "Does not support {} mode".format(self.mode) + ) + + # Try to decode and sample a clip from a video. If the video can not be + # decoded, repeatly find a random video replacement that can be decoded. + for _ in range(self._num_retries): + video_container = None + try: + video_container = container.get_video_container( + self._path_to_videos[index] + ) + except Exception as e: + self.logger.log( + "Failed to load video from {} with error {}".format( + self._path_to_videos[index], e + ) + ) + # Select a random video if the current video was not able to access. + if video_container is None: + index = random.randint(0, len(self._path_to_videos) - 1) + continue + + # Decode video. Meta info is used to perform selective decoding. + frames = decoder.decode( + video_container, + DATA_SAMPLING_RATE, + DATA_NUM_FRAMES, + temporal_sample_index, + TEST_NUM_ENSEMBLE_VIEWS, + # video_meta=self._video_meta[index], + target_fps=30, + ) + + # If decoding failed (wrong format, video is too short, and etc), + # select another video. + if frames is None: + self.logger.log(self._path_to_videos[index]) + index = random.randint(0, len(self._path_to_videos) - 1) + continue + + # Perform color normalization. + frames = frames.float() + frames = frames / 255.0 + frames = frames - torch.tensor(DATA_MEAN) + frames = frames / torch.tensor(DATA_STD) + # T H W C -> C T H W. + frames = frames.permute(3, 0, 1, 2) + # Perform data augmentation. + frames = self.spatial_sampling( + frames, + spatial_idx=spatial_sample_index, + min_scale=min_scale, + max_scale=max_scale, + crop_size=crop_size, + ) + + # frames = [frames] + label = self._labels[index] + return frames, label, index + else: + raise RuntimeError( + "Failed to fetch video after {} retries.".format( + self._num_retries + ) + ) + + def __len__(self): + """ + Returns: + (int): the number of videos in the dataset. + """ + return len(self._path_to_videos) + + def spatial_sampling( + self, + frames, + spatial_idx=-1, + min_scale=256, + max_scale=320, + crop_size=224, + ): + """ + Perform spatial sampling on the given video frames. If spatial_idx is + -1, perform random scale, random crop, and random flip on the given + frames. 
If spatial_idx is 0, 1, or 2, perform spatial uniform sampling + with the given spatial_idx. + Args: + frames (tensor): frames of images sampled from the video. The + dimension is `num frames` x `height` x `width` x `channel`. + spatial_idx (int): if -1, perform random spatial sampling. If 0, 1, + or 2, perform left, center, right crop if width is larger than + height, and perform top, center, buttom crop if height is larger + than width. + min_scale (int): the minimal size of scaling. + max_scale (int): the maximal size of scaling. + crop_size (int): the size of height and width used to crop the + frames. + Returns: + frames (tensor): spatially sampled frames. + """ + assert spatial_idx in [-1, 0, 1, 2] + if spatial_idx == -1: + frames = transform.random_short_side_scale_jitter( + frames, min_scale, max_scale + ) + frames = transform.random_crop(frames, crop_size) + frames = transform.horizontal_flip(0.5, frames) + else: + # The testing is deterministic and no jitter should be performed. + # min_scale, max_scale, and crop_size are expect to be the same. + assert len({min_scale, max_scale, crop_size}) == 1 + frames = transform.random_short_side_scale_jitter( + frames, min_scale, max_scale + ) + frames = transform.uniform_crop(frames, crop_size, spatial_idx) + return frames diff --git a/datasets/kinetics_tools/loader.py b/datasets/kinetics_tools/loader.py new file mode 100644 index 0000000..9e42dec --- /dev/null +++ b/datasets/kinetics_tools/loader.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Data loader.""" + +import torch +from torch.utils.data.distributed import DistributedSampler +from torch.utils.data.sampler import RandomSampler + +from datasets.kinetics_tools.kinetics import Kinetics + +# Supported datasets. +_DATASET_CATALOG = {"kinetics50": Kinetics, "mimetics50": Kinetics, + "kinetics10": Kinetics, "mimetics10": Kinetics,} + + +def construct_loader(root, split, logger, anno_file, + dataset_name='kinetics', batch_size=64, + num_gpus=1, num_workers=24, pin_memory=True): + """ + :param root: root path + :param split: dataset split ('train','val','test') + :param logger: + :param dataset_name: + :param batch_size: + :param num_gpus: + :param num_workers: + :param pin_memory: + :return: + """ + + assert split in ["train", "val", "test"] + if split in ["train"]: + shuffle = True + drop_last = True + elif split in ["val"]: + shuffle = False + drop_last = False + elif split in ["test"]: + shuffle = False + drop_last = False + assert ( + dataset_name in _DATASET_CATALOG.keys() + ), "Dataset '{}' is not supported".format(dataset_name) + + # Construct the dataset + dataset = _DATASET_CATALOG[dataset_name](root, split, logger, + dataset_name=dataset_name, + anno_file=anno_file) + + # Create a sampler for multi-process training + # sampler = DistributedSampler(dataset) if num_gpus > 1 else None + sampler = None + # Create a loader + loader = torch.utils.data.DataLoader( + dataset, + batch_size=batch_size, + shuffle=shuffle, + num_workers=num_workers, + pin_memory=pin_memory, + drop_last=drop_last, + ) + return loader + + +def shuffle_dataset(loader, cur_epoch): + """" + Shuffles the data. + Args: + loader (loader): data loader to perform shuffle. + cur_epoch (int): number of the current epoch. 
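+    Example (hypothetical training loop, not from the original code):
+        for epoch in range(num_epochs):
+            shuffle_dataset(train_loader, epoch)
+            for frames, labels, indices in train_loader:
+                ...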
+ """ + assert isinstance( + loader.sampler, (RandomSampler, DistributedSampler) + ), "Sampler type '{}' not supported".format(type(loader.sampler)) + # RandomSampler handles shuffling automatically + if isinstance(loader.sampler, DistributedSampler): + # DistributedSampler shuffles data based on epoch + loader.sampler.set_epoch(cur_epoch) diff --git a/datasets/kinetics_tools/meters.py b/datasets/kinetics_tools/meters.py new file mode 100644 index 0000000..6cb7107 --- /dev/null +++ b/datasets/kinetics_tools/meters.py @@ -0,0 +1,413 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Meters.""" + +import datetime +import numpy as np +from collections import deque +import torch +import time + +import slowfast.utils.logging as logging +import slowfast.utils.metrics as metrics + +from fvcore.common.timer import Timer + +logger = logging.get_logger(__name__) + + +class TestMeter(object): + """ + Perform the multi-view ensemble for testing: each video with an unique index + will be sampled with multiple clips, and the predictions of the clips will + be aggregated to produce the final prediction for the video. + The accuracy is calculated with the given ground truth labels. + """ + + def __init__(self, num_videos, num_clips, num_cls, overall_iters): + """ + Construct tensors to store the predictions and labels. Expect to get + num_clips predictions from each video, and calculate the metrics on + num_videos videos. + Args: + num_videos (int): number of videos to test. + num_clips (int): number of clips sampled from each video for + aggregating the final prediction for the video. + num_cls (int): number of classes for each prediction. + overall_iters (int): overall iterations for testing. + """ + + self.iter_timer = Timer() + self.num_clips = num_clips + self.overall_iters = overall_iters + # Initialize tensors. + self.video_preds = torch.zeros((num_videos, num_cls)) + self.video_labels = torch.zeros((num_videos)).long() + self.clip_count = torch.zeros((num_videos)).long() + # Reset metric. + self.reset() + + def reset(self): + """ + Reset the metric. + """ + self.clip_count.zero_() + self.video_preds.zero_() + self.video_labels.zero_() + + def update_stats(self, preds, labels, clip_ids): + """ + Collect the predictions from the current batch and perform on-the-flight + summation as ensemble. + Args: + preds (tensor): predictions from the current batch. Dimension is + N x C where N is the batch size and C is the channel size + (num_cls). + labels (tensor): the corresponding labels of the current batch. + Dimension is N. + clip_ids (tensor): clip indexes of the current batch, dimension is + N. + """ + for ind in range(preds.shape[0]): + vid_id = int(clip_ids[ind]) // self.num_clips + self.video_labels[vid_id] = labels[ind] + self.video_preds[vid_id] += preds[ind] + self.clip_count[vid_id] += 1 + + def log_iter_stats(self, cur_iter): + """ + Log the stats. + Args: + cur_iter (int): the current iteration of testing. + """ + eta_sec = self.iter_timer.seconds() * (self.overall_iters - cur_iter) + eta = str(datetime.timedelta(seconds=int(eta_sec))) + stats = { + "split": "test_iter", + "cur_iter": "{}".format(cur_iter + 1), + "eta": eta, + "time_diff": self.iter_timer.seconds(), + } + logging.log_json_stats(stats) + + def iter_tic(self): + self.iter_timer.reset() + + def iter_toc(self): + self.iter_timer.pause() + + def finalize_metrics(self, ks=(1, 5)): + """ + Calculate and log the final ensembled metrics. 
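+        With the default ks=(1, 5) this logs a json record of the form
+        {"split": "test_final", "top1_acc": "...", "top5_acc": "..."}.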
+ ks (tuple): list of top-k values for topk_accuracies. For example, + ks = (1, 5) correspods to top-1 and top-5 accuracy. + """ + if not all(self.clip_count == self.num_clips): + logger.warning( + "clip count {} ~= num clips {}".format( + self.clip_count, self.num_clips + ) + ) + logger.warning(self.clip_count) + + num_topks_correct = metrics.topks_correct( + self.video_preds, self.video_labels, ks + ) + topks = [ + (x / self.video_preds.size(0)) * 100.0 for x in num_topks_correct + ] + assert len({len(ks), len(topks)}) == 1 + stats = {"split": "test_final"} + for k, topk in zip(ks, topks): + stats["top{}_acc".format(k)] = "{:.{prec}f}".format(topk, prec=2) + logging.log_json_stats(stats) + + +class ScalarMeter(object): + """ + A scalar meter uses a deque to track a series of scaler values with a given + window size. It supports calculating the median and average values of the + window, and also supports calculating the global average. + """ + + def __init__(self, window_size): + """ + Args: + window_size (int): size of the max length of the deque. + """ + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + + def reset(self): + """ + Reset the deque. + """ + self.deque.clear() + self.total = 0.0 + self.count = 0 + + def add_value(self, value): + """ + Add a new scalar value to the deque. + """ + self.deque.append(value) + self.count += 1 + self.total += value + + def get_win_median(self): + """ + Calculate the current median value of the deque. + """ + return np.median(self.deque) + + def get_win_avg(self): + """ + Calculate the current average value of the deque. + """ + return np.mean(self.deque) + + def get_global_avg(self): + """ + Calculate the global mean value. + """ + return self.total / self.count + + +class TrainMeter(object): + """ + Measures training stats. + """ + + def __init__(self, epoch_iters, cfg): + """ + Args: + epoch_iters (int): the overall number of iterations of one epoch. + cfg (CfgNode): configs. + """ + self._cfg = cfg + self.epoch_iters = epoch_iters + self.MAX_EPOCH = cfg.SOLVER.MAX_EPOCH * epoch_iters + self.iter_timer = Timer() + self.loss = ScalarMeter(cfg.LOG_PERIOD) + self.loss_total = 0.0 + self.lr = None + # Current minibatch errors (smoothed over a window). + self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD) + self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD) + # Number of misclassified examples. + self.num_top1_mis = 0 + self.num_top5_mis = 0 + self.num_samples = 0 + + def reset(self): + """ + Reset the Meter. + """ + self.loss.reset() + self.loss_total = 0.0 + self.lr = None + self.mb_top1_err.reset() + self.mb_top5_err.reset() + self.num_top1_mis = 0 + self.num_top5_mis = 0 + self.num_samples = 0 + + def iter_tic(self): + """ + Start to record time. + """ + self.iter_timer.reset() + + def iter_toc(self): + """ + Stop to record time. + """ + self.iter_timer.pause() + + def update_stats(self, top1_err, top5_err, loss, lr, mb_size): + """ + Update the current stats. + Args: + top1_err (float): top1 error rate. + top5_err (float): top5 error rate. + loss (float): loss value. + lr (float): learning rate. + mb_size (int): mini batch size. 
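+            Note: errors are accumulated as err * mb_size, so log_epoch_stats
+            reports the sample-weighted epoch average num_top1_mis / num_samples.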
+ """ + # Current minibatch stats + self.mb_top1_err.add_value(top1_err) + self.mb_top5_err.add_value(top5_err) + self.loss.add_value(loss) + self.lr = lr + # Aggregate stats + self.num_top1_mis += top1_err * mb_size + self.num_top5_mis += top5_err * mb_size + self.loss_total += loss * mb_size + self.num_samples += mb_size + + def log_iter_stats(self, cur_epoch, cur_iter): + """ + log the stats of the current iteration. + Args: + cur_epoch (int): the number of current epoch. + cur_iter (int): the number of current iteration. + """ + if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0: + return + eta_sec = self.iter_timer.seconds() * ( + self.MAX_EPOCH - (cur_epoch * self.epoch_iters + cur_iter + 1) + ) + eta = str(datetime.timedelta(seconds=int(eta_sec))) + mem_usage = misc.gpu_mem_usage() + stats = { + "_type": "train_iter", + "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH), + "iter": "{}/{}".format(cur_iter + 1, self.epoch_iters), + "time_diff": self.iter_timer.seconds(), + "eta": eta, + "top1_err": self.mb_top1_err.get_win_median(), + "top5_err": self.mb_top5_err.get_win_median(), + "loss": self.loss.get_win_median(), + "lr": self.lr, + "mem": int(np.ceil(mem_usage)), + } + logging.log_json_stats(stats) + + def log_epoch_stats(self, cur_epoch): + """ + Log the stats of the current epoch. + Args: + cur_epoch (int): the number of current epoch. + """ + eta_sec = self.iter_timer.seconds() * ( + self.MAX_EPOCH - (cur_epoch + 1) * self.epoch_iters + ) + eta = str(datetime.timedelta(seconds=int(eta_sec))) + mem_usage = misc.gpu_mem_usage() + top1_err = self.num_top1_mis / self.num_samples + top5_err = self.num_top5_mis / self.num_samples + avg_loss = self.loss_total / self.num_samples + stats = { + "_type": "train_epoch", + "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH), + "time_diff": self.iter_timer.seconds(), + "eta": eta, + "top1_err": top1_err, + "top5_err": top5_err, + "loss": avg_loss, + "lr": self.lr, + "mem": int(np.ceil(mem_usage)), + } + logging.log_json_stats(stats) + + +class ValMeter(object): + """ + Measures validation stats. + """ + + def __init__(self, max_iter, cfg): + """ + Args: + max_iter (int): the max number of iteration of the current epoch. + cfg (CfgNode): configs. + """ + self._cfg = cfg + self.max_iter = max_iter + self.iter_timer = Timer() + # Current minibatch errors (smoothed over a window). + self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD) + self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD) + # Min errors (over the full val set). + self.min_top1_err = 100.0 + self.min_top5_err = 100.0 + # Number of misclassified examples. + self.num_top1_mis = 0 + self.num_top5_mis = 0 + self.num_samples = 0 + + def reset(self): + """ + Reset the Meter. + """ + self.iter_timer.reset() + self.mb_top1_err.reset() + self.mb_top5_err.reset() + self.num_top1_mis = 0 + self.num_top5_mis = 0 + self.num_samples = 0 + + def iter_tic(self): + """ + Start to record time. + """ + self.iter_timer.reset() + + def iter_toc(self): + """ + Stop to record time. + """ + self.iter_timer.pause() + + def update_stats(self, top1_err, top5_err, mb_size): + """ + Update the current stats. + Args: + top1_err (float): top1 error rate. + top5_err (float): top5 error rate. + mb_size (int): mini batch size. 
+ """ + self.mb_top1_err.add_value(top1_err) + self.mb_top5_err.add_value(top5_err) + self.num_top1_mis += top1_err * mb_size + self.num_top5_mis += top5_err * mb_size + self.num_samples += mb_size + + def log_iter_stats(self, cur_epoch, cur_iter): + """ + log the stats of the current iteration. + Args: + cur_epoch (int): the number of current epoch. + cur_iter (int): the number of current iteration. + """ + if (cur_iter + 1) % self._cfg.LOG_PERIOD != 0: + return + eta_sec = self.iter_timer.seconds() * (self.max_iter - cur_iter - 1) + eta = str(datetime.timedelta(seconds=int(eta_sec))) + mem_usage = misc.gpu_mem_usage() + stats = { + "_type": "val_iter", + "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH), + "iter": "{}/{}".format(cur_iter + 1, self.max_iter), + "time_diff": self.iter_timer.seconds(), + "eta": eta, + "top1_err": self.mb_top1_err.get_win_median(), + "top5_err": self.mb_top5_err.get_win_median(), + "mem": int(np.ceil(mem_usage)), + } + logging.log_json_stats(stats) + + def log_epoch_stats(self, cur_epoch): + """ + Log the stats of the current epoch. + Args: + cur_epoch (int): the number of current epoch. + """ + top1_err = self.num_top1_mis / self.num_samples + top5_err = self.num_top5_mis / self.num_samples + self.min_top1_err = min(self.min_top1_err, top1_err) + self.min_top5_err = min(self.min_top5_err, top5_err) + mem_usage = misc.gpu_mem_usage() + stats = { + "_type": "val_epoch", + "epoch": "{}/{}".format(cur_epoch + 1, self._cfg.SOLVER.MAX_EPOCH), + "time_diff": self.iter_timer.seconds(), + "top1_err": top1_err, + "top5_err": top5_err, + "min_top1_err": self.min_top1_err, + "min_top5_err": self.min_top5_err, + "mem": int(np.ceil(mem_usage)), + } + logging.log_json_stats(stats) diff --git a/datasets/kinetics_tools/transform.py b/datasets/kinetics_tools/transform.py new file mode 100644 index 0000000..7ef1a71 --- /dev/null +++ b/datasets/kinetics_tools/transform.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +import math +import numpy as np +import torch + + +def random_short_side_scale_jitter(images, min_size, max_size): + """ + Perform a spatial short scale jittering on the given images. + Args: + images (tensor): images to perform scale jitter. Dimension is + `num frames` x `channel` x `height` x `width`. + min_size (int): the minimal size to scale the frames. + max_size (int): the maximal size to scale the frames. + Returns: + (tensor): the scaled images with dimension of + `num frames` x `channel` x `new height` x `new width`. + """ + size = int(round(np.random.uniform(min_size, max_size))) + + height = images.shape[2] + width = images.shape[3] + if (width <= height and width == size) or ( + height <= width and height == size + ): + return images + new_width = size + new_height = size + if width < height: + new_height = int(math.floor((float(height) / width) * size)) + else: + new_width = int(math.floor((float(width) / height) * size)) + + return torch.nn.functional.interpolate( + images, + size=(new_height, new_width), + mode="bilinear", + align_corners=False, + ) + + +def random_crop(images, size): + """ + Perform random spatial crop on the given images. + Args: + images (tensor): images to perform random crop. The dimension is + `num frames` x `channel` x `height` x `width`. + size (int): the size of height and width to crop on the image. + Returns: + (tensor): cropped images with dimension of + `num frames` x `channel` x `size` x `size`. 
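+        For example, a 256x320 input cropped with size=224 draws y_offset from
+        [0, 32) and x_offset from [0, 96).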
+ """ + if images.shape[2] == size and images.shape[3] == size: + return images + height = images.shape[2] + width = images.shape[3] + y_offset = 0 + if height > size: + y_offset = int(np.random.randint(0, height - size)) + x_offset = 0 + if width > size: + x_offset = int(np.random.randint(0, width - size)) + cropped = images[ + :, :, y_offset : y_offset + size, x_offset : x_offset + size + ] + return cropped + + +def horizontal_flip(prob, images): + """ + Perform horizontal flip on the given images. + Args: + prob (float): probility to flip the images. + images (tensor): images to perform horizontal flip, the dimension is + `num frames` x `channel` x `height` x `width`. + Returns: + (tensor): images with dimension of + `num frames` x `channel` x `height` x `width`. + """ + if np.random.uniform() < prob: + images = images.flip((-1)) + return images + + +def uniform_crop(images, size, spatial_idx): + """ + Perform uniform spatial sampling on the images. + Args: + images (tensor): images to perform uniform crop. The dimension is + `num frames` x `channel` x `height` x `width`. + size (int): size of height and weight to crop the images. + spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width + is larger than height. Or 0, 1, or 2 for top, center, and bottom + crop if height is larger than width. + Returns: + cropped (tensor): images with dimension of + `num frames` x `channel` x `size` x `size`. + """ + assert spatial_idx in [0, 1, 2] + height = images.shape[2] + width = images.shape[3] + + y_offset = int(math.ceil((height - size) / 2)) + x_offset = int(math.ceil((width - size) / 2)) + + if height > width: + if spatial_idx == 0: + y_offset = 0 + elif spatial_idx == 2: + y_offset = height - size + else: + if spatial_idx == 0: + x_offset = 0 + elif spatial_idx == 2: + x_offset = width - size + cropped = images[ + :, :, y_offset : y_offset + size, x_offset : x_offset + size + ] + return cropped diff --git a/datasets/kinetics_tools/video_container.py b/datasets/kinetics_tools/video_container.py new file mode 100644 index 0000000..edd75f0 --- /dev/null +++ b/datasets/kinetics_tools/video_container.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +import av + + +def get_video_container(path_to_vid): + """ + Given the path to the video, return the pyav video container. + Args: + path_to_vid (str): patth to the video. + Returns: + container (container): pyav video container. + """ + container = av.open(path_to_vid) + return container diff --git a/datasets/mimetics/download.py b/datasets/mimetics/download.py new file mode 100644 index 0000000..a1dbb8c --- /dev/null +++ b/datasets/mimetics/download.py @@ -0,0 +1,225 @@ +import argparse +import glob +import json +import os +import shutil +import subprocess +import uuid +from collections import OrderedDict + +from joblib import delayed +from joblib import Parallel +import pandas as pd + + +def create_video_folders(dataset, output_dir, tmp_dir): + """Creates a directory for each label name in the dataset.""" + if 'label-name' not in dataset.columns: + this_dir = os.path.join(output_dir, 'test') + if not os.path.exists(this_dir): + os.makedirs(this_dir) + # I should return a dict but ... 
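+        # (construct_video_filename below also accepts a plain directory string
+        # for this label-less test case, so returning a str works.)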
+ return this_dir + if not os.path.exists(output_dir): + os.makedirs(output_dir) + if not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + + label_to_dir = {} + for label_name in dataset['label-name'].unique(): + this_dir = os.path.join(output_dir, label_name) + if not os.path.exists(this_dir): + os.makedirs(this_dir) + label_to_dir[label_name] = this_dir + return label_to_dir + + +def construct_video_filename(row, label_to_dir, trim_format='%06d'): + """Given a dataset row, this function constructs the + output filename for a given video. + """ + basename = '%s_%s_%s.mp4' % (row['video-id'], + trim_format % row['start-time'], + trim_format % row['end-time']) + if not isinstance(label_to_dir, dict): + dirname = label_to_dir + else: + dirname = label_to_dir[row['label-name']] + output_filename = os.path.join(dirname, basename) + return output_filename + + +def download_clip(video_identifier, output_filename, + start_time, end_time, + tmp_dir='/tmp/kinetics', + num_attempts=5, + url_base='https://www.youtube.com/watch?v='): + """Download a video from youtube if exists and is not blocked. + + arguments: + --------- + video_identifier: str + Unique YouTube video identifier (11 characters) + output_filename: str + File path where the video will be stored. + start_time: float + Indicates the begining time in seconds from where the video + will be trimmed. + end_time: float + Indicates the ending time in seconds of the trimmed video. + """ + # Defensive argument checking. + assert isinstance(video_identifier, str), 'video_identifier must be string' + assert isinstance(output_filename, str), 'output_filename must be string' + assert len(video_identifier) == 11, 'video_identifier must have length 11' + + status = False + # Construct command line for getting the direct video link. + tmp_filename = os.path.join(tmp_dir, + '%s.%%(ext)s' % uuid.uuid4()) + command = ['youtube-dl', + '--quiet', '--no-warnings', + '-f', 'mp4', + '-o', '"%s"' % tmp_filename, + '"%s"' % (url_base + video_identifier)] + command = ' '.join(command) + attempts = 0 + while True: + try: + output = subprocess.check_output(command, shell=True, + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as err: + attempts += 1 + if attempts == num_attempts: + return status, err.output + else: + break + + tmp_filename = glob.glob('%s*' % tmp_filename.split('.')[0])[0] + # Construct command to trim the videos (ffmpeg required). + command = ['ffmpeg', + '-i', '"%s"' % tmp_filename, + '-ss', str(start_time), + '-t', str(end_time - start_time), + '-c:v', 'libx264', '-c:a', 'copy', + '-threads', '1', + '-loglevel', 'panic', + '"%s"' % output_filename] + command = ' '.join(command) + try: + output = subprocess.check_output(command, shell=True, + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as err: + return status, err.output + + # Check if the video was successfully saved. 
+ status = os.path.exists(output_filename) + os.remove(tmp_filename) + return status, 'Downloaded' + + +def download_clip_wrapper(row, label_to_dir, trim_format, tmp_dir): + """Wrapper for parallel processing purposes.""" + output_filename = construct_video_filename(row, label_to_dir, + trim_format) + clip_id = os.path.basename(output_filename).split('.mp4')[0] + if os.path.exists(output_filename): + if os.path.getsize(output_filename) > 262: + status = tuple([clip_id, True, 'Exists']) + print (status) + return status + else: + os.remove(output_filename) + + downloaded, log = download_clip(row['video-id'], output_filename, + row['start-time'], row['end-time'], + tmp_dir=tmp_dir) + status = tuple([clip_id, downloaded, log]) + print (status) + return status + + +def parse_kinetics_annotations(input_csv, ignore_is_cc=False): + """Returns a parsed DataFrame. + + arguments: + --------- + input_csv: str + Path to CSV file containing the following columns: + 'YouTube Identifier,Start time,End time,Class label' + + returns: + ------- + dataset: DataFrame + Pandas with the following columns: + 'video-id', 'start-time', 'end-time', 'label-name' + """ + df = pd.read_csv(input_csv) + if 'youtube_id' in df.columns: + columns = OrderedDict([ + ('youtube_id', 'video-id'), + ('time_start', 'start-time'), + ('time_end', 'end-time'), + ('label', 'label-name')]) + df.rename(columns=columns, inplace=True) + if ignore_is_cc: + df = df.loc[:, df.columns.tolist()[:-1]] + return df + + +def main(input_csv, output_dir, + trim_format='%06d', num_jobs=24, tmp_dir='/tmp/kinetics', + drop_duplicates=False): + + # Reading and parsing Kinetics. + dataset = parse_kinetics_annotations(input_csv) + # if os.path.isfile(drop_duplicates): + # print('Attempt to remove duplicates') + # old_dataset = parse_kinetics_annotations(drop_duplicates, + # ignore_is_cc=True) + # df = pd.concat([dataset, old_dataset], axis=0, ignore_index=True) + # df.drop_duplicates(inplace=True, keep=False) + # print(dataset.shape, old_dataset.shape) + # dataset = df + # print(dataset.shape) + + # Creates folders where videos will be saved later. + label_to_dir = create_video_folders(dataset, output_dir, tmp_dir) + + # Download all clips. + if num_jobs == 1: + status_lst = [] + for i, row in dataset.iterrows(): + status_lst.append(download_clip_wrapper(row, label_to_dir, + trim_format, tmp_dir)) + else: + status_lst = Parallel(n_jobs=num_jobs)(delayed(download_clip_wrapper)( + row, label_to_dir, + trim_format, tmp_dir) for i, row in dataset.iterrows()) + + # Clean tmp dir. + shutil.rmtree(tmp_dir) + + # Save download report. + # with open('download_report.json', 'w') as fobj: + # fobj.write(json.dumps(status_lst)) + + +if __name__ == '__main__': + description = 'Helper script for downloading and trimming kinetics videos.' 
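+    # Hypothetical invocation (assumes youtube-dl and ffmpeg are on PATH; the
+    # output directory is an example path):
+    #   python datasets/mimetics/download.py datasets/mimetics/mimetics_v1.0.csv \
+    #       /data/mimetics --num-jobs 8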
+ p = argparse.ArgumentParser(description=description) + p.add_argument('input_csv', type=str, + help=('CSV file containing the following format: ' + 'YouTube Identifier,Start time,End time,Class label')) + p.add_argument('output_dir', type=str, + help='Output directory where videos will be saved.') + p.add_argument('-f', '--trim-format', type=str, default='%06d', + help=('This will be the format for the ' + 'filename of trimmed videos: ' + 'videoid_%0xd(start_time)_%0xd(end_time).mp4')) + p.add_argument('-n', '--num-jobs', type=int, default=24) + p.add_argument('-t', '--tmp-dir', type=str, default='/tmp/kinetics') + p.add_argument('--drop-duplicates', type=str, default='non-existent', + help='Unavailable at the moment') + # help='CSV file of the previous version of Kinetics.') + main(**vars(p.parse_args())) \ No newline at end of file diff --git a/datasets/mimetics/mimetics_v1.0.csv b/datasets/mimetics/mimetics_v1.0.csv new file mode 100644 index 0000000..6f2d7c6 --- /dev/null +++ b/datasets/mimetics/mimetics_v1.0.csv @@ -0,0 +1,714 @@ +label,youtube_id,time_start,time_end,split,is_cc +archery,0zFbEjoGlhc,6,10,test,0 +archery,0zFbEjoGlhc,14,24,test,0 +archery,AHMNZopmQBQ,71,74,test,0 +archery,GZWM9or9AhM,40,44,test,0 +archery,N4WKMa_KKYc,23,24,test,0 +archery,Nru0Tb3f6lg,14,15,test,0 +archery,SHVMWGGAkGQ,104,109,test,0 +archery,SdY0GFiME2c,393,395,test,0 +archery,Wj4-fNnZqIc,3,7,test,0 +archery,aXknIbHLPLM,13,16,test,0 +archery,ci9pKRJWueg,48,56,test,0 +archery,eu3whHZ63Jo,24,26,test,0 +archery,kQIZjktFIWY,165,168,test,0 +archery,qOOs5bnc25g,12,15,test,0 +archery,raooqfayrIQ,50,51,test,0 +archery,rn4TUxN67XY,229,238,test,0 +archery,td7OmZ4YTNs,380,384,test,0 +archery,x58YK75ijjY,21,30,test,0 +archery,zedVxImHkK8,22,23,test,0 +bowling,-xdBnY59MyY,108,112,test,0 +bowling,0Pxoj58NcqI,27,30,test,0 +bowling,5_R9jYis68Y,21,31,test,0 +bowling,9ysdRIode6A,105,108,test,0 +bowling,DrhqyqLiodc,62,64,test,0 +bowling,DrhqyqLiodc,65,66,test,0 +bowling,DrhqyqLiodc,66,69,test,0 +bowling,GeiX1Af9HtI,83,93,test,0 +bowling,HeDQh6MQ8_E,27,30,test,0 +bowling,HpycA1NELVM,123,125,test,0 +bowling,RY7s2xwD5Pk,2,7,test,0 +bowling,TRcCyetKj0o,51,56,test,0 +bowling,a9xyg4iz7KM,3,5,test,0 +brushing hair,37R1c2TOoCY,100,109,test,0 +brushing hair,5CV-aGIS00w,70,75,test,0 +brushing hair,5HfC8ROl_Z8,73,75,test,0 +brushing hair,5r3Z45QAmxI,205,211,test,0 +brushing hair,8FtwAj8XQ7o,206,208,test,0 +brushing hair,A2Pvv6y0jOw,3,12,test,0 +brushing hair,ABBJ5QGrMlI,98,101,test,0 +brushing hair,DuXviwv7VBM,63,70,test,0 +brushing hair,EfVGZBDtC1c,85,92,test,0 +brushing hair,HXldFQk7lv8,126,129,test,0 +brushing hair,JPBjBflm7V4,63,69,test,0 +brushing hair,LlXKCeXsdNg,86,89,test,0 +brushing hair,MNj1VOYinX0,63,73,test,0 +brushing hair,PjUiNabLd04,469,472,test,0 +brushing hair,RXUC8ebzAEI,123,133,test,0 +brushing hair,UbdK6pKQnSQ,110,113,test,0 +brushing hair,ZZheLvdGg38,130,132,test,0 +brushing hair,_HDQfm5f36U,83,93,test,0 +brushing hair,ktaZ7IRhifs,25,30,test,0 +brushing hair,ong17Z8dBVg,101,107,test,0 +brushing teeth,1lukUCL5pS4,91,101,test,0 +brushing teeth,2OYAboBWNRc,59,69,test,0 +brushing teeth,4XlPCuCEfUg,58,60,test,0 +brushing teeth,5CV-aGIS00w,144,147,test,0 +brushing teeth,8FtwAj8XQ7o,120,125,test,0 +brushing teeth,Es0RP4KFhxM,10,13,test,0 +brushing teeth,MNj1VOYinX0,44,47,test,0 +brushing teeth,SZ44v1ejnac,19,29,test,0 +brushing teeth,TBdnqWOtY-o,0,8,test,0 +brushing teeth,_HDQfm5f36U,37,42,test,0 +brushing teeth,a5SMXR1BUL0,10,20,test,0 +brushing teeth,n7ew4qIamfA,47,52,test,0 +brushing teeth,x9I8pjBmX28,20,23,test,0 
+brushing teeth,xyXEdjivNDU,51,55,test,0 +brushing teeth,yyTJQN_e1q0,10,17,test,0 +canoeing or kayaking,1Ng5ULurhh4,180,190,test,0 +canoeing or kayaking,89BVtt8r0hQ,25,30,test,0 +canoeing or kayaking,9MKtWBhuexI,11,21,test,0 +canoeing or kayaking,CjXn0c_XvDE,59,61,test,0 +canoeing or kayaking,DbAA2Xb3dcU,50,60,test,0 +canoeing or kayaking,ENzcF7wbvkU,1,6,test,0 +canoeing or kayaking,LUmAuEeKS_M,2,5,test,0 +canoeing or kayaking,T7mhaaJ70u4,13,16,test,0 +canoeing or kayaking,TCIbkyhlPpg,15,25,test,0 +canoeing or kayaking,TGK0SkDTDrE,3,6,test,0 +canoeing or kayaking,c2Cl52b3A7g,5,15,test,0 +canoeing or kayaking,hBEUQlh6PNs,94,96,test,0 +canoeing or kayaking,i1fbSuUV5Nc,39,49,test,0 +canoeing or kayaking,o5soCfdcNdU,0,10,test,0 +catching or throwing baseball,-CM9SDKQRQc,91,93,test,0 +catching or throwing baseball,0QAUWDBvbHc,74,75,test,0 +catching or throwing baseball,BpsJdEQKIGI,105,108,test,0 +catching or throwing baseball,Ej9DeLsFxNs,26,27,test,0 +catching or throwing baseball,FaCkfAZOgOM,40,41,test,0 +catching or throwing baseball,L4lqtPwU5vY,122,123,test,0 +catching or throwing baseball,NAkrjA7GR84,29,31,test,0 +catching or throwing baseball,TRcCyetKj0o,107,112,test,0 +catching or throwing baseball,YREQPADOWnM,128,130,test,0 +catching or throwing baseball,bodvGG0lOFY,76,77,test,0 +catching or throwing baseball,bodvGG0lOFY,118,119,test,0 +catching or throwing baseball,lpZs8sx51tk,28,31,test,0 +catching or throwing baseball,mwRu5ngESZc,26,27,test,0 +catching or throwing baseball,tKS0NvaO0h4,82,85,test,0 +catching or throwing frisbee,-7MNk_seKxo,0,6,test,0 +catching or throwing frisbee,-7MNk_seKxo,30,33,test,0 +catching or throwing frisbee,GdKK42PIcB0,6,10,test,0 +catching or throwing frisbee,GdKK42PIcB0,81,84,test,0 +catching or throwing frisbee,ItllLoYW1Og,28,30,test,0 +catching or throwing frisbee,ItllLoYW1Og,74,76,test,0 +catching or throwing frisbee,P-gHg3cUU2A,19,21,test,0 +catching or throwing frisbee,P-gHg3cUU2A,46,47,test,0 +catching or throwing frisbee,P-gHg3cUU2A,60,64,test,0 +catching or throwing frisbee,RQkSoH9WTks,23,26,test,0 +catching or throwing frisbee,RQkSoH9WTks,29,31,test,0 +catching or throwing frisbee,TazunpSKrq0,25,31,test,0 +catching or throwing frisbee,TazunpSKrq0,73,76,test,0 +catching or throwing frisbee,zS4yOVFTEvM,10,13,test,0 +clean and jerk,AhuE6ybVnh0,353,363,test,0 +clean and jerk,GKbkRz9TtEs,159,169,test,0 +clean and jerk,KHafHqcY0zM,184,187,test,0 +clean and jerk,LZKWWQHg6tI,670,678,test,0 +clean and jerk,O9m7k8GagTA,263,273,test,0 +clean and jerk,PGEtxyw3wog,0,5,test,0 +clean and jerk,RWLqaD9v6N8,204,208,test,0 +clean and jerk,SdY0GFiME2c,410,415,test,0 +clean and jerk,TRcCyetKj0o,202,211,test,0 +clean and jerk,pcBXtl667nI,160,170,test,0 +clean and jerk,qe5KLw8qee0,287,297,test,0 +clean and jerk,rn4TUxN67XY,75,85,test,0 +clean and jerk,xxWzjiEEEyo,36,41,test,0 +cleaning windows,2L0fVqR_xtU,3,13,test,0 +cleaning windows,4pfixAOJK_c,35,45,test,0 +cleaning windows,6QV4jL5lrJI,12,16,test,0 +cleaning windows,NMe-qhhPUTo,156,166,test,0 +cleaning windows,RLkJThRccyc,11,15,test,0 +cleaning windows,VNX0UCoor6M,87,97,test,0 +cleaning windows,XvKLqW2XTSA,44,48,test,0 +cleaning windows,XvKLqW2XTSA,66,72,test,0 +cleaning windows,cPZl3pipfEw,20,25,test,0 +cleaning windows,cPZl3pipfEw,63,73,test,0 +cleaning windows,fhIOg7xMQlA,68,73,test,0 +cleaning windows,lpQQWMFWNAM,0,1,test,0 +cleaning windows,tJVNul1R24o,73,81,test,0 +cleaning windows,twz6DMLXS4U,21,31,test,0 +cleaning windows,xGs73XJkM24,0,2,test,0 +cleaning windows,zNI3Cw58xH8,0,10,test,0 +climbing a 
rope,29CXH6FI8W4,18,23,test,0 +climbing a rope,3EeU-kSO0bY,64,68,test,0 +climbing a rope,79KsitsRxK4,88,96,test,0 +climbing a rope,F9H7ERFv0n0,0,10,test,0 +climbing a rope,Rxm1Bd10hhw,15,22,test,0 +climbing a rope,TTXE56hW3jw,8,18,test,0 +climbing a rope,UbdK6pKQnSQ,14,16,test,0 +climbing a rope,bz0COpP1iV0,92,96,test,0 +climbing a rope,eDDxKQ9KqFk,12,17,test,0 +climbing a rope,jEPBGRHgTwg,177,180,test,0 +climbing a rope,qe5KLw8qee0,181,184,test,0 +climbing a rope,uP9mY0A_0Ts,21,25,test,0 +climbing a rope,vf-e-dWOxgE,39,49,test,0 +climbing a rope,x8vuJPisFOg,8,15,test,0 +climbing ladder,7u3k9tzCHDw,100,110,test,0 +climbing ladder,CRSVqiLk8OQ,36,46,test,0 +climbing ladder,ETxZ7HjUrhE,4,11,test,0 +climbing ladder,Fii2Pf_UBGA,0,3,test,0 +climbing ladder,Hmo6-E7DYO4,2,8,test,0 +climbing ladder,SdY0GFiME2c,457,461,test,0 +climbing ladder,T3InRU7tizs,6,12,test,0 +climbing ladder,UbdK6pKQnSQ,19,23,test,0 +climbing ladder,bXnIBugOVLs,56,66,test,0 +climbing ladder,g5NWNG4XyS8,76,85,test,0 +climbing ladder,hBEUQlh6PNs,284,286,test,0 +climbing ladder,j1JtNwBg8Fs,5,10,test,0 +climbing ladder,nG7nXhS_Iq8,185,195,test,0 +deadlifting,7xUGM3fJIks,4,6,test,0 +deadlifting,9_UCzNl796M,141,145,test,0 +deadlifting,Mdz5lsRh7pI,144,146,test,0 +deadlifting,Yxzp8o1DdDU,116,120,test,0 +deadlifting,hJn2UPaicZc,8,10,test,0 +deadlifting,ipB9C6YuxUI,0,3,test,0 +deadlifting,ipB9C6YuxUI,7,12,test,0 +deadlifting,jvzsEAhGMAE,325,326,test,0 +deadlifting,o7Aj7Bq8Kos,90,91,test,0 +deadlifting,pjZJCBOGwRU,5,10,test,0 +deadlifting,pn1dt2YD46s,33,35,test,0 +dribbling basketball,3cFRbGaV5-k,4,8,test,0 +dribbling basketball,BT0Q_P1fBlY,380,386,test,0 +dribbling basketball,BdEDEi_LFOA,21,23,test,0 +dribbling basketball,EDLkX44ocqw,5,9,test,0 +dribbling basketball,EYDCiKGpSFs,10,12,test,0 +dribbling basketball,JSIMsw3NrGM,58,63,test,0 +dribbling basketball,L4lqtPwU5vY,87,93,test,0 +dribbling basketball,Mio4-VYusug,6,8,test,0 +dribbling basketball,VcWQTlNOzoU,39,40,test,0 +dribbling basketball,VcWQTlNOzoU,131,133,test,0 +dribbling basketball,ZsMqq2ByFgM,7,13,test,0 +dribbling basketball,cIJd33dNr-4,6,12,test,0 +dribbling basketball,lSxv5M4-8tE,31,39,test,0 +dribbling basketball,o1q2BURthvU,63,73,test,0 +dribbling basketball,pWJ3n5NTgDQ,7,10,test,0 +dribbling basketball,sByIMfW6aI0,20,21,test,0 +dribbling basketball,wILLcjlT58U,1,4,test,0 +dribbling basketball,xv60PJOPVgg,57,58,test,0 +drinking,1LBm1M3DBNY,89,92,test,0 +drinking,5oCZjxPE2dg,60,66,test,0 +drinking,5r3Z45QAmxI,271,276,test,0 +drinking,7by8NBSszwk,19,26,test,0 +drinking,C_zr0Kq_qfg,69,71,test,0 +drinking,EkxyU6gC0zQ,100,104,test,0 +drinking,F_vRbO50J20,190,194,test,0 +drinking,ISAMIkCV5Is,367,370,test,0 +drinking,JV3qQldC1eg,390,391,test,0 +drinking,JV3qQldC1eg,399,402,test,0 +drinking,KgfRK_3hGiY,326,330,test,0 +drinking,LkMG3Ntn-g8,56,60,test,0 +drinking,Oa3GfbRU67E,165,167,test,0 +drinking,PZxs-3Mfnr8,120,122,test,0 +drinking,Q6g6dU0IgBU,5,10,test,0 +drinking,TFSIm3Zeecg,53,55,test,0 +drinking,TGhDWDW7WhY,562,567,test,0 +drinking,UinTEeZHWas,28,33,test,0 +drinking,bM3Dqa0WX_g,15,16,test,0 +drinking,cRsvdcmXk8M,5,9,test,0 +drinking,ftRFY6vUIEw,235,240,test,0 +drinking,gFhirwb2vtk,71,74,test,0 +drinking,gYZbufD61Og,85,92,test,0 +drinking,i1x9qnLy3Ng,27,29,test,0 +drinking,n7ew4qIamfA,53,55,test,0 +drinking,svY2E5YUUxc,49,59,test,0 +drinking,xQs2G_09NxU,65,67,test,0 +driving car,0eVaGBh2aYg,50,60,test,0 +driving car,0uLKTeX6HjA,1,4,test,0 +driving car,FPMBV3rd_hI,77,87,test,0 +driving car,GHahcY5ew3o,1,5,test,0 +driving car,MFWgsGsKGMQ,182,188,test,0 +driving 
car,Ous_8Cm7G1s,213,223,test,0 +driving car,QRiAY_889YM,128,138,test,0 +driving car,SrS39FDK3HY,28,35,test,0 +driving car,aGZL2rw-qtk,30,40,test,0 +driving car,aJd5kfrA0Vs,341,351,test,0 +driving car,bkzsoJf-UsA,59,61,test,0 +driving car,cOZSG3wYTLo,64,72,test,0 +driving car,gYZbufD61Og,130,132,test,0 +driving car,lIrkASGtoz8,38,46,test,0 +driving car,m-H7X-deSTk,216,219,test,0 +driving car,mm1HgQbXFEE,100,106,test,0 +dunking basketball,ElJQ_h3Y-0U,34,37,test,0 +dunking basketball,VcWQTlNOzoU,134,137,test,0 +dunking basketball,_sKrWYJPc2g,28,33,test,0 +dunking basketball,duC8ZXFfo9k,71,73,test,0 +dunking basketball,duC8ZXFfo9k,120,124,test,0 +dunking basketball,eMXo3QIsFQo,178,180,test,0 +dunking basketball,ucaR1j6Ajs8,9,11,test,0 +dunking basketball,v0fHvZxWUqk,0,2,test,0 +dunking basketball,v0fHvZxWUqk,4,6,test,0 +dunking basketball,v0fHvZxWUqk,8,10,test,0 +eating cake,1LBm1M3DBNY,76,86,test,0 +eating cake,89vQjjd5M70,129,138,test,0 +eating cake,COppfZLpk-8,65,75,test,0 +eating cake,CmwzhtJ98tk,69,71,test,0 +eating cake,FPMBV3rd_hI,3,10,test,0 +eating cake,LlXKCeXsdNg,177,183,test,0 +eating cake,M5-HKclj15U,42,52,test,0 +eating cake,N4WKMa_KKYc,1,3,test,0 +eating cake,PZxs-3Mfnr8,210,213,test,0 +eating cake,PjUiNabLd04,623,625,test,0 +eating cake,RWLqaD9v6N8,140,146,test,0 +eating cake,TdGVBQbBTbo,11,18,test,0 +eating cake,c-DsvJAjK0w,50,57,test,0 +eating cake,cLqHYQY3Sjo,52,56,test,0 +eating cake,ftRFY6vUIEw,140,150,test,0 +eating cake,kGpDLyc59ik,190,200,test,0 +eating cake,kGpDLyc59ik,320,330,test,0 +eating cake,uPCzjHPS--U,70,72,test,0 +eating cake,zvPgORDt900,36,46,test,0 +eating ice cream,37R1c2TOoCY,146,151,test,0 +eating ice cream,53SXVzTpCjg,37,45,test,0 +eating ice cream,5bAFqsG37C0,0,8,test,0 +eating ice cream,EUHjIrtQvms,2,8,test,0 +eating ice cream,EjVnQyhz8uo,202,210,test,0 +eating ice cream,LAwv-gU3lqE,55,57,test,0 +eating ice cream,N3coivBwaqc,50,52,test,0 +eating ice cream,QDs8zNsJVUI,0,6,test,0 +eating ice cream,T2gTIworsLk,49,50,test,0 +eating ice cream,X2pmhFCUDJw,95,100,test,0 +eating ice cream,yVMqtk4MDj8,9,12,test,0 +flying kite,-fiZ0aXnDX8,156,166,test,0 +flying kite,Kc7a47NaUrU,1,9,test,0 +flying kite,MbmFcjcGf8w,196,203,test,0 +flying kite,NjaSSRcBgus,85,95,test,0 +flying kite,OXg_ybIJYuE,48,57,test,0 +flying kite,VwDR7DvX4PE,10,20,test,0 +flying kite,_JYwMtF5UzA,0,10,test,0 +flying kite,eF5Yv3U1UaY,150,153,test,0 +flying kite,sLXBT10xg8w,28,38,test,0 +flying kite,xwOp2GJ3kDI,234,239,test,0 +golf driving,-vEG9b1osjk,0,2,test,0 +golf driving,B281ZM_guy4,441,446,test,0 +golf driving,B2HcgdVgAYE,17,22,test,0 +golf driving,CjXn0c_XvDE,44,51,test,0 +golf driving,D4p5RrIMcBo,48,53,test,0 +golf driving,JiH47aX9sqU,17,23,test,0 +golf driving,LZKWWQHg6tI,608,612,test,0 +golf driving,SLQOMHEeURM,180,184,test,0 +golf driving,TRcCyetKj0o,228,236,test,0 +golf driving,eMXo3QIsFQo,100,104,test,0 +golf driving,enqzVO7JoK4,35,36,test,0 +golf driving,htZoKnl5OE4,13,18,test,0 +golf driving,mW2ZS9Y2m8E,8,10,test,0 +golf driving,nEkB5deP0T8,117,122,test,0 +golf driving,vA0v6Xp7CIU,105,112,test,0 +golf driving,zLr8OGVDTFA,5,8,test,0 +hitting baseball,-CM9SDKQRQc,150,152,test,0 +hitting baseball,0xA0SB7uFcc,19,20,test,0 +hitting baseball,IL_G87_XyxU,81,83,test,0 +hitting baseball,NAkrjA7GR84,112,115,test,0 +hitting baseball,OG4GyiHdRAI,75,76,test,0 +hitting baseball,S64EAB8haMw,15,19,test,0 +hitting baseball,TRcCyetKj0o,115,116,test,0 +hitting baseball,YvKce9m1z7g,4,6,test,0 +hitting baseball,ZMJixoSYf8Q,45,47,test,0 +hitting baseball,bodvGG0lOFY,78,81,test,0 +hitting 
baseball,eMXo3QIsFQo,114,116,test,0 +hitting baseball,hvD1Jypn7cg,108,110,test,0 +hitting baseball,lpZs8sx51tk,15,17,test,0 +hitting baseball,n98HEowTnkQ,11,12,test,0 +hitting baseball,x3QNv54PtlE,17,20,test,0 +hurdling,3hxZTxU0Svw,0,2,test,0 +hurdling,HHvryR3MtRg,0,5,test,0 +hurdling,QK87X-9J-ps,0,4,test,0 +hurdling,Wy7qUxpzmzM,4,10,test,0 +hurdling,bawjQioW0cM,122,129,test,0 +hurdling,lhA3Mxwy8xg,54,55,test,0 +hurdling,ov38wjek-ok,0,10,test,0 +hurdling,svnmmn-sYTs,0,3,test,0 +hurdling,wZXgTlk-rak,0,10,test,0 +hurdling,xtIw4-LOSKw,0,8,test,0 +juggling balls,BRtsvUVpHf4,164,169,test,0 +juggling balls,CytOsGVQyKg,26,27,test,0 +juggling balls,E-Bnewo247E,4,5,test,0 +juggling balls,EJqkPjB6EpQ,74,76,test,0 +juggling balls,WzgnBOXs9Wo,0,2,test,0 +juggling balls,XrTg8mq_geo,4,5,test,0 +juggling balls,aoNi9Im_Iio,134,136,test,0 +juggling balls,dS3Hc-5o9Z8,26,31,test,0 +juggling balls,dbPOVCeb8dU,351,353,test,0 +juggling balls,j5tSDS91AcM,104,108,test,0 +juggling balls,kr8KU7N93dw,3,6,test,0 +juggling balls,u2rLpICCatI,57,62,test,0 +juggling soccer ball,228yqOPCJaY,14,16,test,0 +juggling soccer ball,3YUTULgA-Hc,36,37,test,0 +juggling soccer ball,58RrWSDBsNc,281,283,test,0 +juggling soccer ball,80C5dnlGtiY,4,9,test,0 +juggling soccer ball,80C5dnlGtiY,21,25,test,0 +juggling soccer ball,9AbZIwq7qJU,0,3,test,0 +juggling soccer ball,ArGXhyriIzs,6,12,test,0 +juggling soccer ball,JuGpEUvXEW0,11,15,test,0 +juggling soccer ball,M9rgnIoZQTo,12,15,test,0 +juggling soccer ball,M9rgnIoZQTo,15,17,test,0 +juggling soccer ball,MuHA3dDr5JI,8,10,test,0 +juggling soccer ball,MuHA3dDr5JI,13,14,test,0 +juggling soccer ball,OLTMQBerToU,64,68,test,0 +juggling soccer ball,ZxyVh5NlOF8,26,28,test,0 +juggling soccer ball,eMXo3QIsFQo,138,139,test,0 +juggling soccer ball,j11ARKpK-8Q,20,22,test,0 +juggling soccer ball,pOHZlLytOQw,70,73,test,0 +juggling soccer ball,pOHZlLytOQw,170,180,test,0 +opening bottle,1LBm1M3DBNY,87,89,test,0 +opening bottle,1lukUCL5pS4,146,151,test,0 +opening bottle,7by8NBSszwk,4,5,test,0 +opening bottle,C_zr0Kq_qfg,65,67,test,0 +opening bottle,JV3qQldC1eg,375,379,test,0 +opening bottle,LkMG3Ntn-g8,54,56,test,0 +opening bottle,gFhirwb2vtk,58,68,test,0 +opening bottle,s6psOZC5UE8,90,100,test,0 +opening bottle,xQs2G_09NxU,64,65,test,0 +playing accordion,8_Ex8pZgeAY,50,60,test,0 +playing accordion,9CtKGiqhCoA,23,27,test,0 +playing accordion,HNNBPNQwh8E,8,10,test,0 +playing accordion,HrF_3NMGU2c,2,4,test,0 +playing accordion,J72oxMhjf1Y,37,41,test,0 +playing accordion,LfceW5releU,2,4,test,0 +playing accordion,USAL4OpwJ-w,20,30,test,0 +playing accordion,eeC8u7osjsU,0,8,test,0 +playing accordion,hFsTEHai5Zg,0,10,test,0 +playing accordion,s--xTEYCHUo,90,100,test,0 +playing accordion,vDonJJgnI0w,10,20,test,0 +playing basketball,7zzLN3ZNQEI,0,10,test,0 +playing basketball,JwkAbfty5p8,19,25,test,0 +playing basketball,L4lqtPwU5vY,81,86,test,0 +playing basketball,L4lqtPwU5vY,95,96,test,0 +playing basketball,RVlKVMy-oxI,178,181,test,0 +playing basketball,YfTcVAUWc74,1,3,test,0 +playing basketball,eMXo3QIsFQo,84,89,test,0 +playing basketball,g7qQK6Qxi3U,12,17,test,0 +playing basketball,gW2qYxCB-0g,4,6,test,0 +playing basketball,lhuGcGu3-7E,3,13,test,0 +playing basketball,nrlWz4aIkRI,0,3,test,0 +playing basketball,vNUcBRjaboo,22,27,test,0 +playing basketball,xNeghXu6ZS4,2,7,test,0 +playing basketball,xv60PJOPVgg,3,9,test,0 +playing bass guitar,-0dNIgdbSX8,0,7,test,0 +playing bass guitar,A4nytLns0_M,72,76,test,0 +playing bass guitar,D0ntNYx8Zis,28,38,test,0 +playing bass guitar,Fhd0n8xXTSA,0,10,test,0 +playing bass 
guitar,GkrtgWwIKU0,1,11,test,0 +playing bass guitar,dThVRcijxZQ,10,20,test,0 +playing bass guitar,hK-F1ILPaAk,1,10,test,0 +playing bass guitar,kR8zkH-35kk,173,175,test,0 +playing bass guitar,mGlm3QKBvD8,85,86,test,0 +playing bass guitar,oAU0Pb-74RU,41,51,test,0 +playing bass guitar,pbCEaGyf-Rs,12,22,test,0 +playing bass guitar,wROQoqSDHV0,11,13,test,0 +playing bass guitar,yx_zMTYkoes,34,37,test,0 +playing guitar,8ayeoP9Q-eY,35,40,test,0 +playing guitar,BjPzEu7Phuc,2,4,test,0 +playing guitar,E5MuJVnH8Hg,23,25,test,0 +playing guitar,G_3kwIzNN3g,50,57,test,0 +playing guitar,HXldFQk7lv8,170,172,test,0 +playing guitar,NjytEC7-6Z0,43,48,test,0 +playing guitar,R1dW8M4EqYY,156,162,test,0 +playing guitar,TTtswoWkGQU,149,153,test,0 +playing guitar,WQ8E-AVgcUw,20,30,test,0 +playing guitar,XmoOJuF2kZ8,314,318,test,0 +playing guitar,d0PB6JKaBro,22,30,test,0 +playing guitar,ixBkTYPG82g,15,22,test,0 +playing guitar,j8lFuauKYlg,20,25,test,0 +playing guitar,jJPCbVsrQbw,0,2,test,0 +playing guitar,kFdtIX78qn8,65,68,test,0 +playing guitar,v3K8cu6dT9s,20,30,test,0 +playing guitar,yh1JjC75YIc,9,11,test,0 +playing guitar,zbx22vdGsiI,168,170,test,0 +playing piano,2je914Qrjzw,20,30,test,0 +playing piano,4K7T7XNwkOs,10,20,test,0 +playing piano,5scu_DSFGcs,15,17,test,0 +playing piano,7ulJtKGSNvg,25,35,test,0 +playing piano,I79RUT2mWFc,20,24,test,0 +playing piano,L2-iSLXXUjc,52,58,test,0 +playing piano,SoVAjHzeduo,5,15,test,0 +playing piano,TdX-4k1C0FE,25,30,test,0 +playing piano,cmM4OrWuj_M,2,10,test,0 +playing piano,m-H7X-deSTk,12,17,test,0 +playing piano,oiGd1Q7mm-w,70,80,test,0 +playing piano,soxQzU7uERg,55,60,test,0 +playing piano,trFxg5SOSvI,10,20,test,0 +playing piano,vg8js-NANDo,20,30,test,0 +playing piano,yAX_BfEjH0c,15,25,test,0 +playing piano,yTWTsSIykcg,0,10,test,0 +playing piano,yZurS3ExCPk,27,37,test,0 +playing saxophone,3gXpyjOawoM,25,32,test,0 +playing saxophone,6gHz-cxfeQ8,1,6,test,0 +playing saxophone,7LTKAifeIgQ,5,15,test,0 +playing saxophone,9qVfAdeiq0A,20,27,test,0 +playing saxophone,J2zoY_UfNlQ,5,15,test,0 +playing saxophone,RKCpr9cZUBQ,15,20,test,0 +playing saxophone,_wxtgv2k1c0,107,112,test,0 +playing saxophone,ixBkTYPG82g,48,53,test,0 +playing saxophone,kfRWbynDGu8,96,103,test,0 +playing saxophone,oaNiS_tHfbE,35,39,test,0 +playing saxophone,ouDw29KY3UM,11,15,test,0 +playing saxophone,teRBvK5qvXE,10,15,test,0 +playing saxophone,uJ38b0KykWI,15,20,test,0 +playing tennis,1-v_nFabl1M,14,20,test,0 +playing tennis,1uO6_0Ii-gw,76,78,test,0 +playing tennis,Gc6t9fuZUKY,552,559,test,0 +playing tennis,I0wv7LKnkSw,19,23,test,0 +playing tennis,KukISaZBhcI,50,54,test,0 +playing tennis,L4lqtPwU5vY,70,80,test,0 +playing tennis,MykK0N1WFN0,170,176,test,0 +playing tennis,RVlKVMy-oxI,143,147,test,0 +playing tennis,TRcCyetKj0o,170,172,test,0 +playing tennis,Tg9mJRahkBs,70,76,test,0 +playing tennis,UqzbgSuea94,0,4,test,0 +playing tennis,XAZML7RXsww,111,114,test,0 +playing tennis,Z2_P2eLs-oY,105,114,test,0 +playing tennis,af_KqWyh2fE,55,64,test,0 +playing tennis,gsdJUGBoKdk,25,32,test,0 +playing tennis,rUn1mjk3LhE,15,18,test,0 +playing tennis,u27ZpuwQ7Qw,27,32,test,0 +playing tennis,u4jWPS4uQb4,2,5,test,0 +playing tennis,uGYF0cohpPo,17,20,test,0 +playing trumpet,AXJjDX7f4U8,47,52,test,0 +playing trumpet,Fn866-mNriY,22,32,test,0 +playing trumpet,G7UveGPjZR8,0,6,test,0 +playing trumpet,Of2AffIeV58,3,10,test,0 +playing trumpet,VSwUuE4Mb7k,8,12,test,0 +playing trumpet,XVQvmod4svI,12,15,test,0 +playing trumpet,Zxp7_IMUTRQ,8,15,test,0 +playing trumpet,f-A2Wa8jR7s,5,8,test,0 +playing trumpet,fG8aSj5ttKg,0,9,test,0 
+playing trumpet,mePrNsy72Yg,0,10,test,0 +playing trumpet,ow-WRihXwm0,3,13,test,0 +playing trumpet,qbK-l1kJaWo,0,4,test,0 +playing trumpet,yKwAi-1opzU,23,26,test,0 +playing trumpet,z6woHuuwl1o,0,10,test,0 +playing violin,1LBm1M3DBNY,30,40,test,0 +playing violin,AM72BbbXH4w,55,60,test,0 +playing violin,GgOXsW06EYE,3,10,test,0 +playing violin,KM9jkQlR51E,329,334,test,0 +playing violin,L71DUOPJn88,136,138,test,0 +playing violin,Lci2M22ywSg,0,10,test,0 +playing violin,PTCVfbDY0RM,7,10,test,0 +playing violin,SM6vDMcLHiI,112,115,test,0 +playing violin,TyqATpi_knw,115,119,test,0 +playing violin,U4SUGeh27RA,6,8,test,0 +playing violin,hQmoQOJZqok,50,56,test,0 +playing violin,kfvNRCtoyZc,20,28,test,0 +playing violin,natexREv6Uo,40,50,test,0 +playing violin,q-rGl0v0KI8,30,35,test,0 +playing violin,qNJINPPOZC0,19,29,test,0 +playing violin,scZvLstzkig,0,10,test,0 +playing violin,tJVNul1R24o,10,15,test,0 +playing violin,vYTbXgjqWTw,2,7,test,0 +playing violin,wo4t9PKxGhI,7,13,test,0 +playing violin,zbx22vdGsiI,176,181,test,0 +playing volleyball,4mW-7bipojM,72,79,test,0 +playing volleyball,84GzqqaUiVc,0,7,test,0 +playing volleyball,Fz97Mmtozcg,14,16,test,0 +playing volleyball,GVhUVhlJYDM,35,44,test,0 +playing volleyball,HOHxR8_CqEQ,8,15,test,0 +playing volleyball,L4lqtPwU5vY,149,158,test,0 +playing volleyball,Mt4P2qJzSZs,0,3,test,0 +playing volleyball,P5CYaw5mEM0,105,108,test,0 +playing volleyball,TRcCyetKj0o,77,79,test,0 +playing volleyball,TrrI4cFOYEk,6,9,test,0 +playing volleyball,eGDD8GeE0qo,34,44,test,0 +playing volleyball,feX4zhOfjX4,90,100,test,0 +playing volleyball,uiup9wdVzw0,20,24,test,0 +punching person (boxing),9qUKSCHQ3z0,57,59,test,0 +punching person (boxing),FUIqsc5hMMQ,182,184,test,0 +punching person (boxing),KC7P1KVZUC0,55,62,test,0 +punching person (boxing),PW5e01acttw,172,173,test,0 +punching person (boxing),PuQDdeQxejw,1,5,test,0 +punching person (boxing),SHVMWGGAkGQ,96,100,test,0 +punching person (boxing),SdY0GFiME2c,396,399,test,0 +punching person (boxing),TGhDWDW7WhY,164,166,test,0 +punching person (boxing),ZQAzZN4uRM4,269,271,test,0 +punching person (boxing),bOkmovhjL2M,34,37,test,0 +punching person (boxing),dGFYRCvR_II,197,199,test,0 +punching person (boxing),gYZbufD61Og,214,216,test,0 +punching person (boxing),jz7diIi-jY8,27,29,test,0 +punching person (boxing),wPwRnIwXWNA,171,175,test,0 +punching person (boxing),xJUW5S38h14,54,61,test,0 +punching person (boxing),zl5qNMwJ_jU,120,121,test,0 +reading book,-CY1TkCMwjM,11,18,test,0 +reading book,11EMLAs8D_A,410,414,test,0 +reading book,KSFnKPS3Ol8,64,68,test,0 +reading book,L3yIVkdL43E,30,35,test,0 +reading book,L71DUOPJn88,242,245,test,0 +reading book,S-E3rlSsrz0,23,28,test,0 +reading book,cLqHYQY3Sjo,33,38,test,0 +reading book,ftRFY6vUIEw,59,62,test,0 +reading book,ok3F7541Ews,80,82,test,0 +reading book,qdznL4VyV10,33,38,test,0 +reading newspaper,-CY1TkCMwjM,41,51,test,0 +reading newspaper,0PwWxpWlC-U,12,20,test,0 +reading newspaper,62Sc2tcK7xc,6,11,test,0 +reading newspaper,COppfZLpk-8,50,60,test,0 +reading newspaper,JV3qQldC1eg,296,302,test,0 +reading newspaper,UDuEHi7yrZ8,550,560,test,0 +reading newspaper,Xp1oE4WvPuA,75,85,test,0 +reading newspaper,aTSVeNeJxog,51,57,test,0 +reading newspaper,gw2pK33hEWw,85,87,test,0 +reading newspaper,oxmXnucXlMQ,150,154,test,0 +shooting basketball,BT0Q_P1fBlY,387,390,test,0 +shooting basketball,GyjPg9e1lvs,85,88,test,0 +shooting basketball,L4lqtPwU5vY,93,94,test,0 +shooting basketball,PzzMKgMU02M,27,28,test,0 +shooting basketball,SdY0GFiME2c,450,452,test,0 +shooting 
basketball,Sjmcire2g88,141,144,test,0 +shooting basketball,XvpuGCv9hyo,29,30,test,0 +shooting basketball,XvpuGCv9hyo,42,43,test,0 +shooting basketball,YfTcVAUWc74,4,6,test,0 +shooting basketball,lMucgdCqsqw,19,22,test,0 +shooting basketball,lSxv5M4-8tE,40,41,test,0 +shooting basketball,nd4TpqEJwEs,133,135,test,0 +shooting basketball,nrlWz4aIkRI,3,4,test,0 +shooting basketball,o1q2BURthvU,51,55,test,0 +shooting basketball,o1q2BURthvU,151,152,test,0 +shooting basketball,sByIMfW6aI0,9,11,test,0 +shooting basketball,wILLcjlT58U,8,10,test,0 +shooting basketball,xv60PJOPVgg,25,27,test,0 +shooting basketball,zXem1xEiy-E,8,10,test,0 +shooting goal (soccer),L4lqtPwU5vY,48,50,test,0 +shooting goal (soccer),MuHA3dDr5JI,10,11,test,0 +shooting goal (soccer),Pk2OTQENWro,7,11,test,0 +shooting goal (soccer),RVlKVMy-oxI,240,243,test,0 +shooting goal (soccer),Y22Tr4_naSk,37,39,test,0 +shooting goal (soccer),Z2_P2eLs-oY,94,95,test,0 +shooting goal (soccer),bFzzcjXUnD8,2,5,test,0 +shooting goal (soccer),dbn_Nc3G8jA,4,6,test,0 +shooting goal (soccer),eMXo3QIsFQo,140,143,test,0 +shooting goal (soccer),ffBqvOzUU_A,88,90,test,0 +shooting goal (soccer),j11ARKpK-8Q,67,69,test,0 +shooting goal (soccer),j11ARKpK-8Q,82,86,test,0 +shooting goal (soccer),vlxFyd_hi7I,65,68,test,0 +shooting goal (soccer),vlxFyd_hi7I,90,93,test,0 +skiing (not slalom or crosscountry),CGznm4YKiVU,205,206,test,0 +skiing (not slalom or crosscountry),L-1AdOARfRU,90,92,test,0 +skiing (not slalom or crosscountry),PU0-dmClrFY,11,21,test,0 +skiing (not slalom or crosscountry),TRcCyetKj0o,184,187,test,0 +skiing (not slalom or crosscountry),UV6cW4NoVhw,18,28,test,0 +skiing (not slalom or crosscountry),XmoOJuF2kZ8,113,115,test,0 +skiing (not slalom or crosscountry),dNmYajS1Gng,50,59,test,0 +skiing (not slalom or crosscountry),j734kxPTeD8,240,246,test,0 +skiing (not slalom or crosscountry),yk7yXdzabxY,27,30,test,0 +skiing (not slalom or crosscountry),zseV_Vxy1xg,242,246,test,0 +skiing slalom,0TuF-RqBhWs,137,147,test,0 +skiing slalom,HxOm8k6PVb8,35,45,test,0 +skiing slalom,KISD1YRTeCk,95,105,test,0 +skiing slalom,Q4vDfKGz52Y,5,10,test,0 +skiing slalom,SdY0GFiME2c,75,81,test,0 +skiing slalom,iB3sn6RRKME,0,8,test,0 +skiing slalom,j734kxPTeD8,257,267,test,0 +skiing slalom,ogNC_prhefk,0,7,test,0 +skiing slalom,pKnP88kWTKA,1536,1546,test,0 +skiing slalom,xB5O3xFxm5Y,45,55,test,0 +skipping rope,27e57LLAOM0,4,12,test,0 +skipping rope,4PZAs22DgHA,29,39,test,0 +skipping rope,CGMgGwKiHKg,4,14,test,0 +skipping rope,FrSYiqa_eiA,28,38,test,0 +skipping rope,M1qYDjgWF7g,68,78,test,0 +skipping rope,MeIPJuWWznc,42,52,test,0 +skipping rope,XZBs7xlrNAw,15,25,test,0 +skipping rope,ldDONms8lAI,0,6,test,0 +skipping rope,oRGRNNCGB08,0,10,test,0 +skipping rope,vjy05xjkgkg,6,10,test,0 +skipping rope,zvR84iiKECs,13,18,test,0 +skipping rope,zvR84iiKECs,157,161,test,0 +smoking,2BVHPrletXY,208,214,test,0 +smoking,3IKir8rumOs,100,102,test,0 +smoking,AOgag2eGxyE,53,57,test,0 +smoking,COppfZLpk-8,18,20,test,0 +smoking,EkxyU6gC0zQ,136,146,test,0 +smoking,JiYtY-UpV_E,0,10,test,0 +smoking,M5-HKclj15U,90,92,test,0 +smoking,Ous_8Cm7G1s,156,160,test,0 +smoking,PZxs-3Mfnr8,250,254,test,0 +smoking,QHy1zMKfbmY,34,38,test,0 +smoking,RWLqaD9v6N8,44,47,test,0 +smoking,Tc78yPv_ztM,29,31,test,0 +smoking,fYHOAFqdjaY,26,29,test,0 +smoking,gYZbufD61Og,107,117,test,0 +smoking,kGpDLyc59ik,41,51,test,0 +smoking,o4jvz4kSC7E,80,81,test,0 +smoking,oxmXnucXlMQ,165,171,test,0 +smoking,twz6DMLXS4U,302,312,test,0 +smoking,z9r4IWdxW1w,91,94,test,0 +surfing water,05c4TS7obbM,52,53,test,0 +surfing 
water,LR26Z9ySfXc,51,61,test,0 +surfing water,TRcCyetKj0o,188,192,test,0 +surfing water,bMNS3HYVpa0,5,11,test,0 +surfing water,czIuu4xAJXY,165,171,test,0 +surfing water,eMXo3QIsFQo,212,216,test,0 +surfing water,fKtaBkCHwpU,5,7,test,0 +surfing water,iHxLX1M7v2Q,865,868,test,0 +surfing water,qgHpb__5AIA,30,36,test,0 +surfing water,zVk-hIYzOXY,114,119,test,0 +sweeping floor,E8JBLVSuOPM,102,106,test,0 +sweeping floor,K1MQX20MHE4,0,10,test,0 +sweeping floor,L4lqtPwU5vY,162,166,test,0 +sweeping floor,Vx0iQQWYaB0,16,18,test,0 +sweeping floor,an1BQc5Nitg,113,118,test,0 +sweeping floor,an1BQc5Nitg,139,142,test,0 +sweeping floor,jxHnpPtlGfM,0,2,test,0 +sweeping floor,qBGgfVdl43w,321,323,test,0 +sweeping floor,tJVNul1R24o,0,5,test,0 +sweeping floor,utUp-BoIErA,91,99,test,0 +sweeping floor,vXLLSsfJgMk,294,299,test,0 +sword fighting,-8wp06ZwKQc,0,4,test,0 +sword fighting,05IGI2SgarU,113,123,test,0 +sword fighting,3_mPgxrywfI,90,91,test,0 +sword fighting,7qFtea79UQE,157,160,test,0 +sword fighting,7qFtea79UQE,160,162,test,0 +sword fighting,FUIqsc5hMMQ,48,53,test,0 +sword fighting,JvfW7e6H-io,43,45,test,0 +sword fighting,MeJZ8NOP4IU,2,5,test,0 +sword fighting,S4E02y5K2Fc,13,19,test,0 +sword fighting,TRcCyetKj0o,304,308,test,0 +sword fighting,UbDd0WJE3LU,0,10,test,0 +sword fighting,XUwYB1vVgK0,106,110,test,0 +sword fighting,hXFx9M9e6Bo,70,75,test,0 +sword fighting,mJ0yvdpCZdc,0,9,test,0 +sword fighting,n7isYwg7ooc,183,186,test,0 +sword fighting,pHtvEIWftR4,15,19,test,0 +sword fighting,tu3JQ2edqA0,29,33,test,0 +tying tie,5CV-aGIS00w,77,79,test,0 +tying tie,8FtwAj8XQ7o,230,240,test,0 +tying tie,COppfZLpk-8,205,208,test,0 +tying tie,JPBjBflm7V4,52,54,test,0 +tying tie,ZsoPVDvWzLA,19,29,test,0 +tying tie,dbPOVCeb8dU,283,284,test,0 +tying tie,ktaZ7IRhifs,20,24,test,0 +tying tie,nPCH31K5ISg,9,13,test,0 +walking the dog,-N5zNXtTF-E,15,18,test,0 +walking the dog,05c4TS7obbM,77,80,test,0 +walking the dog,1lukUCL5pS4,270,277,test,0 +walking the dog,7pkMMm2BhTg,12,17,test,0 +walking the dog,CmwzhtJ98tk,25,31,test,0 +walking the dog,K2Ny4BjAc1o,0,7,test,0 +walking the dog,Kby4miFDaco,30,36,test,0 +walking the dog,KqbXanPwUZg,96,100,test,0 +walking the dog,PVwuEaln_6E,22,32,test,0 +walking the dog,U7d5rZ944Vs,53,54,test,0 +walking the dog,ZTjXEV-mvgY,11,14,test,0 +walking the dog,_qs0XOPOp5U,166,174,test,0 +walking the dog,cGt5BblpKvQ,47,53,test,0 +walking the dog,oxmXnucXlMQ,55,57,test,0 +walking the dog,yMVRfX-cYRk,84,89,test,0 +writing,13UBd2tR_sg,20,21,test,0 +writing,3CHYTZ1Zhug,0,6,test,0 +writing,CX77AwLLDcE,26,28,test,0 +writing,FGM8u4oX9Xw,85,87,test,0 +writing,N4WKMa_KKYc,17,19,test,0 +writing,UOJPlbURmwM,48,49,test,0 +writing,aGZL2rw-qtk,117,127,test,0 +writing,prstk5Ol85U,10,11,test,0 +writing,rDr3LnYDK7g,110,111,test,0 +writing,rDr3LnYDK7g,113,115,test,0 +writing,rDr3LnYDK7g,120,122,test,0 +writing,x9I8pjBmX28,77,84,test,0 +writing,xu3hDsutNjU,20,30,test,0 diff --git a/datasets/mimetics/mimetics_v1.0_clsannot.txt b/datasets/mimetics/mimetics_v1.0_clsannot.txt new file mode 100644 index 0000000..f05444a --- /dev/null +++ b/datasets/mimetics/mimetics_v1.0_clsannot.txt @@ -0,0 +1,51 @@ +#label object scene +archery large yes +bowling medium yes +brushing_hair small yes +brushing_teeth small yes +canoeing_or_kayaking large yes +catching_or_throwing_baseball small yes +catching_or_throwing_frisbee small yes +clean_and_jerk large yes +cleaning_windows small yes +climbing_a_rope medium no +climbing_ladder large no +deadlifting large yes +dribbling_basketball medium yes +drinking small no +driving_car large yes 
+dunking_basketball medium yes +eating_cake small no +eating_ice_cream medium no +flying_kite medium no +golf_driving medium yes +hitting_baseball medium yes +hurdling no yes +juggling_balls small no +juggling_soccer_ball medium no +opening_bottle small no +playing_accordion medium no +playing_basketball medium yes +playing_bass_guitar large no +playing_guitar large no +playing_piano large no +playing_saxophone large no +playing_tennis medium yes +playing_trumpet large no +playing_violin large no +playing_volleyball medium yes +punching_person_(boxing) small yes +reading_book small no +reading_newspaper large no +shooting_basketball medium yes +shooting_goal_(soccer) small yes +skiing_(not_slalom_or_crosscountry) no yes +skiing_slalom no yes +skipping_rope small no +smoking small no +surfing_water large yes +sweeping_floor medium no +sword_fighting medium no +tying_tie small no +walking_the_dog large no +writing small no \ No newline at end of file diff --git a/datasets/mimetics/mimetics_v1.0_objectsceneannot.txt b/datasets/mimetics/mimetics_v1.0_objectsceneannot.txt new file mode 100644 index 0000000..d762c5e --- /dev/null +++ b/datasets/mimetics/mimetics_v1.0_objectsceneannot.txt @@ -0,0 +1,714 @@ +#video object scene mime +0zFbEjoGlhc_000006_000010.mp4 no no no +0zFbEjoGlhc_000014_000024.mp4 no no yes +AHMNZopmQBQ_000071_000074.mp4 no no no +GZWM9or9AhM_000040_000044.mp4 no no yes +N4WKMa_KKYc_000023_000024.mp4 no no no +Nru0Tb3f6lg_000014_000015.mp4 no no no +SHVMWGGAkGQ_000104_000109.mp4 no no no +SdY0GFiME2c_000393_000395.mp4 no no yes +Wj4-fNnZqIc_000003_000007.mp4 no no no +aXknIbHLPLM_000013_000016.mp4 no no no +ci9pKRJWueg_000048_000056.mp4 no no no +eu3whHZ63Jo_000024_000026.mp4 no no no +kQIZjktFIWY_000165_000168.mp4 no no yes +qOOs5bnc25g_000012_000015.mp4 no no no +raooqfayrIQ_000050_000051.mp4 no no no +rn4TUxN67XY_000229_000238.mp4 no no no +td7OmZ4YTNs_000380_000384.mp4 no no yes +x58YK75ijjY_000021_000030.mp4 no no no +zedVxImHkK8_000022_000023.mp4 no no no +-xdBnY59MyY_000108_000112.mp4 no no no +0Pxoj58NcqI_000027_000030.mp4 no no no +5_R9jYis68Y_000021_000031.mp4 no no no +9ysdRIode6A_000105_000108.mp4 no no no +DrhqyqLiodc_000062_000064.mp4 no no no +DrhqyqLiodc_000065_000066.mp4 no no no +DrhqyqLiodc_000066_000069.mp4 no no no +GeiX1Af9HtI_000083_000093.mp4 no no yes +HeDQh6MQ8_E_000027_000030.mp4 no no no +HpycA1NELVM_000123_000125.mp4 no no no +RY7s2xwD5Pk_000002_000007.mp4 no yes no +TRcCyetKj0o_000051_000056.mp4 no no no +a9xyg4iz7KM_000003_000005.mp4 no no no +37R1c2TOoCY_000100_000109.mp4 no no yes +5CV-aGIS00w_000070_000075.mp4 no no yes +5HfC8ROl_Z8_000073_000075.mp4 no no no +5r3Z45QAmxI_000205_000211.mp4 no no yes +8FtwAj8XQ7o_000206_000208.mp4 no no yes +A2Pvv6y0jOw_000003_000012.mp4 no no no +ABBJ5QGrMlI_000098_000101.mp4 no no yes +DuXviwv7VBM_000063_000070.mp4 no yes no +EfVGZBDtC1c_000085_000092.mp4 no no no +HXldFQk7lv8_000126_000129.mp4 no no no +JPBjBflm7V4_000063_000069.mp4 no no yes +LlXKCeXsdNg_000086_000089.mp4 no no no +MNj1VOYinX0_000063_000073.mp4 no yes no +PjUiNabLd04_000469_000472.mp4 no no no +RXUC8ebzAEI_000123_000133.mp4 no no yes +UbdK6pKQnSQ_000110_000113.mp4 no no yes +ZZheLvdGg38_000130_000132.mp4 no no no +_HDQfm5f36U_000083_000093.mp4 no no no +ktaZ7IRhifs_000025_000030.mp4 no no no +ong17Z8dBVg_000101_000107.mp4 no no yes +1lukUCL5pS4_000091_000101.mp4 no yes yes +2OYAboBWNRc_000059_000069.mp4 no no no +4XlPCuCEfUg_000058_000060.mp4 no no no +5CV-aGIS00w_000144_000147.mp4 no no yes +8FtwAj8XQ7o_000120_000125.mp4 no no yes 
+Es0RP4KFhxM_000010_000013.mp4 no no no +MNj1VOYinX0_000044_000047.mp4 no yes no +SZ44v1ejnac_000019_000029.mp4 no yes no +TBdnqWOtY-o_000000_000008.mp4 no no no +_HDQfm5f36U_000037_000042.mp4 no no no +a5SMXR1BUL0_000010_000020.mp4 no no no +n7ew4qIamfA_000047_000052.mp4 no no no +x9I8pjBmX28_000020_000023.mp4 no no yes +xyXEdjivNDU_000051_000055.mp4 no no yes +yyTJQN_e1q0_000010_000017.mp4 no no no +1Ng5ULurhh4_000180_000190.mp4 no no no +89BVtt8r0hQ_000025_000030.mp4 no no no +9MKtWBhuexI_000011_000021.mp4 no no no +CjXn0c_XvDE_000059_000061.mp4 no no no +DbAA2Xb3dcU_000050_000060.mp4 no no no +ENzcF7wbvkU_000001_000006.mp4 no no no +LUmAuEeKS_M_000002_000005.mp4 no no yes +T7mhaaJ70u4_000013_000016.mp4 no no no +TCIbkyhlPpg_000015_000025.mp4 no no no +TGK0SkDTDrE_000003_000006.mp4 no no no +c2Cl52b3A7g_000005_000015.mp4 no no no +hBEUQlh6PNs_000094_000096.mp4 no no yes +i1fbSuUV5Nc_000039_000049.mp4 no no yes +o5soCfdcNdU_000000_000010.mp4 yes no no +-CM9SDKQRQc_000091_000093.mp4 no yes yes +0QAUWDBvbHc_000074_000075.mp4 no no no +BpsJdEQKIGI_000105_000108.mp4 no yes no +Ej9DeLsFxNs_000026_000027.mp4 no yes no +FaCkfAZOgOM_000040_000041.mp4 no no no +L4lqtPwU5vY_000122_000123.mp4 no no no +NAkrjA7GR84_000029_000031.mp4 no no no +TRcCyetKj0o_000107_000112.mp4 no no no +YREQPADOWnM_000128_000130.mp4 no no no +bodvGG0lOFY_000076_000077.mp4 no yes no +bodvGG0lOFY_000118_000119.mp4 no yes no +lpZs8sx51tk_000028_000031.mp4 no no no +mwRu5ngESZc_000026_000027.mp4 no no yes +tKS0NvaO0h4_000082_000085.mp4 no no no +-7MNk_seKxo_000000_000006.mp4 no no no +-7MNk_seKxo_000030_000033.mp4 no yes no +GdKK42PIcB0_000006_000010.mp4 no no no +GdKK42PIcB0_000081_000084.mp4 no yes no +ItllLoYW1Og_000028_000030.mp4 no no no +ItllLoYW1Og_000074_000076.mp4 no no no +P-gHg3cUU2A_000019_000021.mp4 no yes no +P-gHg3cUU2A_000046_000047.mp4 no yes no +P-gHg3cUU2A_000060_000064.mp4 no yes no +RQkSoH9WTks_000023_000026.mp4 no yes no +RQkSoH9WTks_000029_000031.mp4 no yes no +TazunpSKrq0_000025_000031.mp4 no no no +TazunpSKrq0_000073_000076.mp4 no no no +zS4yOVFTEvM_000010_000013.mp4 no no no +AhuE6ybVnh0_000353_000363.mp4 no no yes +GKbkRz9TtEs_000159_000169.mp4 no no no +KHafHqcY0zM_000184_000187.mp4 no yes yes +LZKWWQHg6tI_000670_000678.mp4 no no no +O9m7k8GagTA_000263_000273.mp4 no no yes +PGEtxyw3wog_000000_000005.mp4 no no no +RWLqaD9v6N8_000204_000208.mp4 no no yes +SdY0GFiME2c_000410_000415.mp4 no no yes +TRcCyetKj0o_000202_000211.mp4 no no no +pcBXtl667nI_000160_000170.mp4 no no yes +qe5KLw8qee0_000287_000297.mp4 no no yes +rn4TUxN67XY_000075_000085.mp4 no no no +xxWzjiEEEyo_000036_000041.mp4 no no no +2L0fVqR_xtU_000003_000013.mp4 yes no yes +4pfixAOJK_c_000035_000045.mp4 no no no +6QV4jL5lrJI_000012_000016.mp4 yes no no +NMe-qhhPUTo_000156_000166.mp4 no no yes +RLkJThRccyc_000011_000015.mp4 yes no no +VNX0UCoor6M_000087_000097.mp4 yes no no +XvKLqW2XTSA_000044_000048.mp4 yes no yes +XvKLqW2XTSA_000066_000072.mp4 yes no yes +cPZl3pipfEw_000020_000025.mp4 no no no +cPZl3pipfEw_000063_000073.mp4 no no no +fhIOg7xMQlA_000068_000073.mp4 yes no no +lpQQWMFWNAM_000000_000001.mp4 yes yes no +tJVNul1R24o_000073_000081.mp4 no no no +twz6DMLXS4U_000021_000031.mp4 yes no no +xGs73XJkM24_000000_000002.mp4 yes no no +zNI3Cw58xH8_000000_000010.mp4 yes no no +29CXH6FI8W4_000018_000023.mp4 no no no +3EeU-kSO0bY_000064_000068.mp4 no no yes +79KsitsRxK4_000088_000096.mp4 no no yes +F9H7ERFv0n0_000000_000010.mp4 no no no +Rxm1Bd10hhw_000015_000022.mp4 no no yes +TTXE56hW3jw_000008_000018.mp4 no no no 
+UbdK6pKQnSQ_000014_000016.mp4 no no no +bz0COpP1iV0_000092_000096.mp4 no no no +eDDxKQ9KqFk_000012_000017.mp4 no no yes +jEPBGRHgTwg_000177_000180.mp4 no no no +qe5KLw8qee0_000181_000184.mp4 no no yes +uP9mY0A_0Ts_000021_000025.mp4 no no no +vf-e-dWOxgE_000039_000049.mp4 no no no +x8vuJPisFOg_000008_000015.mp4 no no no +7u3k9tzCHDw_000100_000110.mp4 no no yes +CRSVqiLk8OQ_000036_000046.mp4 no no yes +ETxZ7HjUrhE_000004_000011.mp4 no no no +Fii2Pf_UBGA_000000_000003.mp4 no no no +Hmo6-E7DYO4_000002_000008.mp4 no no yes +SdY0GFiME2c_000457_000461.mp4 no no yes +T3InRU7tizs_000006_000012.mp4 no no no +UbdK6pKQnSQ_000019_000023.mp4 no no no +bXnIBugOVLs_000056_000066.mp4 no no no +g5NWNG4XyS8_000076_000085.mp4 no no no +hBEUQlh6PNs_000284_000286.mp4 no no yes +j1JtNwBg8Fs_000005_000010.mp4 no no yes +nG7nXhS_Iq8_000185_000195.mp4 no no yes +7xUGM3fJIks_000004_000006.mp4 no no no +9_UCzNl796M_000141_000145.mp4 no no no +Mdz5lsRh7pI_000144_000146.mp4 no no no +Yxzp8o1DdDU_000116_000120.mp4 no no yes +hJn2UPaicZc_000008_000010.mp4 no yes no +ipB9C6YuxUI_000000_000003.mp4 no no no +ipB9C6YuxUI_000007_000012.mp4 no no no +jvzsEAhGMAE_000325_000326.mp4 yes no no +o7Aj7Bq8Kos_000090_000091.mp4 no no no +pjZJCBOGwRU_000005_000010.mp4 no no no +pn1dt2YD46s_000033_000035.mp4 no no no +3cFRbGaV5-k_000004_000008.mp4 no no no +BT0Q_P1fBlY_000380_000386.mp4 no no yes +BdEDEi_LFOA_000021_000023.mp4 no yes no +EDLkX44ocqw_000005_000009.mp4 no no no +EYDCiKGpSFs_000010_000012.mp4 no no no +JSIMsw3NrGM_000058_000063.mp4 no no no +L4lqtPwU5vY_000087_000093.mp4 no no no +Mio4-VYusug_000006_000008.mp4 no yes no +VcWQTlNOzoU_000039_000040.mp4 no no no +VcWQTlNOzoU_000131_000133.mp4 no no no +ZsMqq2ByFgM_000007_000013.mp4 no no no +cIJd33dNr-4_000006_000012.mp4 no no no +lSxv5M4-8tE_000031_000039.mp4 no no no +o1q2BURthvU_000063_000073.mp4 no no yes +pWJ3n5NTgDQ_000007_000010.mp4 no no no +sByIMfW6aI0_000020_000021.mp4 no no no +wILLcjlT58U_000001_000004.mp4 no no no +xv60PJOPVgg_000057_000058.mp4 no yes no +1LBm1M3DBNY_000089_000092.mp4 no no yes +5oCZjxPE2dg_000060_000066.mp4 no no yes +5r3Z45QAmxI_000271_000276.mp4 no no yes +7by8NBSszwk_000019_000026.mp4 no no yes +C_zr0Kq_qfg_000069_000071.mp4 no no yes +EkxyU6gC0zQ_000100_000104.mp4 no no no +F_vRbO50J20_000190_000194.mp4 no no no +ISAMIkCV5Is_000367_000370.mp4 no no no +JV3qQldC1eg_000390_000391.mp4 no yes no +JV3qQldC1eg_000399_000402.mp4 no yes no +KgfRK_3hGiY_000326_000330.mp4 no yes yes +LkMG3Ntn-g8_000056_000060.mp4 no no yes +Oa3GfbRU67E_000165_000167.mp4 no no yes +PZxs-3Mfnr8_000120_000122.mp4 no no yes +Q6g6dU0IgBU_000005_000010.mp4 no no no +TFSIm3Zeecg_000053_000055.mp4 no no yes +TGhDWDW7WhY_000562_000567.mp4 no no yes +UinTEeZHWas_000028_000033.mp4 no no no +bM3Dqa0WX_g_000015_000016.mp4 no no no +cRsvdcmXk8M_000005_000009.mp4 no no no +ftRFY6vUIEw_000235_000240.mp4 no no yes +gFhirwb2vtk_000071_000074.mp4 no no yes +gYZbufD61Og_000085_000092.mp4 no no yes +i1x9qnLy3Ng_000027_000029.mp4 no no no +n7ew4qIamfA_000053_000055.mp4 no no no +svY2E5YUUxc_000049_000059.mp4 no no yes +xQs2G_09NxU_000065_000067.mp4 no no yes +0eVaGBh2aYg_000050_000060.mp4 no no yes +0uLKTeX6HjA_000001_000004.mp4 no yes no +FPMBV3rd_hI_000077_000087.mp4 no no yes +GHahcY5ew3o_000001_000005.mp4 no no yes +MFWgsGsKGMQ_000182_000188.mp4 no no yes +Ous_8Cm7G1s_000213_000223.mp4 no no no +QRiAY_889YM_000128_000138.mp4 no no yes +SrS39FDK3HY_000028_000035.mp4 no no no +aGZL2rw-qtk_000030_000040.mp4 no no no +aJd5kfrA0Vs_000341_000351.mp4 no no yes +bkzsoJf-UsA_000059_000061.mp4 
no no no +cOZSG3wYTLo_000064_000072.mp4 no no no +gYZbufD61Og_000130_000132.mp4 no no yes +lIrkASGtoz8_000038_000046.mp4 no no yes +m-H7X-deSTk_000216_000219.mp4 no no yes +mm1HgQbXFEE_000100_000106.mp4 no no no +ElJQ_h3Y-0U_000034_000037.mp4 no yes no +VcWQTlNOzoU_000134_000137.mp4 no no no +_sKrWYJPc2g_000028_000033.mp4 no no no +duC8ZXFfo9k_000071_000073.mp4 no no no +duC8ZXFfo9k_000120_000124.mp4 no no no +eMXo3QIsFQo_000178_000180.mp4 yes yes yes +ucaR1j6Ajs8_000009_000011.mp4 no yes no +v0fHvZxWUqk_000000_000002.mp4 yes no no +v0fHvZxWUqk_000004_000006.mp4 yes no no +v0fHvZxWUqk_000008_000010.mp4 yes no no +1LBm1M3DBNY_000076_000086.mp4 no no yes +89vQjjd5M70_000129_000138.mp4 no no yes +COppfZLpk-8_000065_000075.mp4 no no yes +CmwzhtJ98tk_000069_000071.mp4 no no yes +FPMBV3rd_hI_000003_000010.mp4 no no yes +LlXKCeXsdNg_000177_000183.mp4 no no no +M5-HKclj15U_000042_000052.mp4 no no yes +N4WKMa_KKYc_000001_000003.mp4 no no no +PZxs-3Mfnr8_000210_000213.mp4 no no yes +PjUiNabLd04_000623_000625.mp4 no no no +RWLqaD9v6N8_000140_000146.mp4 no no yes +TdGVBQbBTbo_000011_000018.mp4 no no yes +c-DsvJAjK0w_000050_000057.mp4 no no no +cLqHYQY3Sjo_000052_000056.mp4 no no yes +ftRFY6vUIEw_000140_000150.mp4 no no yes +kGpDLyc59ik_000190_000200.mp4 no no yes +kGpDLyc59ik_000320_000330.mp4 no no yes +uPCzjHPS--U_000070_000072.mp4 no no no +zvPgORDt900_000036_000046.mp4 no no yes +37R1c2TOoCY_000146_000151.mp4 no no yes +53SXVzTpCjg_000037_000045.mp4 no no no +5bAFqsG37C0_000000_000008.mp4 no no no +EUHjIrtQvms_000002_000008.mp4 no no no +EjVnQyhz8uo_000202_000210.mp4 no no no +LAwv-gU3lqE_000055_000057.mp4 no no no +N3coivBwaqc_000050_000052.mp4 no no yes +QDs8zNsJVUI_000000_000006.mp4 no no no +T2gTIworsLk_000049_000050.mp4 no no no +X2pmhFCUDJw_000095_000100.mp4 no no no +yVMqtk4MDj8_000009_000012.mp4 no no yes +-fiZ0aXnDX8_000156_000166.mp4 no no no +Kc7a47NaUrU_000001_000009.mp4 no no no +MbmFcjcGf8w_000196_000203.mp4 no no yes +NjaSSRcBgus_000085_000095.mp4 no no no +OXg_ybIJYuE_000048_000057.mp4 no no no +VwDR7DvX4PE_000010_000020.mp4 no no no +_JYwMtF5UzA_000000_000010.mp4 no no no +eF5Yv3U1UaY_000150_000153.mp4 no no no +sLXBT10xg8w_000028_000038.mp4 no no no +xwOp2GJ3kDI_000234_000239.mp4 no no yes +-vEG9b1osjk_000000_000002.mp4 no no no +B281ZM_guy4_000441_000446.mp4 no no no +B2HcgdVgAYE_000017_000022.mp4 yes no no +CjXn0c_XvDE_000044_000051.mp4 no yes no +D4p5RrIMcBo_000048_000053.mp4 no no no +JiH47aX9sqU_000017_000023.mp4 no no no +LZKWWQHg6tI_000608_000612.mp4 no no no +SLQOMHEeURM_000180_000184.mp4 yes no no +TRcCyetKj0o_000228_000236.mp4 no no no +eMXo3QIsFQo_000100_000104.mp4 yes yes yes +enqzVO7JoK4_000035_000036.mp4 no no no +htZoKnl5OE4_000013_000018.mp4 no no no +mW2ZS9Y2m8E_000008_000010.mp4 no no no +nEkB5deP0T8_000117_000122.mp4 no no no +vA0v6Xp7CIU_000105_000112.mp4 no no no +zLr8OGVDTFA_000005_000008.mp4 no no no +-CM9SDKQRQc_000150_000152.mp4 no yes no +0xA0SB7uFcc_000019_000020.mp4 no yes no +IL_G87_XyxU_000081_000083.mp4 no no no +NAkrjA7GR84_000112_000115.mp4 no no no +OG4GyiHdRAI_000075_000076.mp4 no no no +S64EAB8haMw_000015_000019.mp4 no no no +TRcCyetKj0o_000115_000116.mp4 no no no +YvKce9m1z7g_000004_000006.mp4 no no no +ZMJixoSYf8Q_000045_000047.mp4 yes no no +bodvGG0lOFY_000078_000081.mp4 no yes no +eMXo3QIsFQo_000114_000116.mp4 yes yes yes +hvD1Jypn7cg_000108_000110.mp4 no no no +lpZs8sx51tk_000015_000017.mp4 no no no +n98HEowTnkQ_000011_000012.mp4 no yes no +x3QNv54PtlE_000017_000020.mp4 no yes no +3hxZTxU0Svw_000000_000002.mp4 no no no 
+HHvryR3MtRg_000000_000005.mp4 no no no +QK87X-9J-ps_000000_000004.mp4 no no no +Wy7qUxpzmzM_000004_000010.mp4 no no no +bawjQioW0cM_000122_000129.mp4 no yes no +lhA3Mxwy8xg_000054_000055.mp4 no no no +ov38wjek-ok_000000_000010.mp4 no no no +svnmmn-sYTs_000000_000003.mp4 no no no +wZXgTlk-rak_000000_000010.mp4 no no no +xtIw4-LOSKw_000000_000008.mp4 no no no +BRtsvUVpHf4_000164_000169.mp4 no no no +CytOsGVQyKg_000026_000027.mp4 no no yes +E-Bnewo247E_000004_000005.mp4 no no yes +EJqkPjB6EpQ_000074_000076.mp4 no no no +WzgnBOXs9Wo_000000_000002.mp4 no no no +XrTg8mq_geo_000004_000005.mp4 no no no +aoNi9Im_Iio_000134_000136.mp4 no no yes +dS3Hc-5o9Z8_000026_000031.mp4 no no yes +dbPOVCeb8dU_000351_000353.mp4 no no no +j5tSDS91AcM_000104_000108.mp4 no no yes +kr8KU7N93dw_000003_000006.mp4 no no no +u2rLpICCatI_000057_000062.mp4 no no no +228yqOPCJaY_000014_000016.mp4 no no no +3YUTULgA-Hc_000036_000037.mp4 no no yes +58RrWSDBsNc_000281_000283.mp4 no no no +80C5dnlGtiY_000004_000009.mp4 no no no +80C5dnlGtiY_000021_000025.mp4 no no no +9AbZIwq7qJU_000000_000003.mp4 no no no +ArGXhyriIzs_000006_000012.mp4 no no no +JuGpEUvXEW0_000011_000015.mp4 no no no +M9rgnIoZQTo_000012_000015.mp4 no no no +M9rgnIoZQTo_000015_000017.mp4 no no no +MuHA3dDr5JI_000008_000010.mp4 no no no +MuHA3dDr5JI_000013_000014.mp4 no no no +OLTMQBerToU_000064_000068.mp4 no no no +ZxyVh5NlOF8_000026_000028.mp4 no no no +eMXo3QIsFQo_000138_000139.mp4 yes yes yes +j11ARKpK-8Q_000020_000022.mp4 no no no +pOHZlLytOQw_000070_000073.mp4 no no no +pOHZlLytOQw_000170_000180.mp4 no no no +1LBm1M3DBNY_000087_000089.mp4 no no yes +1lukUCL5pS4_000146_000151.mp4 no yes yes +7by8NBSszwk_000004_000005.mp4 no no yes +C_zr0Kq_qfg_000065_000067.mp4 no no yes +JV3qQldC1eg_000375_000379.mp4 no yes no +LkMG3Ntn-g8_000054_000056.mp4 no no yes +gFhirwb2vtk_000058_000068.mp4 no no no +s6psOZC5UE8_000090_000100.mp4 no no yes +xQs2G_09NxU_000064_000065.mp4 no no no +8_Ex8pZgeAY_000050_000060.mp4 no no no +9CtKGiqhCoA_000023_000027.mp4 no no no +HNNBPNQwh8E_000008_000010.mp4 no no no +HrF_3NMGU2c_000002_000004.mp4 no no no +J72oxMhjf1Y_000037_000041.mp4 no no no +LfceW5releU_000002_000004.mp4 no no no +USAL4OpwJ-w_000020_000030.mp4 no no no +eeC8u7osjsU_000000_000008.mp4 no no no +hFsTEHai5Zg_000000_000010.mp4 no no no +s--xTEYCHUo_000090_000100.mp4 no no no +vDonJJgnI0w_000010_000020.mp4 no no no +7zzLN3ZNQEI_000000_000010.mp4 no yes no +JwkAbfty5p8_000019_000025.mp4 no yes no +L4lqtPwU5vY_000081_000086.mp4 no no no +L4lqtPwU5vY_000095_000096.mp4 no yes no +RVlKVMy-oxI_000178_000181.mp4 no no no +YfTcVAUWc74_000001_000003.mp4 no no no +eMXo3QIsFQo_000084_000089.mp4 yes yes yes +g7qQK6Qxi3U_000012_000017.mp4 no no no +gW2qYxCB-0g_000004_000006.mp4 no no no +lhuGcGu3-7E_000003_000013.mp4 no yes no +nrlWz4aIkRI_000000_000003.mp4 no no no +vNUcBRjaboo_000022_000027.mp4 no no no +xNeghXu6ZS4_000002_000007.mp4 no no no +xv60PJOPVgg_000003_000009.mp4 no yes no +-0dNIgdbSX8_000000_000007.mp4 no no no +A4nytLns0_M_000072_000076.mp4 no no no +D0ntNYx8Zis_000028_000038.mp4 no no no +Fhd0n8xXTSA_000000_000010.mp4 no no no +GkrtgWwIKU0_000001_000011.mp4 no no no +dThVRcijxZQ_000010_000020.mp4 no no no +hK-F1ILPaAk_000001_000010.mp4 no no no +kR8zkH-35kk_000173_000175.mp4 no no no +mGlm3QKBvD8_000085_000086.mp4 no no no +oAU0Pb-74RU_000041_000051.mp4 no no no +pbCEaGyf-Rs_000012_000022.mp4 no no no +wROQoqSDHV0_000011_000013.mp4 no no no +yx_zMTYkoes_000034_000037.mp4 no no no +8ayeoP9Q-eY_000035_000040.mp4 no no no +BjPzEu7Phuc_000002_000004.mp4 no no no 
+E5MuJVnH8Hg_000023_000025.mp4 no no yes +G_3kwIzNN3g_000050_000057.mp4 no no no +HXldFQk7lv8_000170_000172.mp4 no no no +NjytEC7-6Z0_000043_000048.mp4 no no no +R1dW8M4EqYY_000156_000162.mp4 no no no +TTtswoWkGQU_000149_000153.mp4 no no no +WQ8E-AVgcUw_000020_000030.mp4 no no no +XmoOJuF2kZ8_000314_000318.mp4 no no no +d0PB6JKaBro_000022_000030.mp4 no no no +ixBkTYPG82g_000015_000022.mp4 no no no +j8lFuauKYlg_000020_000025.mp4 no no yes +jJPCbVsrQbw_000000_000002.mp4 no no no +kFdtIX78qn8_000065_000068.mp4 no no no +v3K8cu6dT9s_000020_000030.mp4 no no no +yh1JjC75YIc_000009_000011.mp4 no no no +zbx22vdGsiI_000168_000170.mp4 no no yes +2je914Qrjzw_000020_000030.mp4 no no yes +4K7T7XNwkOs_000010_000020.mp4 no no no +5scu_DSFGcs_000015_000017.mp4 no no no +7ulJtKGSNvg_000025_000035.mp4 no no no +I79RUT2mWFc_000020_000024.mp4 no no yes +L2-iSLXXUjc_000052_000058.mp4 no no yes +SoVAjHzeduo_000005_000015.mp4 no no no +TdX-4k1C0FE_000025_000030.mp4 no yes no +cmM4OrWuj_M_000002_000010.mp4 no no no +m-H7X-deSTk_000012_000017.mp4 no no yes +oiGd1Q7mm-w_000070_000080.mp4 no no no +soxQzU7uERg_000055_000060.mp4 no no no +trFxg5SOSvI_000010_000020.mp4 no no no +vg8js-NANDo_000020_000030.mp4 no no no +yAX_BfEjH0c_000015_000025.mp4 no no no +yTWTsSIykcg_000000_000010.mp4 no no no +yZurS3ExCPk_000027_000037.mp4 no no no +3gXpyjOawoM_000025_000032.mp4 no no no +6gHz-cxfeQ8_000001_000006.mp4 no no no +7LTKAifeIgQ_000005_000015.mp4 no no no +9qVfAdeiq0A_000020_000027.mp4 no no no +J2zoY_UfNlQ_000005_000015.mp4 no no no +RKCpr9cZUBQ_000015_000020.mp4 no no no +_wxtgv2k1c0_000107_000112.mp4 no no no +ixBkTYPG82g_000048_000053.mp4 no no no +kfRWbynDGu8_000096_000103.mp4 no no no +oaNiS_tHfbE_000035_000039.mp4 no no no +ouDw29KY3UM_000011_000015.mp4 no no no +teRBvK5qvXE_000010_000015.mp4 no no no +uJ38b0KykWI_000015_000020.mp4 no no no +1-v_nFabl1M_000014_000020.mp4 no yes yes +1uO6_0Ii-gw_000076_000078.mp4 yes no no +Gc6t9fuZUKY_000552_000559.mp4 no no no +I0wv7LKnkSw_000019_000023.mp4 no no no +KukISaZBhcI_000050_000054.mp4 no no yes +L4lqtPwU5vY_000070_000080.mp4 no no no +MykK0N1WFN0_000170_000176.mp4 no no no +RVlKVMy-oxI_000143_000147.mp4 no no no +TRcCyetKj0o_000170_000172.mp4 no no no +Tg9mJRahkBs_000070_000076.mp4 no no no +UqzbgSuea94_000000_000004.mp4 no no no +XAZML7RXsww_000111_000114.mp4 no no no +Z2_P2eLs-oY_000105_000114.mp4 no no no +af_KqWyh2fE_000055_000064.mp4 no no no +gsdJUGBoKdk_000025_000032.mp4 yes no no +rUn1mjk3LhE_000015_000018.mp4 no no no +u27ZpuwQ7Qw_000027_000032.mp4 no no no +u4jWPS4uQb4_000002_000005.mp4 no no no +uGYF0cohpPo_000017_000020.mp4 no no no +AXJjDX7f4U8_000047_000052.mp4 no no no +Fn866-mNriY_000022_000032.mp4 no no no +G7UveGPjZR8_000000_000006.mp4 no no no +Of2AffIeV58_000003_000010.mp4 no no no +VSwUuE4Mb7k_000008_000012.mp4 no no no +XVQvmod4svI_000012_000015.mp4 no no no +Zxp7_IMUTRQ_000008_000015.mp4 no no no +f-A2Wa8jR7s_000005_000008.mp4 no no no +fG8aSj5ttKg_000000_000009.mp4 no no no +mePrNsy72Yg_000000_000010.mp4 no no no +ow-WRihXwm0_000003_000013.mp4 no no no +qbK-l1kJaWo_000000_000004.mp4 no no no +yKwAi-1opzU_000023_000026.mp4 no no no +z6woHuuwl1o_000000_000010.mp4 no no no +1LBm1M3DBNY_000030_000040.mp4 no no yes +AM72BbbXH4w_000055_000060.mp4 no no no +GgOXsW06EYE_000003_000010.mp4 no no no +KM9jkQlR51E_000329_000334.mp4 no no yes +L71DUOPJn88_000136_000138.mp4 yes no yes +Lci2M22ywSg_000000_000010.mp4 no no no +PTCVfbDY0RM_000007_000010.mp4 no no no +SM6vDMcLHiI_000112_000115.mp4 no no no +TyqATpi_knw_000115_000119.mp4 no no no 
+U4SUGeh27RA_000006_000008.mp4 no no no +hQmoQOJZqok_000050_000056.mp4 no no yes +kfvNRCtoyZc_000020_000028.mp4 no no no +natexREv6Uo_000040_000050.mp4 no no no +q-rGl0v0KI8_000030_000035.mp4 no no no +qNJINPPOZC0_000019_000029.mp4 no no no +scZvLstzkig_000000_000010.mp4 no no no +tJVNul1R24o_000010_000015.mp4 no no no +vYTbXgjqWTw_000002_000007.mp4 no no no +wo4t9PKxGhI_000007_000013.mp4 no no yes +zbx22vdGsiI_000176_000181.mp4 no no yes +4mW-7bipojM_000072_000079.mp4 no no no +84GzqqaUiVc_000000_000007.mp4 no no no +Fz97Mmtozcg_000014_000016.mp4 no yes no +GVhUVhlJYDM_000035_000044.mp4 no no no +HOHxR8_CqEQ_000008_000015.mp4 no no no +L4lqtPwU5vY_000149_000158.mp4 no no no +Mt4P2qJzSZs_000000_000003.mp4 no no no +P5CYaw5mEM0_000105_000108.mp4 no no no +TRcCyetKj0o_000077_000079.mp4 no no no +TrrI4cFOYEk_000006_000009.mp4 no no no +eGDD8GeE0qo_000034_000044.mp4 no yes no +feX4zhOfjX4_000090_000100.mp4 no no no +uiup9wdVzw0_000020_000024.mp4 no no no +9qUKSCHQ3z0_000057_000059.mp4 no no no +FUIqsc5hMMQ_000182_000184.mp4 no no no +KC7P1KVZUC0_000055_000062.mp4 yes no yes +PW5e01acttw_000172_000173.mp4 no no yes +PuQDdeQxejw_000001_000005.mp4 no no no +SHVMWGGAkGQ_000096_000100.mp4 no no no +SdY0GFiME2c_000396_000399.mp4 no no yes +TGhDWDW7WhY_000164_000166.mp4 no no yes +ZQAzZN4uRM4_000269_000271.mp4 no no yes +bOkmovhjL2M_000034_000037.mp4 no no no +dGFYRCvR_II_000197_000199.mp4 no no no +gYZbufD61Og_000214_000216.mp4 no no yes +jz7diIi-jY8_000027_000029.mp4 no no yes +wPwRnIwXWNA_000171_000175.mp4 no no no +xJUW5S38h14_000054_000061.mp4 yes no no +zl5qNMwJ_jU_000120_000121.mp4 no no no +-CY1TkCMwjM_000011_000018.mp4 no no yes +11EMLAs8D_A_000410_000414.mp4 no no yes +KSFnKPS3Ol8_000064_000068.mp4 no no yes +L3yIVkdL43E_000030_000035.mp4 no no yes +L71DUOPJn88_000242_000245.mp4 no no yes +S-E3rlSsrz0_000023_000028.mp4 no no no +cLqHYQY3Sjo_000033_000038.mp4 no no no +ftRFY6vUIEw_000059_000062.mp4 no no yes +ok3F7541Ews_000080_000082.mp4 no no yes +qdznL4VyV10_000033_000038.mp4 no no yes +-CY1TkCMwjM_000041_000051.mp4 no no yes +0PwWxpWlC-U_000012_000020.mp4 no no yes +62Sc2tcK7xc_000006_000011.mp4 no no yes +COppfZLpk-8_000050_000060.mp4 no no yes +JV3qQldC1eg_000296_000302.mp4 no no no +UDuEHi7yrZ8_000550_000560.mp4 no no no +Xp1oE4WvPuA_000075_000085.mp4 no no yes +aTSVeNeJxog_000051_000057.mp4 no no yes +gw2pK33hEWw_000085_000087.mp4 no no yes +oxmXnucXlMQ_000150_000154.mp4 no no yes +BT0Q_P1fBlY_000387_000390.mp4 no yes yes +GyjPg9e1lvs_000085_000088.mp4 no no no +L4lqtPwU5vY_000093_000094.mp4 no no no +PzzMKgMU02M_000027_000028.mp4 no yes no +SdY0GFiME2c_000450_000452.mp4 no no yes +Sjmcire2g88_000141_000144.mp4 no no no +XvpuGCv9hyo_000029_000030.mp4 no no no +XvpuGCv9hyo_000042_000043.mp4 no no no +YfTcVAUWc74_000004_000006.mp4 no no no +lMucgdCqsqw_000019_000022.mp4 no no no +lSxv5M4-8tE_000040_000041.mp4 no no no +nd4TpqEJwEs_000133_000135.mp4 no yes no +nrlWz4aIkRI_000003_000004.mp4 no no no +o1q2BURthvU_000051_000055.mp4 no no yes +o1q2BURthvU_000151_000152.mp4 no no yes +sByIMfW6aI0_000009_000011.mp4 no no no +wILLcjlT58U_000008_000010.mp4 no no no +xv60PJOPVgg_000025_000027.mp4 no yes no +zXem1xEiy-E_000008_000010.mp4 no yes no +L4lqtPwU5vY_000048_000050.mp4 no yes no +MuHA3dDr5JI_000010_000011.mp4 no no no +Pk2OTQENWro_000007_000011.mp4 no no no +RVlKVMy-oxI_000240_000243.mp4 no no no +Y22Tr4_naSk_000037_000039.mp4 no no no +Z2_P2eLs-oY_000094_000095.mp4 no no no +bFzzcjXUnD8_000002_000005.mp4 no yes no +dbn_Nc3G8jA_000004_000006.mp4 no no no +eMXo3QIsFQo_000140_000143.mp4 
yes yes yes +ffBqvOzUU_A_000088_000090.mp4 no yes no +j11ARKpK-8Q_000067_000069.mp4 no yes no +j11ARKpK-8Q_000082_000086.mp4 no no no +vlxFyd_hi7I_000065_000068.mp4 no yes no +vlxFyd_hi7I_000090_000093.mp4 no yes no +CGznm4YKiVU_000205_000206.mp4 no no no +L-1AdOARfRU_000090_000092.mp4 yes no no +PU0-dmClrFY_000011_000021.mp4 no no no +TRcCyetKj0o_000184_000187.mp4 no no no +UV6cW4NoVhw_000018_000028.mp4 yes no no +XmoOJuF2kZ8_000113_000115.mp4 no no no +dNmYajS1Gng_000050_000059.mp4 no no no +j734kxPTeD8_000240_000246.mp4 no no yes +yk7yXdzabxY_000027_000030.mp4 no no no +zseV_Vxy1xg_000242_000246.mp4 yes no no +0TuF-RqBhWs_000137_000147.mp4 yes no no +HxOm8k6PVb8_000035_000045.mp4 yes no no +KISD1YRTeCk_000095_000105.mp4 no no no +Q4vDfKGz52Y_000005_000010.mp4 yes no no +SdY0GFiME2c_000075_000081.mp4 no no yes +iB3sn6RRKME_000000_000008.mp4 no no no +j734kxPTeD8_000257_000267.mp4 no no yes +ogNC_prhefk_000000_000007.mp4 no no no +pKnP88kWTKA_001536_001546.mp4 no no no +xB5O3xFxm5Y_000045_000055.mp4 no no no +27e57LLAOM0_000004_000012.mp4 no no no +4PZAs22DgHA_000029_000039.mp4 no no no +CGMgGwKiHKg_000004_000014.mp4 no no no +FrSYiqa_eiA_000028_000038.mp4 no no no +M1qYDjgWF7g_000068_000078.mp4 no no no +MeIPJuWWznc_000042_000052.mp4 no no no +XZBs7xlrNAw_000015_000025.mp4 no no no +ldDONms8lAI_000000_000006.mp4 no no no +oRGRNNCGB08_000000_000010.mp4 no no no +vjy05xjkgkg_000006_000010.mp4 no no no +zvR84iiKECs_000013_000018.mp4 no no no +zvR84iiKECs_000157_000161.mp4 no no no +2BVHPrletXY_000208_000214.mp4 no no yes +3IKir8rumOs_000100_000102.mp4 no no yes +AOgag2eGxyE_000053_000057.mp4 no no yes +COppfZLpk-8_000018_000020.mp4 no no no +EkxyU6gC0zQ_000136_000146.mp4 no no no +JiYtY-UpV_E_000000_000010.mp4 no no yes +M5-HKclj15U_000090_000092.mp4 no no yes +Ous_8Cm7G1s_000156_000160.mp4 no no no +PZxs-3Mfnr8_000250_000254.mp4 no no yes +QHy1zMKfbmY_000034_000038.mp4 no no yes +RWLqaD9v6N8_000044_000047.mp4 no no yes +Tc78yPv_ztM_000029_000031.mp4 no no no +fYHOAFqdjaY_000026_000029.mp4 no no yes +gYZbufD61Og_000107_000117.mp4 no no yes +kGpDLyc59ik_000041_000051.mp4 no no yes +o4jvz4kSC7E_000080_000081.mp4 no no yes +oxmXnucXlMQ_000165_000171.mp4 no no yes +twz6DMLXS4U_000302_000312.mp4 no no yes +z9r4IWdxW1w_000091_000094.mp4 no no no +05c4TS7obbM_000052_000053.mp4 no no no +LR26Z9ySfXc_000051_000061.mp4 no no no +TRcCyetKj0o_000188_000192.mp4 no no no +bMNS3HYVpa0_000005_000011.mp4 yes no no +czIuu4xAJXY_000165_000171.mp4 yes no no +eMXo3QIsFQo_000212_000216.mp4 yes yes yes +fKtaBkCHwpU_000005_000007.mp4 no yes no +iHxLX1M7v2Q_000865_000868.mp4 yes no no +qgHpb__5AIA_000030_000036.mp4 yes no no +zVk-hIYzOXY_000114_000119.mp4 yes no no +E8JBLVSuOPM_000102_000106.mp4 no no yes +K1MQX20MHE4_000000_000010.mp4 no no no +L4lqtPwU5vY_000162_000166.mp4 no no no +Vx0iQQWYaB0_000016_000018.mp4 no no no +an1BQc5Nitg_000113_000118.mp4 no no yes +an1BQc5Nitg_000139_000142.mp4 no no yes +jxHnpPtlGfM_000000_000002.mp4 no no no +qBGgfVdl43w_000321_000323.mp4 no no yes +tJVNul1R24o_000000_000005.mp4 no no no +utUp-BoIErA_000091_000099.mp4 no no yes +vXLLSsfJgMk_000294_000299.mp4 no no yes +-8wp06ZwKQc_000000_000004.mp4 no no no +05IGI2SgarU_000113_000123.mp4 no no no +3_mPgxrywfI_000090_000091.mp4 no no yes +7qFtea79UQE_000157_000160.mp4 no no yes +7qFtea79UQE_000160_000162.mp4 no no yes +FUIqsc5hMMQ_000048_000053.mp4 no no no +JvfW7e6H-io_000043_000045.mp4 no no no +MeJZ8NOP4IU_000002_000005.mp4 no no no +S4E02y5K2Fc_000013_000019.mp4 no no no +TRcCyetKj0o_000304_000308.mp4 no no no 
+UbDd0WJE3LU_000000_000010.mp4 no no no +XUwYB1vVgK0_000106_000110.mp4 no no no +hXFx9M9e6Bo_000070_000075.mp4 no no no +mJ0yvdpCZdc_000000_000009.mp4 no no no +n7isYwg7ooc_000183_000186.mp4 no no no +pHtvEIWftR4_000015_000019.mp4 no no no +tu3JQ2edqA0_000029_000033.mp4 no no no +5CV-aGIS00w_000077_000079.mp4 no no yes +8FtwAj8XQ7o_000230_000240.mp4 no no yes +COppfZLpk-8_000205_000208.mp4 no no yes +JPBjBflm7V4_000052_000054.mp4 no no yes +ZsoPVDvWzLA_000019_000029.mp4 no no yes +dbPOVCeb8dU_000283_000284.mp4 no no no +ktaZ7IRhifs_000020_000024.mp4 no no no +nPCH31K5ISg_000009_000013.mp4 no no no +-N5zNXtTF-E_000015_000018.mp4 no no yes +05c4TS7obbM_000077_000080.mp4 no no no +1lukUCL5pS4_000270_000277.mp4 no no yes +7pkMMm2BhTg_000012_000017.mp4 no yes yes +CmwzhtJ98tk_000025_000031.mp4 no no yes +K2Ny4BjAc1o_000000_000007.mp4 no yes no +Kby4miFDaco_000030_000036.mp4 no no no +KqbXanPwUZg_000096_000100.mp4 no no yes +PVwuEaln_6E_000022_000032.mp4 no no yes +U7d5rZ944Vs_000053_000054.mp4 no no yes +ZTjXEV-mvgY_000011_000014.mp4 no no no +_qs0XOPOp5U_000166_000174.mp4 no no yes +cGt5BblpKvQ_000047_000053.mp4 no no yes +oxmXnucXlMQ_000055_000057.mp4 no no yes +yMVRfX-cYRk_000084_000089.mp4 no no yes +13UBd2tR_sg_000020_000021.mp4 no no yes +3CHYTZ1Zhug_000000_000006.mp4 no no no +CX77AwLLDcE_000026_000028.mp4 no no yes +FGM8u4oX9Xw_000085_000087.mp4 no no yes +N4WKMa_KKYc_000017_000019.mp4 no no yes +UOJPlbURmwM_000048_000049.mp4 no no no +aGZL2rw-qtk_000117_000127.mp4 no no no +prstk5Ol85U_000010_000011.mp4 no no no +rDr3LnYDK7g_000110_000111.mp4 no no no +rDr3LnYDK7g_000113_000115.mp4 no no no +rDr3LnYDK7g_000120_000122.mp4 no no no +x9I8pjBmX28_000077_000084.mp4 no no yes +xu3hDsutNjU_000020_000030.mp4 no no no \ No newline at end of file diff --git a/evaluator.py b/evaluator.py new file mode 100644 index 0000000..601e6c7 --- /dev/null +++ b/evaluator.py @@ -0,0 +1,291 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. 
+MIT license +""" +import torch +import numpy as np + + +def n_correct(pred, labels): + _, predicted = torch.max(pred.data, 1) + n_correct = (predicted == labels).sum().item() + return n_correct + + +class EvaluatorBase(object): + def __init__(self, device='cuda'): + self.device = device + + @torch.no_grad() + def evaluate_acc(self, dataloader, model): + model.eval() + + total = 0 + correct = 0 + + for x, labels, index in dataloader: + x = x.to(self.device) + labels = labels.to(self.device) + pred = model(x, logits_only=True) + + batch_size = labels.size(0) + total += batch_size + correct += n_correct(pred, labels) + + return correct / total + + @torch.no_grad() + def evaluate_rebias(self, dataloader, rebias_model, + outer_criterion=None, + inner_criterion=None, + **kwargs): + raise NotImplementedError + + +class MNISTEvaluator(EvaluatorBase): + def _confusion_matrix(self, pred, bias_labels, labels, n_correct, n_total): + for bias_label in range(10): + for label in range(10): + b_indices = (bias_labels.squeeze() == bias_label).nonzero().squeeze() + t_indices = (labels.squeeze() == label).nonzero().squeeze() + + indices = np.intersect1d(b_indices.detach().cpu().numpy(), + t_indices.detach().cpu().numpy()) + indices = torch.cuda.LongTensor(indices) + if indices.nelement() == 0: + continue + _n = len(indices) + _output = pred.index_select(dim=0, index=indices) + _, predicted = torch.max(_output.data, 1) + _n_correct = (predicted == labels[indices]).sum().item() + + n_correct[label][bias_label] += _n_correct + n_total[label][bias_label] += _n + return n_correct, n_total + + def get_confusion_matrix(self, dataloader, rebias_model): + n_correct_arr = np.zeros((10, 10)) + n_total = np.zeros((10, 10)) + + total = 0 + f_correct = 0 + for x, labels, bias_labels in dataloader: + x = x.to(self.device) + labels = labels.to(self.device) + bias_labels = bias_labels.to(self.device) + + f_pred, g_preds, f_feat, g_feats = rebias_model(x) + n_correct_arr, n_total = self._confusion_matrix(f_pred, bias_labels, labels, n_correct_arr, n_total) + + f_correct += n_correct(f_pred, labels) + total += len(labels) + print('accuracy:', f_correct / total) + CM = n_correct_arr / (n_total + 1e-12) + return CM + + @torch.no_grad() + def evaluate_rebias(self, dataloader, rebias_model, + outer_criterion=None, + inner_criterion=None, + **kwargs): + rebias_model.eval() + + total = 0 + f_correct = 0 + g_corrects = [0 for _ in rebias_model.g_nets] + + if outer_criterion.__class__.__name__ in ['LearnedMixin', 'RUBi']: + """For computing HSIC loss only. 
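+        (Note: the outer/inner evaluation losses accumulated below are only intended for the HSIC-style criteria; when the training objective is LearnedMixin or RUBi the outer criterion is dropped here and its reported loss simply stays at zero.)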
+ """ + outer_criterion = None + + outer_loss = [0 for _ in rebias_model.g_nets] + inner_loss = [0 for _ in rebias_model.g_nets] + + for x, labels, _ in dataloader: + x = x.to(self.device) + labels = labels.to(self.device) + + f_pred, g_preds, f_feat, g_feats = rebias_model(x) + + batch_size = labels.size(0) + total += batch_size + + f_correct += n_correct(f_pred, labels) + for idx, g_pred in enumerate(g_preds): + g_corrects[idx] += n_correct(g_pred, labels) + + if outer_criterion: + for idx, g_pred in enumerate(g_preds): + outer_loss[idx] += batch_size * outer_criterion(f_pred, g_pred).item() + + if inner_criterion: + for idx, g_pred in enumerate(g_preds): + inner_loss[idx] += batch_size * inner_criterion(f_pred, g_pred).item() + + ret = {'f_acc': f_correct / total} + for idx, (_g_correct, _outer_loss, _inner_loss) in enumerate(zip(g_corrects, outer_loss, inner_loss)): + ret['g_{}_acc'.format(idx)] = _g_correct / total + ret['outer_{}_loss'.format(idx)] = _outer_loss / total + ret['inner_{}_loss'.format(idx)] = _inner_loss / total + return ret + + +class ImageNetEvaluator(EvaluatorBase): + def imagenet_unbiased_accuracy(self, outputs, labels, cluster_labels, + num_correct, num_instance, + num_cluster_repeat=3): + for j in range(num_cluster_repeat): + for i in range(outputs.size(0)): + output = outputs[i] + label = labels[i] + cluster_label = cluster_labels[j][i] + + _, pred = output.topk(1, 0, largest=True, sorted=True) + correct = pred.eq(label).view(-1).float() + + num_correct[j][label][cluster_label] += correct.item() + num_instance[j][label][cluster_label] += 1 + + return num_correct, num_instance + + @torch.no_grad() + def evaluate_rebias(self, dataloader, rebias_model, + outer_criterion=None, + inner_criterion=None, + num_classes=9, + num_clusters=9, + num_cluster_repeat=3, + key=None): + rebias_model.eval() + + total = 0 + f_correct = 0 + num_correct = [np.zeros([num_classes, num_clusters]) for _ in range(num_cluster_repeat)] + num_instance = [np.zeros([num_classes, num_clusters]) for _ in range(num_cluster_repeat)] + g_corrects = [0 for _ in rebias_model.g_nets] + + if outer_criterion.__class__.__name__ in ['LearnedMixin', 'RUBi']: + """For computing HSIC loss only. 
+ """ + outer_criterion = None + + outer_loss = [0 for _ in rebias_model.g_nets] + inner_loss = [0 for _ in rebias_model.g_nets] + + for x, labels, bias_labels in dataloader: + x = x.to(self.device) + labels = labels.to(self.device) + for bias_label in bias_labels: + bias_label.to(self.device) + + f_pred, g_preds, f_feat, g_feats = rebias_model(x) + + batch_size = labels.size(0) + total += batch_size + + if key == 'unbiased': + num_correct, num_instance = self.imagenet_unbiased_accuracy(f_pred.data, labels, bias_labels, + num_correct, num_instance, num_cluster_repeat) + else: + f_correct += n_correct(f_pred, labels) + for idx, g_pred in enumerate(g_preds): + g_corrects[idx] += n_correct(g_pred, labels) + + if outer_criterion: + for idx, g_pred in enumerate(g_preds): + outer_loss[idx] += batch_size * outer_criterion(f_pred, g_pred).item() + + if inner_criterion: + for idx, g_pred in enumerate(g_preds): + inner_loss[idx] += batch_size * inner_criterion(f_pred, g_pred).item() + + if key == 'unbiased': + for k in range(num_cluster_repeat): + x, y = [], [] + _num_correct, _num_instance = num_correct[k].flatten(), num_instance[k].flatten() + for i in range(_num_correct.shape[0]): + __num_correct, __num_instance = _num_correct[i], _num_instance[i] + if __num_instance >= 10: + x.append(__num_instance) + y.append(__num_correct / __num_instance) + f_correct += sum(y) / len(x) + + ret = {'f_acc': f_correct / num_cluster_repeat} + else: + ret = {'f_acc': f_correct / total} + + for idx, (_g_correct, _outer_loss, _inner_loss) in enumerate(zip(g_corrects, outer_loss, inner_loss)): + ret['g_{}_acc'.format(idx)] = _g_correct / total + ret['outer_{}_loss'.format(idx)] = _outer_loss / total + ret['inner_{}_loss'.format(idx)] = _inner_loss / total + return ret + + +class ActionEvaluator(EvaluatorBase): + @torch.no_grad() + def evaluate_rebias(self, dataloader, rebias_model, + outer_criterion=None, + inner_criterion=None, + num_classes=50, + **kwargs): + rebias_model.eval() + + num_clips = dataloader.dataset._num_clips + num_videos = len(dataloader.dataset) // num_clips + video_f_preds = torch.zeros((num_videos, num_classes)) + video_g_preds = torch.zeros((len(rebias_model.g_nets), num_videos, num_classes)) + video_labels = torch.zeros((num_videos)).long() + clip_count = torch.zeros((num_videos)).long() + + total = 0 + + if outer_criterion.__class__.__name__ in ['LearnedMixin', 'RUBi']: + """For computing HSIC loss only. 
+ """ + outer_criterion = None + + outer_loss = [0 for _ in rebias_model.g_nets] + inner_loss = [0 for _ in rebias_model.g_nets] + for x, labels, index in dataloader: + x = x.to(self.device) + labels = labels.to(self.device) + f_pred, g_preds, f_feat, g_feats = rebias_model(x) + + for ind in range(f_pred.shape[0]): + vid_id = int(index[ind]) // num_clips + video_labels[vid_id] = labels[ind].detach().cpu() + video_f_preds[vid_id] += f_pred[ind].detach().cpu() + for g_idx, g_pred in enumerate(g_preds): + video_g_preds[g_idx, vid_id] += g_pred[ind].detach().cpu() + clip_count[vid_id] += 1 + + batch_size = labels.size(0) + total += batch_size + + if outer_criterion: + for idx, g_pred in enumerate(g_preds): + outer_loss[idx] += batch_size * outer_criterion(f_pred, g_pred).item() + + if inner_criterion: + for idx, g_pred in enumerate(g_preds): + inner_loss[idx] += batch_size * inner_criterion(f_pred, g_pred).item() + + if not all(clip_count == num_clips): + print( + "clip count {} ~= num clips {}".format( + clip_count, num_clips + ) + ) + + f_correct = n_correct(video_f_preds, video_labels) + g_corrects = [n_correct(video_g_pred, video_labels) + for video_g_pred in video_g_preds] + + ret = {'f_acc': f_correct / num_videos} + for idx, (_g_correct, _outer_loss, _inner_loss) in enumerate(zip(g_corrects, outer_loss, inner_loss)): + ret['g_{}_acc'.format(idx)] = _g_correct / num_videos + + ret['outer_{}_loss'.format(idx)] = _outer_loss / total + ret['inner_{}_loss'.format(idx)] = _inner_loss / total + return ret diff --git a/logger.py b/logger.py new file mode 100644 index 0000000..4b4557f --- /dev/null +++ b/logger.py @@ -0,0 +1,80 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. +MIT license +""" +import logging + + +class LoggerBase(object): + def __init__(self, **kwargs): + self.level = kwargs.get('level', logging.DEBUG) + self.logger = self.set_logger(**kwargs) + + def set_logger(self, **kwargs): + return + + def log(self, msg, level=logging.INFO): + raise NotImplementedError + + def log_dict(self, msg_dict, prefix='', level=logging.INFO): + raise NotImplementedError + + def report(self, msg_dict, prefix='', level=logging.INFO): + raise NotImplementedError + + +class PrintLogger(LoggerBase): + def log(self, msg, level=logging.INFO): + if level <= self.level: + return + + print(msg) + + def log_dict(self, msg_dict, prefix='Report @step: ', level=logging.INFO): + if level <= self.level: + return + + if 'step' in msg_dict: + step = msg_dict.pop('step') + print(prefix, step) + + print('{') + for k, v in sorted(msg_dict.items()): + print(' {}: {}'.format(k, v)) + print('}') + + def report(self, msg_dict, prefix='Report @step: ', level=logging.INFO): + self.log_dict(msg_dict, level) + + +class PythonLogger(LoggerBase): + def set_logger(self, name=None, level=logging.INFO, fmt=None, datefmt=None): + logger = logging.getLogger(name) + logger.setLevel(level) + + # create console handler with a higher log level + ch = logging.StreamHandler() + ch.setLevel(logging.DEBUG) + + # create formatter and add it to the handlers + if not fmt: + fmt = '[%(asctime)s] %(message)s' + if not datefmt: + datefmt = '%Y-%m-%d %H:%M:%S' + formatter = logging.Formatter(fmt=fmt, datefmt=datefmt) + ch.setFormatter(formatter) + + logger.addHandler(ch) + return logger + + def log(self, msg, level=logging.INFO): + self.logger.log(level, msg) + + def log_dict(self, msg_dict, prefix='Report @step: ', level=logging.INFO): + if 'step' in msg_dict: + step = msg_dict.pop('step') + prefix = '{}{:.2f} '.format(prefix, step) + 
self.log('{}{}'.format(prefix, msg_dict)) + + def report(self, msg_dict, prefix='Report @step', level=logging.INFO): + self.log_dict(msg_dict, prefix, level) diff --git a/main_action.py b/main_action.py new file mode 100644 index 0000000..15181a5 --- /dev/null +++ b/main_action.py @@ -0,0 +1,185 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. +MIT license + +Entry point of Kinetics experiments. +NOTE: We will not handle the issues from action recognition experiments. + +This script provides full implementations including +- Various methods (ReBias, Vanilla, Biased, LearnedMixIn, RUBi) + - Target network: ResNet3D + - Biased network: ResNet2D +- Sub-sampled 10-Class Kinetics / Mimetics from the full datasets. + - Please see datasets/kinetics.py for details. + +Usage: + python main_action.py --train_root /path/to/your/kinetics/train + --train_annotation_file /path/to/your/kinetics/train_annotion + --eval_root /path/to/your/mimetics/train + --eval_annotation_file /path/to/your/kinetics/train_annotion + +""" +import fire + +from datasets.kinetics import get_kinetics_dataloader +from evaluator import ActionEvaluator +from logger import PythonLogger +from trainer import Trainer +from models import ResNet3D, ReBiasModels + + +class ActionTrainer(Trainer): + def _set_models(self): + f_net = ResNet3D.ResNet3DModel(**self.options.f_config) + g_nets = [ResNet3D.ResNet3DModel(**self.options.g_config) + for _ in range(self.options.n_g_nets)] + + self.model = ReBiasModels(f_net, g_nets) + self.evaluator = ActionEvaluator(device=self.device) + + +def main(train_root, + train_annotation_file, + eval_root, + eval_annotation_file, + train_dataset='kinetics10', + eval_dataset='mimetics10', + batch_size=128, + num_classes=10, + # optimizer config + lr=0.1, + optim='Adam', + n_epochs=120, + lr_step_size=20, + scheduler='CosineAnnealingLR', + n_f_pretrain_epochs=0, + n_g_pretrain_epochs=0, + f_lambda_outer=1, + g_lambda_inner=1, + n_g_update=1, + update_g_cls=True, + # criterion config + outer_criterion='RbfHSIC', + inner_criterion='MinusRbfHSIC', + rbf_sigma_scale_x=2, + rbf_sigma_scale_y=0.5, + rbf_sigma_x=1, + rbf_sigma_y=1, + update_sigma_per_epoch=False, + sigma_update_sampling_rate=0.25, + hsic_alg='unbiased', + feature_pos='post', + # model configs + n_g_nets=1, + final_bottleneck_dim=0, + resnet_depth=18, + f_temporal_kernel_sizes='33333', + g_temporal_kernel_sizes='11111', + resnet_base_width=32, + # logging + log_step=10, + ): + logger = PythonLogger() + logger.log('preparing val loader...') + + val_loaders = {} + val_loaders['unbiased'] = get_kinetics_dataloader(root=eval_root, batch_size=batch_size, + logger=logger, + anno_file=eval_annotation_file, + dataset_name=eval_dataset, + split='test') + val_loaders['val'] = get_kinetics_dataloader(train_root, batch_size=batch_size, + logger=logger, + anno_file=train_annotation_file, + dataset_name=train_dataset, + split='val') + + logger.log('preparing train loader...') + tr_loader = get_kinetics_dataloader(train_root, batch_size=batch_size, + logger=logger, + anno_file=train_annotation_file, + dataset_name=train_dataset, + split='train') + + logger.log('preparing trainer...') + + if scheduler == 'StepLR': + f_scheduler_config = {'step_size': lr_step_size} + g_scheduler_config = {'step_size': lr_step_size} + elif scheduler == 'CosineAnnealingLR': + f_scheduler_config = {'T_max': n_epochs} + g_scheduler_config = {'T_max': n_epochs} + else: + raise NotImplementedError + + # XXX resnet_base_width should be 32. 
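+    # Note on the branch below: the comparison baselines (LearnedMixin, RUBi) are configured with the
+    # feature dimension of the biased branch (256 here, which assumes resnet_base_width=32 as flagged above),
+    # whereas the default HSIC criteria only need the RBF kernel bandwidths and the estimator type (hsic_alg).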
+ if outer_criterion == 'LearnedMixin': + outer_criterion_config = {'feat_dim': 256, 'num_classes': num_classes} + elif outer_criterion == 'RUBi': + outer_criterion_config = {'feat_dim': 256} + else: + outer_criterion_config = {'sigma_x': rbf_sigma_x, 'sigma_y': rbf_sigma_y, + 'algorithm': hsic_alg} + + engine = ActionTrainer( + outer_criterion=outer_criterion, + inner_criterion=inner_criterion, + outer_criterion_config=outer_criterion_config, + outer_criterion_detail={'sigma_x_type': rbf_sigma_x, + 'sigma_y_type': rbf_sigma_y, + 'sigma_x_scale': rbf_sigma_scale_x, + 'sigma_y_scale': rbf_sigma_scale_y}, + inner_criterion_config={'sigma_x': rbf_sigma_x, 'sigma_y': rbf_sigma_y, + 'algorithm': hsic_alg}, + inner_criterion_detail={'sigma_x_type': rbf_sigma_x, + 'sigma_y_type': rbf_sigma_y, + 'sigma_x_scale': rbf_sigma_scale_x, + 'sigma_y_scale': rbf_sigma_scale_y}, + n_epochs=n_epochs, + n_f_pretrain_epochs=n_f_pretrain_epochs, + n_g_pretrain_epochs=n_g_pretrain_epochs, + f_config={'resnet_depth': resnet_depth, + 'model_arch': f_temporal_kernel_sizes, + 'feature_position': feature_pos, + 'width_per_group': resnet_base_width, + 'num_classes': num_classes, + 'final_bottleneck_dim': final_bottleneck_dim + }, + g_config={'resnet_depth': resnet_depth, + 'model_arch': g_temporal_kernel_sizes, + 'feature_position': feature_pos, + 'width_per_group': resnet_base_width, + 'num_classes': num_classes, + 'final_bottleneck_dim': final_bottleneck_dim + }, + optimizer=optim, + f_optim_config={'lr': lr, 'weight_decay': 1e-4}, + g_optim_config={'lr': lr, 'weight_decay': 1e-4}, + f_scheduler_config=f_scheduler_config, + g_scheduler_config=g_scheduler_config, + scheduler=scheduler, + f_lambda_outer=f_lambda_outer, + g_lambda_inner=g_lambda_inner, + n_g_update=n_g_update, + update_g_cls=update_g_cls, + n_g_nets=n_g_nets, + train_loader=tr_loader, + logger=logger, + log_step=log_step, + sigma_update_sampling_rate=sigma_update_sampling_rate) + engine.train(tr_loader, val_loaders=val_loaders, + update_sigma_per_epoch=update_sigma_per_epoch) + + val_loaders['val'] = get_kinetics_dataloader(train_root, batch_size=batch_size, + logger=logger, + anno_file=train_annotation_file, + dataset_name=train_dataset, + split='test') + evaluator = ActionEvaluator() + engine.evaluate(evaluator, + step=n_epochs, + val_loaders=val_loaders) + + +if __name__ == '__main__': + fire.Fire(main) diff --git a/main_biased_mnist.py b/main_biased_mnist.py new file mode 100644 index 0000000..b7117cf --- /dev/null +++ b/main_biased_mnist.py @@ -0,0 +1,138 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. +MIT license + +Entry point of Biased-MNIST experiments. + +This script provides full implementations including +- Various methods (ReBias, Vanilla, Biased, LearnedMixIn, RUBi) + - Target network: Stacked convolutional networks (kernel_size=7) + - Biased network: Stacked convolutional networks (kernel_size=1) + - We do not provide HEX implementation here. See README.md for details. +- Controllable Biased-MNIST experiments by --train_correlation option. + - Please see datasets/colour_mnist.py for details. 
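+- Three evaluation splits are built below: biased (data_label_correlation=1),
+  rho0 (correlation 0) and unbiased (correlation 0.1).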
+ +Usage: + python main_biased_mnist.py --root /path/to/your/dataset --train_correlation 0.999 +""" +import fire + +from datasets.colour_mnist import get_biased_mnist_dataloader +from evaluator import MNISTEvaluator +from logger import PythonLogger +from trainer import Trainer +from models import SimpleConvNet, ReBiasModels + + +class MNISTTrainer(Trainer): + def _set_models(self): + if not self.options.f_config: + self.options.f_config = {'kernel_size': 7, 'feature_pos': 'post'} + self.options.g_config = {'kernel_size': 1, 'feature_pos': 'post'} + + f_net = SimpleConvNet(**self.options.f_config) + g_nets = [SimpleConvNet(**self.options.g_config) for _ in range(self.options.n_g_nets)] + + self.model = ReBiasModels(f_net, g_nets) + self.evaluator = MNISTEvaluator(device=self.device) + + +def main(root, + batch_size=256, + train_correlation=0.999, + n_confusing_labels=9, + # optimizer config + lr=0.001, + optim='Adam', + n_epochs=80, + lr_step_size=20, + n_f_pretrain_epochs=0, + n_g_pretrain_epochs=0, + f_lambda_outer=1, + g_lambda_inner=1, + n_g_update=1, + update_g_cls=True, + # criterion config + outer_criterion='RbfHSIC', + inner_criterion='MinusRbfHSIC', + rbf_sigma_scale_x=1, + rbf_sigma_scale_y=1, + rbf_sigma_x=1, + rbf_sigma_y=1, + update_sigma_per_epoch=False, + hsic_alg='unbiased', + feature_pos='post', + # model configs + n_g_nets=1, + f_kernel_size=7, + g_kernel_size=1, + # others + save_dir='./checkpoints', + ): + logger = PythonLogger() + logger.log('preparing train loader...') + tr_loader = get_biased_mnist_dataloader(root, batch_size=batch_size, + data_label_correlation=train_correlation, + n_confusing_labels=n_confusing_labels, + train=True) + logger.log('preparing val loader...') + val_loaders = {} + val_loaders['biased'] = get_biased_mnist_dataloader(root, batch_size=batch_size, + data_label_correlation=1, + n_confusing_labels=n_confusing_labels, + train=False) + val_loaders['rho0'] = get_biased_mnist_dataloader(root, batch_size=batch_size, + data_label_correlation=0, + n_confusing_labels=9, + train=False) + val_loaders['unbiased'] = get_biased_mnist_dataloader(root, batch_size=batch_size, + data_label_correlation=0.1, + n_confusing_labels=9, + train=False) + + logger.log('preparing trainer...') + + log_step = int(100 * 256 / batch_size) + + engine = MNISTTrainer( + outer_criterion=outer_criterion, + inner_criterion=inner_criterion, + outer_criterion_config={'sigma_x': rbf_sigma_x, 'sigma_y': rbf_sigma_y, + 'algorithm': hsic_alg}, + outer_criterion_detail={'sigma_x_type': rbf_sigma_x, + 'sigma_y_type': rbf_sigma_y, + 'sigma_x_scale': rbf_sigma_scale_x, + 'sigma_y_scale': rbf_sigma_scale_y}, + inner_criterion_config={'sigma_x': rbf_sigma_x, 'sigma_y': rbf_sigma_y, + 'algorithm': hsic_alg}, + inner_criterion_detail={'sigma_x_type': rbf_sigma_x, + 'sigma_y_type': rbf_sigma_y, + 'sigma_x_scale': rbf_sigma_scale_x, + 'sigma_y_scale': rbf_sigma_scale_y}, + n_epochs=n_epochs, + n_f_pretrain_epochs=n_f_pretrain_epochs, + n_g_pretrain_epochs=n_g_pretrain_epochs, + f_config={'num_classes': 10, 'kernel_size': f_kernel_size, 'feature_pos': feature_pos}, + g_config={'num_classes': 10, 'kernel_size': g_kernel_size, 'feature_pos': feature_pos}, + f_lambda_outer=f_lambda_outer, + g_lambda_inner=g_lambda_inner, + n_g_update=n_g_update, + update_g_cls=update_g_cls, + n_g_nets=n_g_nets, + optimizer=optim, + f_optim_config={'lr': lr, 'weight_decay': 1e-4}, + g_optim_config={'lr': lr, 'weight_decay': 1e-4}, + scheduler='StepLR', + f_scheduler_config={'step_size': lr_step_size}, + 
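+        # Sketch of the objective these options configure (following the ReBias
+        # formulation; the actual update order is implemented in trainer.py):
+        #   f-step: CE(f(x), y) + f_lambda_outer * RbfHSIC(f_feat, g_feat)
+        #   g-step: CE(g(x), y) + g_lambda_inner * MinusRbfHSIC(f_feat, g_feat)
+        # i.e. f is trained to be independent of the biased g networks, while each
+        # g is trained to remain predictive yet maximally dependent on f.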
g_scheduler_config={'step_size': lr_step_size}, + train_loader=tr_loader, + log_step=log_step, + logger=logger) + engine.train(tr_loader, val_loaders=val_loaders, + val_epoch_step=1, + update_sigma_per_epoch=update_sigma_per_epoch, + save_dir=save_dir) + + +if __name__ == '__main__': + fire.Fire(main) diff --git a/main_imagenet.py b/main_imagenet.py new file mode 100644 index 0000000..e8a31b3 --- /dev/null +++ b/main_imagenet.py @@ -0,0 +1,157 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. +MIT license + +Entry point of 9-Class ImageNet experiments. + +This script provides full implementations including +- Various methods (ReBias, Vanilla, Biased, LearnedMixIn, RUBi) + - Target network: ResNet-18 + - Biased network: BagNet-18 + - We do not provide Stylised ImageNet implementation here. See README.md for details. +- Sub-sampled 9-Class ImageNet / ImageNet-A from the full ImageNet / ImageNet-A folder. + - Please see datasets/imagenet.py for details. +- Cluster-based unbiased accuracies. + - For curious readers, `make_clusters.py` shows how to make texture clusters. + +Usage: + python main_imagenet.py --train_root /path/to/your/imagenet/train + --val_root /path/to/your/imagenet/val + --imageneta_root /path/to/your/imagenet_a +""" +import fire + +from datasets.imagenet import get_imagenet_dataloader +from evaluator import ImageNetEvaluator +from logger import PythonLogger +from trainer import Trainer +from models import resnet18, bagnet18, ReBiasModels + + +class ImageNetTrainer(Trainer): + def _set_models(self): + f_net = resnet18(**self.options.f_config) + g_nets = [bagnet18(**self.options.g_config) + for _ in range(self.options.n_g_nets)] + + self.model = ReBiasModels(f_net, g_nets) + self.evaluator = ImageNetEvaluator(device=self.device) + + +def main(train_root, + val_root, + imageneta_root, + batch_size=128, + num_classes=9, + # optimizer config + lr=0.001, + optim='Adam', + n_epochs=120, + lr_step_size=30, + scheduler='CosineAnnealingLR', + n_f_pretrain_epochs=0, + n_g_pretrain_epochs=0, + f_lambda_outer=1, + g_lambda_inner=1, + n_g_update=1, + update_g_cls=True, + # criterion config + outer_criterion='RbfHSIC', + inner_criterion='MinusRbfHSIC', + rbf_sigma_scale_x=1, + rbf_sigma_scale_y=1, + rbf_sigma_x='median', + rbf_sigma_y='median', + update_sigma_per_epoch=True, + hsic_alg='unbiased', + feature_pos='post', + # model configs + n_g_nets=1, + final_bottleneck_dim=0, + # logging + log_step=10, + # others + save_dir='./checkpoints', + ): + logger = PythonLogger() + logger.log('preparing train loader...') + tr_loader = get_imagenet_dataloader(train_root, + batch_size=batch_size, + train=True) + + logger.log('preparing val loader...') + val_loaders = {} + val_loaders['biased'] = get_imagenet_dataloader(val_root, + batch_size=batch_size, + train=False) + val_loaders['unbiased'] = get_imagenet_dataloader(val_root, + batch_size=batch_size, + train=False) + val_loaders['imagenet-a'] = get_imagenet_dataloader(imageneta_root, + batch_size=batch_size, + train=False, + val_data='ImageNet-A') + + logger.log('preparing trainer...') + + if scheduler == 'StepLR': + f_scheduler_config = {'step_size': lr_step_size} + g_scheduler_config = {'step_size': lr_step_size} + elif scheduler == 'CosineAnnealingLR': + f_scheduler_config = {'T_max': n_epochs} + g_scheduler_config = {'T_max': n_epochs} + else: + raise NotImplementedError + + if outer_criterion == 'LearnedMixin': + outer_criterion_config = {'feat_dim': 512, 'num_classes': 9} + elif outer_criterion == 'RUBi': + outer_criterion_config = 
{'feat_dim': 512} + else: + outer_criterion_config = {'sigma_x': rbf_sigma_x, 'sigma_y': rbf_sigma_y, + 'algorithm': hsic_alg}, + + engine = ImageNetTrainer( + outer_criterion=outer_criterion, + inner_criterion=inner_criterion, + outer_criterion_config=outer_criterion_config, + outer_criterion_detail={'sigma_x_type': rbf_sigma_x, + 'sigma_y_type': rbf_sigma_y, + 'sigma_x_scale': rbf_sigma_scale_x, + 'sigma_y_scale': rbf_sigma_scale_y}, + inner_criterion_config={'sigma_x': rbf_sigma_x, 'sigma_y': rbf_sigma_y, + 'algorithm': hsic_alg}, + inner_criterion_detail={'sigma_x_type': rbf_sigma_x, + 'sigma_y_type': rbf_sigma_y, + 'sigma_x_scale': rbf_sigma_scale_x, + 'sigma_y_scale': rbf_sigma_scale_y}, + n_epochs=n_epochs, + n_f_pretrain_epochs=n_f_pretrain_epochs, + n_g_pretrain_epochs=n_g_pretrain_epochs, + f_config={'feature_pos': feature_pos, + 'num_classes': num_classes}, + g_config={'feature_pos': feature_pos, + 'num_classes': num_classes}, + optimizer=optim, + f_optim_config={'lr': lr, 'weight_decay': 1e-4}, + g_optim_config={'lr': lr, 'weight_decay': 1e-4}, + f_scheduler_config=f_scheduler_config, + g_scheduler_config=g_scheduler_config, + scheduler=scheduler, + f_lambda_outer=f_lambda_outer, + g_lambda_inner=g_lambda_inner, + n_g_update=n_g_update, + update_g_cls=update_g_cls, + n_g_nets=n_g_nets, + train_loader=tr_loader, + logger=logger, + log_step=log_step) + + engine.train(tr_loader, val_loaders=val_loaders, + val_epoch_step=1, + update_sigma_per_epoch=update_sigma_per_epoch, + save_dir=save_dir) + + +if __name__ == '__main__': + fire.Fire(main) diff --git a/make_clusters.py b/make_clusters.py new file mode 100644 index 0000000..c642cc4 --- /dev/null +++ b/make_clusters.py @@ -0,0 +1,154 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. +MIT license +""" +import argparse +import os +import time + +import torch +import torch.nn as nn + +import torchvision +from torchvision import transforms +from torchvision.utils import save_image + +import numpy as np +from PIL import Image +from sklearn.cluster import MiniBatchKMeans + +from datasets.imagenet import get_imagenet_dataloader + +parser = argparse.ArgumentParser() + +parser.add_argument('--dataset', type=str, default='ImageNet') +parser.add_argument('--num_classes', type=int, default=9, help='number of classes') +parser.add_argument('--load_size', type=int, default=256, help='image load size') +parser.add_argument('--image_size', type=int, default=224, help='image crop size') +parser.add_argument('--k', type=int, default=9, help='number of clusters') +parser.add_argument('--n_sample', type=int, default='30', help='number of samples per cluster') +parser.add_argument('--batch_size', type=int, default=64, help='mini-batch size') +parser.add_argument('--num_workers', type=int, default=4, help='number of data loading workers') +parser.add_argument('--cluster_dir', type=str, default='clusters') + + +def main(n_try=None): + args = parser.parse_args() + + # create directories if not exist + if not os.path.exists(args.cluster_dir): + os.makedirs(args.cluster_dir) + + data_loader = get_imagenet_dataloader(batch_size=args.batch_size, train=False) + + transform = transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + ]) + + extractor = nn.Sequential(*list(torchvision.models.vgg16(pretrained=True).features)[:-16]) # conv1_2 + extractor.cuda() + + # ======================================================================= 
# + # 1. Extract features # + # ======================================================================= # + print('Start extracting features...') + extractor.eval() + N = len(data_loader.dataset.dataset) + + start = time.time() + for i, (images, targets, _) in enumerate(data_loader): + images = images.cuda() + outputs = gram_matrix(extractor(images)) + outputs = outputs.view(images.size(0), -1).data.cpu().numpy() + + if i == 0: + features = np.zeros((N, outputs.shape[1])).astype('float32') + + if i < N - 1: + features[i * args.batch_size: (i+1) * args.batch_size] = outputs.astype('float32') + + else: + features[i * args.batch_size:] = outputs.astype('float32') + + # L2 normalization + features = features / np.linalg.norm(features, axis=1)[:, np.newaxis] + print('Finished extracting features...(time: {0:.0f} s)'.format(time.time() - start)) + + # ======================================================================= # + # 2. Clustering # + # ======================================================================= # + start = time.time() + labels, image_lists = Kmeans(args.k, features) + print('Finished clustering...(time: {0:.0f} s)'.format(time.time() - start)) + + # save clustering results + torch.save(torch.LongTensor(labels), os.path.join(args.cluster_dir, + 'cluster_label_{}.pth'.format(n_try))) + print('Saved cluster label...') + + len_list = [len(image_list) for image_list in image_lists] + min_len = min(len_list) + if min_len < args.n_sample: + args.n_sample = min_len + print('number of images in each cluster:', len_list) + + # sample clustering results + start = time.time() + samples = [[]] * args.k + for k in range(args.k): + idx_list = image_lists[k] # list of image indexes in each cluster + for j in range(args.n_sample): # sample j indexes + idx = idx_list[j] + filename = data_loader.dataset.dataset[idx][0] + image = transform(Image.open(filename).convert('RGB')).unsqueeze(0) + samples[k] = samples[k] + [image] + + for k in range(args.k): + samples[k] = torch.cat(samples[k], dim=3) + samples = torch.cat(samples, dim=0) + + filename = os.path.join(args.cluster_dir, 'cluster_sample_{}.jpg'.format(n_try)) + save_image(denorm(samples.data.cpu()), filename, nrow=1, padding=0) + print('Finished sampling...(time: {0:.0f} s)'.format(time.time() - start)) + + +def gram_matrix(input, normalize=True): + N, C, H, W = input.size() + feat = input.view(N, C, -1) + G = torch.bmm(feat, feat.transpose(1, 2)) # N X C X C + if normalize: + G /= (C * H * W) + return G + + +def denorm(x): + """Convert the range to [0, 1].""" + mean = torch.tensor([0.485, 0.456, 0.406]) + std = torch.tensor([0.229, 0.224, 0.225]) + return x.mul_(std[:, None, None]).add_(mean[:, None, None]).clamp_(0, 1) + + +def Kmeans(k, features): + n_data, dim = features.shape + features = torch.FloatTensor(features) + + clus = MiniBatchKMeans(n_clusters=k, + batch_size=1024).fit(features) + labels = clus.labels_ + + image_lists = [[] for _ in range(k)] + feat_lists = [[] for _ in range(k)] + for i in range(n_data): + image_lists[labels[i]].append(i) + feat_lists[labels[i]].append(features[i].unsqueeze(0)) + + return labels, image_lists + + +if __name__ == '__main__': + for i in range(5): + main(i+1) diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..5681988 --- /dev/null +++ b/models/__init__.py @@ -0,0 +1,24 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. 
+MIT license + +Target architectures and intentionally biased architectures for three benchmarks +- MNIST: deep stacked convolutional networks with different kernel size, i.e., 7 (target) and 1 (biased). +- ImageNet: ResNet-18 (target) and BagNet-18 (biased). +- Kinetics: spatial-temporal 3D-ResNet (target), spatial-only 2D-ResNet (biased). +""" +try: + from models.action_models import ResNet3D +except ImportError: + print('failed to import kinetics, please install library from') + print('https://github.com/facebookresearch/SlowFast/blob/master/INSTALL.md') + ResNet3D = None +from models.imagenet_models import resnet18, bagnet18 +from models.mnist_models import SimpleConvNet +from models.rebias_models import ReBiasModels + + +__all__ = ['ReBiasModels', + 'resnet18', 'bagnet18', + 'SimpleConvNet', + 'ResNet3D'] diff --git a/models/action_models/ResNet3D.py b/models/action_models/ResNet3D.py new file mode 100644 index 0000000..60f4c13 --- /dev/null +++ b/models/action_models/ResNet3D.py @@ -0,0 +1,255 @@ +import torch.nn as nn +from .weight_init_helper import init_weights +from .stem_helper import VideoModelStem +from .resnet_helper import ResStage +from .head_helper import ResNetBasicHead + +# Number of blocks for different stages given the model depth. +_MODEL_STAGE_DEPTH = {18.1: (2, 2, 2, 2), + 18: (2, 2, 2, 2), + 34.1: (3, 4, 6, 3), + 50: (3, 4, 6, 3), + 101: (3, 4, 23, 3)} +_MODEL_TRANS_FUNC = {18.1: 'basic_transform', + 18: 'basic_transform', + 34.1: 'basic_transform', + 50: 'bottleneck_transform', + 101: 'bottleneck_transform'} + +# width_multiplier = {18: [1, 1, 2, 4, 8], +# 50: [1, 4, 8, 16, 32]} +width_multiplier = {18.1: [1, 1, 2, 4, 8], + 34.1: [1, 1, 2, 4, 8], + 18: [1, 4, 8, 16, 32], + 50: [1, 4, 8, 16, 32]} + +_POOL1 = [[1, 1, 1]] + +_TEMPORAL_KERNEL_BASIS = { + "11111": [ + [[1]], # conv1 temporal kernel. + [[1]], # res2 temporal kernel. + [[1]], # res3 temporal kernel. + [[1]], # res4 temporal kernel. + [[1]], # res5 temporal kernel. + ], + "33333": [ + [[3]], # conv1 temporal kernel. + [[3]], # res2 temporal kernel. + [[3]], # res3 temporal kernel. + [[3]], # res4 temporal kernel. + [[3]], # res5 temporal kernel. + ], + "11133": [ + [[1]], # conv1 temporal kernel. + [[1]], # res2 temporal kernel. + [[1]], # res3 temporal kernel. + [[3]], # res4 temporal kernel. + [[3]], # res5 temporal kernel. + ], +} +FC_INIT_STD = 0.01 +ZERO_INIT_FINAL_BN = False +NUM_BLOCK_TEMP_KERNEL = [[2], [2], [2], [2]] + +DATA_NUM_FRAMES = 8 +DATA_CROP_SIZE = 224 + +NONLOCAL_LOCATION = [[[]], [[]], [[]], [[]]] +NONLOCAL_GROUP = [[1], [1], [1], [1]] +NONLOCAL_INSTANTIATION = "dot_product" + +RESNET_STRIDE_1X1 = False +RESNET_INPLACE_RELU = True + + +class ResNet3DModel(nn.Module): + """ + ResNet model builder. It builds a ResNet like network backbone without + lateral connection (C2D, I3D, SlowOnly). + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "Slowfast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + + Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. + "Non-local neural networks." + https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__(self, + model_arch='33333', + resnet_depth=18, + feature_position='post', + width_per_group=32, + dropout_rate=0.0, + num_classes=400, + final_bottleneck_dim=0 + ): + """ + The `__init__` method of any subclass should also contain these + arguments. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. 
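+
+        Example (a sketch mirroring the defaults main_action.py passes for the
+        target network f; other configurations are equally valid):
+            net = ResNet3DModel(model_arch='33333', resnet_depth=18,
+                                feature_position='post', width_per_group=32,
+                                num_classes=10, final_bottleneck_dim=0)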
+ """ + super(ResNet3DModel, self).__init__() + self.num_pathways = 1 + self._construct_network( + model_arch=model_arch, + resnet_depth=resnet_depth, + dropout_rate=dropout_rate, + width_per_group=width_per_group, + num_classes=num_classes, + feature_position=feature_position, + final_bottleneck_dim=final_bottleneck_dim + ) + init_weights( + self, FC_INIT_STD, ZERO_INIT_FINAL_BN + ) + + def _construct_network(self, model_arch='33333', + resnet_depth=18, + feature_position='post', + num_groups=1, + width_per_group=32, + input_channel_num=None, + dropout_rate=0.0, + num_classes=400, + final_bottleneck_dim=0): + """ + Builds a single pathway ResNet model. + + Args: + cfg (CfgNode): model building configs, details are in the + comments of the config file. + """ + if input_channel_num is None: + input_channel_num = [3] + pool_size = _POOL1 + assert len({len(pool_size), self.num_pathways}) == 1 + assert resnet_depth in _MODEL_STAGE_DEPTH.keys() + + (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[resnet_depth] + trans_func = _MODEL_TRANS_FUNC[resnet_depth] + + dim_inner = num_groups * width_per_group + + temp_kernel = _TEMPORAL_KERNEL_BASIS[str(model_arch)] + + self.s1 = VideoModelStem( + dim_in=input_channel_num, + dim_out=[width_per_group * width_multiplier[resnet_depth][0]], + kernel=[temp_kernel[0][0] + [7, 7]], + stride=[[1, 2, 2]], + padding=[[temp_kernel[0][0][0] // 2, 3, 3]], + ) + + self.s2 = ResStage( + dim_in=[width_per_group * width_multiplier[resnet_depth][0]], + dim_out=[width_per_group * width_multiplier[resnet_depth][1]], + dim_inner=[dim_inner], + temp_kernel_sizes=temp_kernel[1], + stride=[1], + num_blocks=[d2], + num_groups=[num_groups], + num_block_temp_kernel=NUM_BLOCK_TEMP_KERNEL[0], + nonlocal_inds=NONLOCAL_LOCATION[0], + nonlocal_group=NONLOCAL_GROUP[0], + instantiation=NONLOCAL_INSTANTIATION, + trans_func_name=trans_func, + stride_1x1=RESNET_STRIDE_1X1, + inplace_relu=RESNET_INPLACE_RELU, + ) + + for pathway in range(self.num_pathways): + pool = nn.MaxPool3d( + kernel_size=pool_size[pathway], + stride=pool_size[pathway], + padding=[0, 0, 0], + ) + self.add_module("pathway{}_pool".format(pathway), pool) + + self.s3 = ResStage( + dim_in=[width_per_group * width_multiplier[resnet_depth][1]], + dim_out=[width_per_group * width_multiplier[resnet_depth][2]], + dim_inner=[dim_inner * 2], + temp_kernel_sizes=temp_kernel[2], + stride=[2], + num_blocks=[d3], + num_groups=[num_groups], + num_block_temp_kernel=NUM_BLOCK_TEMP_KERNEL[1], + nonlocal_inds=NONLOCAL_LOCATION[1], + nonlocal_group=NONLOCAL_GROUP[1], + instantiation=NONLOCAL_INSTANTIATION, + trans_func_name=trans_func, + stride_1x1=RESNET_STRIDE_1X1, + inplace_relu=RESNET_INPLACE_RELU, + ) + + self.s4 = ResStage( + dim_in=[width_per_group * width_multiplier[resnet_depth][2]], + dim_out=[width_per_group * width_multiplier[resnet_depth][3]], + dim_inner=[dim_inner * 4], + temp_kernel_sizes=temp_kernel[3], + stride=[2], + num_blocks=[d4], + num_groups=[num_groups], + num_block_temp_kernel=NUM_BLOCK_TEMP_KERNEL[2], + nonlocal_inds=NONLOCAL_LOCATION[2], + nonlocal_group=NONLOCAL_GROUP[2], + instantiation=NONLOCAL_INSTANTIATION, + trans_func_name=trans_func, + stride_1x1=RESNET_STRIDE_1X1, + inplace_relu=RESNET_INPLACE_RELU, + ) + + self.s5 = ResStage( + dim_in=[width_per_group * width_multiplier[resnet_depth][3]], + dim_out=[width_per_group * width_multiplier[resnet_depth][4]], + dim_inner=[dim_inner * 8], + temp_kernel_sizes=temp_kernel[4], + stride=[2], + num_blocks=[d5], + num_groups=[num_groups], + 
num_block_temp_kernel=NUM_BLOCK_TEMP_KERNEL[3], + nonlocal_inds=NONLOCAL_LOCATION[3], + nonlocal_group=NONLOCAL_GROUP[3], + instantiation=NONLOCAL_INSTANTIATION, + trans_func_name=trans_func, + stride_1x1=RESNET_STRIDE_1X1, + inplace_relu=RESNET_INPLACE_RELU, + ) + + self.head = ResNetBasicHead( + dim_in=[width_per_group * width_multiplier[resnet_depth][4]], + num_classes=num_classes, + pool_size=[ + [ + DATA_NUM_FRAMES // pool_size[0][0], + DATA_CROP_SIZE // 32 // pool_size[0][1], + DATA_CROP_SIZE // 32 // pool_size[0][2], + ] + ], + dropout_rate=dropout_rate, + feature_position=feature_position, + final_bottleneck_dim=final_bottleneck_dim + ) + + def forward(self, x, logits_only=False): + x = [x] + x = self.s1(x) + x = self.s2(x) + for pathway in range(self.num_pathways): + pool = getattr(self, "pathway{}_pool".format(pathway)) + x[pathway] = pool(x[pathway]) + x = self.s3(x) + x = self.s4(x) + x = self.s5(x) + x, h = self.head(x) + + if logits_only: + return x + else: + return x, h diff --git a/models/action_models/__init__.py b/models/action_models/__init__.py new file mode 100644 index 0000000..96f6b08 --- /dev/null +++ b/models/action_models/__init__.py @@ -0,0 +1,3 @@ +"""Kinetics model implementations. +Original codes: https://github.com/facebookresearch/SlowFast +""" diff --git a/models/action_models/head_helper.py b/models/action_models/head_helper.py new file mode 100644 index 0000000..24b7184 --- /dev/null +++ b/models/action_models/head_helper.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""ResNe(X)t Head helper.""" + +import torch +import torch.nn as nn + + +class ResNetBasicHead(nn.Module): + """ + ResNe(X)t 3D head. + This layer performs a fully-connected projection during training, when the + input size is 1x1x1. It performs a convolutional projection during testing + when the input size is larger than 1x1x1. If the inputs are from multiple + different pathways, the inputs will be concatenated after pooling. + """ + + def __init__( + self, + dim_in, + num_classes, + pool_size, + dropout_rate=0.0, + feature_position='post', + act_func="softmax", + final_bottleneck_dim=None + ): + """ + The `__init__` method of any subclass should also contain these + arguments. + ResNetBasicHead takes p pathways as input where p in [1, infty]. + + Args: + dim_in (list): the list of channel dimensions of the p inputs to the + ResNetHead. + num_classes (int): the channel dimensions of the p outputs to the + ResNetHead. + pool_size (list): the list of kernel sizes of p spatial temporal + poolings, temporal pool kernel size, spatial pool kernel size, + spatial pool kernel size in order. + dropout_rate (float): dropout rate. If equal to 0.0, perform no + dropout. + act_func (string): activation function to use. 'softmax': applies + softmax on the output. 'sigmoid': applies sigmoid on the output. + """ + super(ResNetBasicHead, self).__init__() + assert ( + len({len(pool_size), len(dim_in)}) == 1 + ), "pathway dimensions are not consistent." 
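+        # With the Kinetics defaults in main_action.py (resnet_depth=18,
+        # resnet_base_width=32), dim_in is [1024]; the average pooling below
+        # collapses the remaining TxHxW grid before the linear projection, and an
+        # optional 1x1x1 bottleneck can shrink the channel dimension first.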
+ self.num_pathways = len(pool_size) + + for pathway in range(self.num_pathways): + avg_pool = nn.AvgPool3d(pool_size[pathway], stride=1) + self.add_module("pathway{}_avgpool".format(pathway), avg_pool) + + if dropout_rate > 0.0: + self.dropout = nn.Dropout(dropout_rate) + + # setting final bottleneck after GAP (e.g., 2048 -> final_bottleck_dim -> num_classes) + if final_bottleneck_dim: + self.final_bottleneck_dim = final_bottleneck_dim + self.final_bottleneck = nn.Conv3d(sum(dim_in), final_bottleneck_dim, + kernel_size=1, + stride=1, + padding=0, + bias=False) + self.final_bottleneck_bn = nn.BatchNorm3d(final_bottleneck_dim, + eps=1e-5, + momentum=0.1) + self.final_bottleneck_act = nn.ReLU(inplace=True) + dim_in = final_bottleneck_dim + else: + self.final_bottleneck_dim = None + dim_in = sum(dim_in) + + + # Perform FC in a fully convolutional manner. The FC layer will be + # initialized with a different std comparing to convolutional layers. + self.projection = nn.Linear(dim_in, num_classes, bias=True) + + self.feature_position = feature_position + + # Softmax for evaluation and testing. + if act_func == "softmax": + self.act = nn.Softmax(dim=4) + elif act_func == "sigmoid": + self.act = nn.Sigmoid() + else: + raise NotImplementedError( + "{} is not supported as an activation" + "function.".format(act_func) + ) + + def forward(self, inputs): + assert ( + len(inputs) == self.num_pathways + ), "Input tensor does not contain {} pathway".format(self.num_pathways) + pool_out = [] + + # Perform final bottleneck + if self.final_bottleneck_dim: + for pathway in range(self.num_pathways): + inputs[pathway] = self.final_bottleneck(inputs[pathway]) + inputs[pathway] = self.final_bottleneck_bn(inputs[pathway]) + inputs[pathway] = self.final_bottleneck_act(inputs[pathway]) + + for pathway in range(self.num_pathways): + m = getattr(self, "pathway{}_avgpool".format(pathway)) + pool_out.append(m(inputs[pathway])) + + + h = torch.cat(pool_out, 1) + # (N, C, T, H, W) -> (N, T, H, W, C). + x = h.permute((0, 2, 3, 4, 1)) + + # Perform dropout. + if hasattr(self, "dropout"): + x = self.dropout(x) + + if self.feature_position == 'final_bottleneck': + h = x.mean([1, 2, 3]) + h = h.view(h.shape[0], -1) + + x = self.projection(x) + if self.feature_position == 'logit': + h = x + + # Performs fully convlutional inference. + if not self.training: + x = self.act(x) + x = x.mean([1, 2, 3]) + + x = x.view(x.shape[0], -1) + return x, h diff --git a/models/action_models/nonlocal_helper.py b/models/action_models/nonlocal_helper.py new file mode 100644 index 0000000..8c1b5ed --- /dev/null +++ b/models/action_models/nonlocal_helper.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Non-local helper""" + +import torch +import torch.nn as nn + + +class Nonlocal(nn.Module): + """ + Builds Non-local Neural Networks as a generic family of building + blocks for capturing long-range dependencies. Non-local Network + computes the response at a position as a weighted sum of the + features at all positions. This building block can be plugged into + many computer vision architectures. + More details in the paper: https://arxiv.org/pdf/1711.07971.pdf + """ + + def __init__( + self, + dim, + dim_inner, + pool_size=None, + instantiation="softmax", + norm_type="batchnorm", + zero_init_final_conv=False, + zero_init_final_norm=True, + norm_eps=1e-5, + norm_momentum=0.1, + ): + """ + Args: + dim (int): number of dimension for the input. 
+ dim_inner (int): number of dimension inside of the Non-local block. + pool_size (list): the kernel size of spatial temporal pooling, + temporal pool kernel size, spatial pool kernel size, spatial + pool kernel size in order. By default pool_size is None, + then there would be no pooling used. + instantiation (string): supports two different instantiation method: + "dot_product": normalizing correlation matrix with L2. + "softmax": normalizing correlation matrix with Softmax. + norm_type (string): support BatchNorm and LayerNorm for + normalization. + "batchnorm": using BatchNorm for normalization. + "layernorm": using LayerNorm for normalization. + "none": not using any normalization. + zero_init_final_conv (bool): If true, zero initializing the final + convolution of the Non-local block. + zero_init_final_norm (bool): + If true, zero initializing the final batch norm of the Non-local + block. + """ + super(Nonlocal, self).__init__() + self.dim = dim + self.dim_inner = dim_inner + self.pool_size = pool_size + self.instantiation = instantiation + self.norm_type = norm_type + self.use_pool = ( + False + if pool_size is None + else any((size > 1 for size in pool_size)) + ) + self.norm_eps = norm_eps + self.norm_momentum = norm_momentum + self._construct_nonlocal(zero_init_final_conv, zero_init_final_norm) + + def _construct_nonlocal(self, zero_init_final_conv, zero_init_final_norm): + # Three convolution heads: theta, phi, and g. + self.conv_theta = nn.Conv3d( + self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0 + ) + self.conv_phi = nn.Conv3d( + self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0 + ) + self.conv_g = nn.Conv3d( + self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0 + ) + + # Final convolution output. + self.conv_out = nn.Conv3d( + self.dim_inner, self.dim, kernel_size=1, stride=1, padding=0 + ) + # Zero initializing the final convolution output. + self.conv_out.zero_init = zero_init_final_conv + + if self.norm_type == "batchnorm": + self.bn = nn.BatchNorm3d( + self.dim, eps=self.norm_eps, momentum=self.norm_momentum + ) + # Zero initializing the final bn. + self.bn.transform_final_bn = zero_init_final_norm + elif self.norm_type == "layernorm": + # In Caffe2 the LayerNorm op does not contain the scale an bias + # terms described in the paper: + # https://caffe2.ai/docs/operators-catalogue.html#layernorm + # Builds LayerNorm as GroupNorm with one single group. + # Setting Affine to false to align with Caffe2. + self.ln = nn.GroupNorm(1, self.dim, eps=self.norm_eps, affine=False) + elif self.norm_type == "none": + # Does not use any norm. + pass + else: + raise NotImplementedError( + "Norm type {} is not supported".format(self.norm_type) + ) + + # Optional to add the spatial-temporal pooling. + if self.use_pool: + self.pool = nn.MaxPool3d( + kernel_size=self.pool_size, + stride=self.pool_size, + padding=[0, 0, 0], + ) + + def forward(self, x): + x_identity = x + N, C, T, H, W = x.size() + + theta = self.conv_theta(x) + + # Perform temporal-spatial pooling to reduce the computation. + if self.use_pool: + x = self.pool(x) + + phi = self.conv_phi(x) + g = self.conv_g(x) + + theta = theta.view(N, self.dim_inner, -1) + phi = phi.view(N, self.dim_inner, -1) + g = g.view(N, self.dim_inner, -1) + + # (N, C, TxHxW) * (N, C, TxHxW) => (N, TxHxW, TxHxW). + theta_phi = torch.einsum("nct,ncp->ntp", (theta, phi)) + # For original Non-local paper, there are two main ways to normalize + # the affinity tensor: + # 1) Softmax normalization (norm on exp). 
+ # 2) dot_product normalization. + if self.instantiation == "softmax": + # Normalizing the affinity tensor theta_phi before softmax. + theta_phi = theta_phi * (self.dim_inner ** -0.5) + theta_phi = nn.functional.softmax(theta_phi, dim=2) + elif self.instantiation == "dot_product": + spatial_temporal_dim = theta_phi.shape[2] + theta_phi = theta_phi / spatial_temporal_dim + else: + raise NotImplementedError( + "Unknown norm type {}".format(self.instantiation) + ) + + # (N, TxHxW, TxHxW) * (N, C, TxHxW) => (N, C, TxHxW). + theta_phi_g = torch.einsum("ntg,ncg->nct", (theta_phi, g)) + + # (N, C, TxHxW) => (N, C, T, H, W). + theta_phi_g = theta_phi_g.view(N, self.dim_inner, T, H, W) + + p = self.conv_out(theta_phi_g) + if self.norm_type == "batchnorm": + p = self.bn(p) + elif self.norm_type == "layernorm": + p = self.ln(p) + return x_identity + p diff --git a/models/action_models/resnet_helper.py b/models/action_models/resnet_helper.py new file mode 100644 index 0000000..8705017 --- /dev/null +++ b/models/action_models/resnet_helper.py @@ -0,0 +1,507 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Video models.""" + +import torch.nn as nn + +from .nonlocal_helper import Nonlocal + + +def get_trans_func(name): + """ + Retrieves the transformation module by name. + """ + trans_funcs = { + "bottleneck_transform": BottleneckTransform, + "basic_transform": BasicTransform, + } + assert ( + name in trans_funcs.keys() + ), "Transformation function '{}' not supported".format(name) + return trans_funcs[name] + + +class BasicTransform(nn.Module): + """ + Basic transformation: Tx3x3, 1x3x3, where T is the size of temporal kernel. + """ + + def __init__( + self, + dim_in, + dim_out, + temp_kernel_size, + stride, + dim_inner=None, + num_groups=1, + stride_1x1=None, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + ): + """ + Args: + dim_in (int): the channel dimensions of the input. + dim_out (int): the channel dimension of the output. + temp_kernel_size (int): the temporal kernel sizes of the middle + convolution in the bottleneck. + stride (int): the stride of the bottleneck. + dim_inner (None): the inner dimension would not be used in + BasicTransform. + num_groups (int): number of groups for the convolution. Number of + group is always 1 for BasicTransform. + stride_1x1 (None): stride_1x1 will not be used in BasicTransform. + inplace_relu (bool): if True, calculate the relu on the original + input without allocating new memory. + eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + """ + super(BasicTransform, self).__init__() + self.temp_kernel_size = temp_kernel_size + self._inplace_relu = inplace_relu + self._eps = eps + self._bn_mmt = bn_mmt + self._construct(dim_in, dim_out, stride) + + def _construct(self, dim_in, dim_out, stride): + # Tx3x3, BN, ReLU. + self.a = nn.Conv3d( + dim_in, + dim_out, + kernel_size=[self.temp_kernel_size, 3, 3], + stride=[1, stride, stride], + padding=[int(self.temp_kernel_size // 2), 1, 1], + bias=False, + ) + self.a_bn = nn.BatchNorm3d( + dim_out, eps=self._eps, momentum=self._bn_mmt + ) + self.a_relu = nn.ReLU(inplace=self._inplace_relu) + # 1x3x3, BN. 
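+        # (no ReLU after this BN: ResBlock applies the activation only after the
+        # residual addition; transform_final_bn marks this BN for the optional
+        # zero-gamma initialisation in weight_init_helper)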
+ self.b = nn.Conv3d( + dim_out, + dim_out, + kernel_size=[1, 3, 3], + stride=[1, 1, 1], + padding=[0, 1, 1], + bias=False, + ) + self.b_bn = nn.BatchNorm3d( + dim_out, eps=self._eps, momentum=self._bn_mmt + ) + self.b_bn.transform_final_bn = True + + def forward(self, x): + x = self.a(x) + x = self.a_bn(x) + x = self.a_relu(x) + + x = self.b(x) + x = self.b_bn(x) + return x + + +class BottleneckTransform(nn.Module): + """ + Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of + temporal kernel. + """ + + def __init__( + self, + dim_in, + dim_out, + temp_kernel_size, + stride, + dim_inner, + num_groups, + stride_1x1=False, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + ): + """ + Args: + dim_in (int): the channel dimensions of the input. + dim_out (int): the channel dimension of the output. + temp_kernel_size (int): the temporal kernel sizes of the middle + convolution in the bottleneck. + stride (int): the stride of the bottleneck. + dim_inner (int): the inner dimension of the block. + num_groups (int): number of groups for the convolution. num_groups=1 + is for standard ResNet like networks, and num_groups>1 is for + ResNeXt like networks. + stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise + apply stride to the 3x3 conv. + inplace_relu (bool): if True, calculate the relu on the original + input without allocating new memory. + eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + """ + super(BottleneckTransform, self).__init__() + self.temp_kernel_size = temp_kernel_size + self._inplace_relu = inplace_relu + self._eps = eps + self._bn_mmt = bn_mmt + self._stride_1x1 = stride_1x1 + self._construct(dim_in, dim_out, stride, dim_inner, num_groups) + + def _construct(self, dim_in, dim_out, stride, dim_inner, num_groups): + (str1x1, str3x3) = (stride, 1) if self._stride_1x1 else (1, stride) + + # Tx1x1, BN, ReLU. + self.a = nn.Conv3d( + dim_in, + dim_inner, + kernel_size=[self.temp_kernel_size, 1, 1], + stride=[1, str1x1, str1x1], + padding=[int(self.temp_kernel_size // 2), 0, 0], + bias=False, + ) + self.a_bn = nn.BatchNorm3d( + dim_inner, eps=self._eps, momentum=self._bn_mmt + ) + self.a_relu = nn.ReLU(inplace=self._inplace_relu) + + # 1x3x3, BN, ReLU. + self.b = nn.Conv3d( + dim_inner, + dim_inner, + [1, 3, 3], + stride=[1, str3x3, str3x3], + padding=[0, 1, 1], + groups=num_groups, + bias=False, + ) + self.b_bn = nn.BatchNorm3d( + dim_inner, eps=self._eps, momentum=self._bn_mmt + ) + self.b_relu = nn.ReLU(inplace=self._inplace_relu) + + # 1x1x1, BN. + self.c = nn.Conv3d( + dim_inner, + dim_out, + kernel_size=[1, 1, 1], + stride=[1, 1, 1], + padding=[0, 0, 0], + bias=False, + ) + self.c_bn = nn.BatchNorm3d( + dim_out, eps=self._eps, momentum=self._bn_mmt + ) + self.c_bn.transform_final_bn = True + + def forward(self, x): + # Explicitly forward every layer. + # Branch2a. + x = self.a(x) + x = self.a_bn(x) + x = self.a_relu(x) + + # Branch2b. + x = self.b(x) + x = self.b_bn(x) + x = self.b_relu(x) + + # Branch2c + x = self.c(x) + x = self.c_bn(x) + return x + + +class ResBlock(nn.Module): + """ + Residual block. + """ + + def __init__( + self, + dim_in, + dim_out, + temp_kernel_size, + stride, + trans_func, + dim_inner, + num_groups=1, + stride_1x1=False, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + ): + """ + ResBlock class constructs redisual blocks. More details can be found in: + Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 
+ "Deep residual learning for image recognition." + https://arxiv.org/abs/1512.03385 + Args: + dim_in (int): the channel dimensions of the input. + dim_out (int): the channel dimension of the output. + temp_kernel_size (int): the temporal kernel sizes of the middle + convolution in the bottleneck. + stride (int): the stride of the bottleneck. + trans_func (string): transform function to be used to construct the + bottleneck. + dim_inner (int): the inner dimension of the block. + num_groups (int): number of groups for the convolution. num_groups=1 + is for standard ResNet like networks, and num_groups>1 is for + ResNeXt like networks. + stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise + apply stride to the 3x3 conv. + inplace_relu (bool): calculate the relu on the original input + without allocating new memory. + eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + """ + super(ResBlock, self).__init__() + self._inplace_relu = inplace_relu + self._eps = eps + self._bn_mmt = bn_mmt + self._construct( + dim_in, + dim_out, + temp_kernel_size, + stride, + trans_func, + dim_inner, + num_groups, + stride_1x1, + inplace_relu, + ) + + def _construct( + self, + dim_in, + dim_out, + temp_kernel_size, + stride, + trans_func, + dim_inner, + num_groups, + stride_1x1, + inplace_relu, + ): + # Use skip connection with projection if dim or res change. + if (dim_in != dim_out) or (stride != 1): + self.branch1 = nn.Conv3d( + dim_in, + dim_out, + kernel_size=1, + stride=[1, stride, stride], + padding=0, + bias=False, + ) + self.branch1_bn = nn.BatchNorm3d( + dim_out, eps=self._eps, momentum=self._bn_mmt + ) + self.branch2 = trans_func( + dim_in, + dim_out, + temp_kernel_size, + stride, + dim_inner, + num_groups, + stride_1x1=stride_1x1, + inplace_relu=inplace_relu, + ) + self.relu = nn.ReLU(self._inplace_relu) + + def forward(self, x): + if hasattr(self, "branch1"): + x = self.branch1_bn(self.branch1(x)) + self.branch2(x) + else: + x = x + self.branch2(x) + x = self.relu(x) + return x + + +class ResStage(nn.Module): + """ + Stage of 3D ResNet. It expects to have one or more tensors as input for + single pathway (C2D, I3D, SlowOnly), and multi-pathway (SlowFast) cases. + More details can be found here: + + Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. + "Slowfast networks for video recognition." + https://arxiv.org/pdf/1812.03982.pdf + """ + + def __init__( + self, + dim_in, + dim_out, + stride, + temp_kernel_sizes, + num_blocks, + dim_inner, + num_groups, + num_block_temp_kernel, + nonlocal_inds, + nonlocal_group, + instantiation="softmax", + trans_func_name="bottleneck_transform", + stride_1x1=False, + inplace_relu=True, + ): + """ + The `__init__` method of any subclass should also contain these arguments. + ResStage builds p streams, where p can be greater or equal to one. + Args: + dim_in (list): list of p the channel dimensions of the input. + Different channel dimensions control the input dimension of + different pathways. + dim_out (list): list of p the channel dimensions of the output. + Different channel dimensions control the input dimension of + different pathways. + temp_kernel_sizes (list): list of the p temporal kernel sizes of the + convolution in the bottleneck. Different temp_kernel_sizes + control different pathway. + stride (list): list of the p strides of the bottleneck. Different + stride control different pathway. 
+ num_blocks (list): list of p numbers of blocks for each of the + pathway. + dim_inner (list): list of the p inner channel dimensions of the + input. Different channel dimensions control the input dimension + of different pathways. + num_groups (list): list of number of p groups for the convolution. + num_groups=1 is for standard ResNet like networks, and + num_groups>1 is for ResNeXt like networks. + num_block_temp_kernel (list): extent the temp_kernel_sizes to + num_block_temp_kernel blocks, then fill temporal kernel size + of 1 for the rest of the layers. + nonlocal_inds (list): If the tuple is empty, no nonlocal layer will + be added. If the tuple is not empty, add nonlocal layers after + the index-th block. + nonlocal_group (list): list of number of p nonlocal groups. Each + number controls how to fold temporal dimension to batch + dimension before applying nonlocal transformation. + https://github.com/facebookresearch/video-nonlocal-net. + instantiation (string): different instantiation for nonlocal layer. + Supports two different instantiation method: + "dot_product": normalizing correlation matrix with L2. + "softmax": normalizing correlation matrix with Softmax. + trans_func_name (string): name of the the transformation function apply + on the network. + """ + super(ResStage, self).__init__() + assert all( + ( + num_block_temp_kernel[i] <= num_blocks[i] + for i in range(len(temp_kernel_sizes)) + ) + ) + self.num_blocks = num_blocks + self.nonlocal_group = nonlocal_group + self.temp_kernel_sizes = [ + (temp_kernel_sizes[i] * num_blocks[i])[: num_block_temp_kernel[i]] + + [1] * (num_blocks[i] - num_block_temp_kernel[i]) + for i in range(len(temp_kernel_sizes)) + ] + assert ( + len( + { + len(dim_in), + len(dim_out), + len(temp_kernel_sizes), + len(stride), + len(num_blocks), + len(dim_inner), + len(num_groups), + len(num_block_temp_kernel), + len(nonlocal_inds), + len(nonlocal_group), + } + ) + == 1 + ) + self.num_pathways = len(self.num_blocks) + self._construct( + dim_in, + dim_out, + stride, + dim_inner, + num_groups, + trans_func_name, + stride_1x1, + inplace_relu, + nonlocal_inds, + instantiation, + ) + + def _construct( + self, + dim_in, + dim_out, + stride, + dim_inner, + num_groups, + trans_func_name, + stride_1x1, + inplace_relu, + nonlocal_inds, + instantiation, + ): + for pathway in range(self.num_pathways): + for i in range(self.num_blocks[pathway]): + # Retrieve the transformation function. + trans_func = get_trans_func(trans_func_name) + # Construct the block. 
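+                # Only the first block of a stage changes the channel dimension
+                # and applies the spatial stride; later blocks keep dim_out and
+                # use stride 1.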
+ res_block = ResBlock( + dim_in[pathway] if i == 0 else dim_out[pathway], + dim_out[pathway], + self.temp_kernel_sizes[pathway][i], + stride[pathway] if i == 0 else 1, + trans_func, + dim_inner[pathway], + num_groups[pathway], + stride_1x1=stride_1x1, + inplace_relu=inplace_relu, + ) + self.add_module("pathway{}_res{}".format(pathway, i), res_block) + if i in nonlocal_inds[pathway]: + nln = Nonlocal( + dim_out[pathway], + dim_out[pathway] // 2, + [1, 2, 2], + instantiation=instantiation, + ) + self.add_module( + "pathway{}_nonlocal{}".format(pathway, i), nln + ) + + def forward(self, inputs): + output = [] + for pathway in range(self.num_pathways): + x = inputs[pathway] + for i in range(self.num_blocks[pathway]): + m = getattr(self, "pathway{}_res{}".format(pathway, i)) + x = m(x) + if hasattr(self, "pathway{}_nonlocal{}".format(pathway, i)): + nln = getattr( + self, "pathway{}_nonlocal{}".format(pathway, i) + ) + b, c, t, h, w = x.shape + if self.nonlocal_group[pathway] > 1: + # Fold temporal dimension into batch dimension. + x = x.permute(0, 2, 1, 3, 4) + x = x.reshape( + b * self.nonlocal_group[pathway], + t // self.nonlocal_group[pathway], + c, + h, + w, + ) + x = x.permute(0, 2, 1, 3, 4) + x = nln(x) + if self.nonlocal_group[pathway] > 1: + # Fold back to temporal dimension. + x = x.permute(0, 2, 1, 3, 4) + x = x.reshape(b, t, c, h, w) + x = x.permute(0, 2, 1, 3, 4) + output.append(x) + + return output diff --git a/models/action_models/stem_helper.py b/models/action_models/stem_helper.py new file mode 100644 index 0000000..de2a6d1 --- /dev/null +++ b/models/action_models/stem_helper.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""ResNe(X)t 3D stem helper.""" + +import torch.nn as nn + + +class VideoModelStem(nn.Module): + """ + Video 3D stem module. Provides stem operations of Conv, BN, ReLU, MaxPool + on input data tensor for one or multiple pathways. + """ + + def __init__( + self, + dim_in, + dim_out, + kernel, + stride, + padding, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + ): + """ + The `__init__` method of any subclass should also contain these + arguments. List size of 1 for single pathway models (C2D, I3D, SlowOnly + and etc), list size of 2 for two pathway models (SlowFast). + + Args: + dim_in (list): the list of channel dimensions of the inputs. + dim_out (list): the output dimension of the convolution in the stem + layer. + kernel (list): the kernels' size of the convolutions in the stem + layers. Temporal kernel size, height kernel size, width kernel + size in order. + stride (list): the stride sizes of the convolutions in the stem + layer. Temporal kernel stride, height kernel size, width kernel + size in order. + padding (list): the paddings' sizes of the convolutions in the stem + layer. Temporal padding size, height padding size, width padding + size in order. + inplace_relu (bool): calculate the relu on the original input + without allocating new memory. + eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + """ + super(VideoModelStem, self).__init__() + + assert ( + len( + { + len(dim_in), + len(dim_out), + len(kernel), + len(stride), + len(padding), + } + ) + == 1 + ), "Input pathway dimensions are not consistent." 
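+        # Illustration (values as produced by ResNet3DModel with model_arch='33333'):
+        # the single pathway receives kernel [3, 7, 7], stride [1, 2, 2] and
+        # padding [1, 3, 3], i.e. a temporal kernel of 3 on top of the usual 7x7 stem.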
+ self.num_pathways = len(dim_in) + self.kernel = kernel + self.stride = stride + self.padding = padding + self.inplace_relu = inplace_relu + self.eps = eps + self.bn_mmt = bn_mmt + + # Construct the stem layer. + self._construct_stem(dim_in, dim_out) + + def _construct_stem(self, dim_in, dim_out): + for pathway in range(len(dim_in)): + stem = ResNetBasicStem( + dim_in[pathway], + dim_out[pathway], + self.kernel[pathway], + self.stride[pathway], + self.padding[pathway], + self.inplace_relu, + self.eps, + self.bn_mmt, + ) + self.add_module("pathway{}_stem".format(pathway), stem) + + def forward(self, x): + assert ( + len(x) == self.num_pathways + ), "Input tensor does not contain {} pathway".format(self.num_pathways) + for pathway in range(len(x)): + m = getattr(self, "pathway{}_stem".format(pathway)) + x[pathway] = m(x[pathway]) + return x + + +class ResNetBasicStem(nn.Module): + """ + ResNe(X)t 3D stem module. + Performs spatiotemporal Convolution, BN, and Relu following by a + spatiotemporal pooling. + """ + + def __init__( + self, + dim_in, + dim_out, + kernel, + stride, + padding, + inplace_relu=True, + eps=1e-5, + bn_mmt=0.1, + ): + """ + The `__init__` method of any subclass should also contain these arguments. + + Args: + dim_in (int): the channel dimension of the input. Normally 3 is used + for rgb input, and 2 or 3 is used for optical flow input. + dim_out (int): the output dimension of the convolution in the stem + layer. + kernel (list): the kernel size of the convolution in the stem layer. + temporal kernel size, height kernel size, width kernel size in + order. + stride (list): the stride size of the convolution in the stem layer. + temporal kernel stride, height kernel size, width kernel size in + order. + padding (int): the padding size of the convolution in the stem + layer, temporal padding size, height padding size, width + padding size in order. + inplace_relu (bool): calculate the relu on the original input + without allocating new memory. + eps (float): epsilon for batch norm. + bn_mmt (float): momentum for batch norm. Noted that BN momentum in + PyTorch = 1 - BN momentum in Caffe2. + """ + super(ResNetBasicStem, self).__init__() + self.kernel = kernel + self.stride = stride + self.padding = padding + self.inplace_relu = inplace_relu + self.eps = eps + self.bn_mmt = bn_mmt + + # Construct the stem layer. + self._construct_stem(dim_in, dim_out) + + def _construct_stem(self, dim_in, dim_out): + self.conv = nn.Conv3d( + dim_in, + dim_out, + self.kernel, + stride=self.stride, + padding=self.padding, + bias=False, + ) + self.bn = nn.BatchNorm3d(dim_out, eps=self.eps, momentum=self.bn_mmt) + self.relu = nn.ReLU(self.inplace_relu) + self.pool_layer = nn.MaxPool3d( + kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1] + ) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + x = self.pool_layer(x) + return x diff --git a/models/action_models/weight_init_helper.py b/models/action_models/weight_init_helper.py new file mode 100644 index 0000000..ff7e7bb --- /dev/null +++ b/models/action_models/weight_init_helper.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +"""Utility function for weight initialization""" + +import torch.nn as nn +from fvcore.nn.weight_init import c2_msra_fill + + +def init_weights(model, fc_init_std=0.01, zero_init_final_bn=True): + """ + Performs ResNet style weight initialization. 
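+    Conv3d layers get MSRA (He) initialisation; BatchNorm3d layers flagged with
+    transform_final_bn are zero-initialised when zero_init_final_bn is True, so
+    each such residual block initially behaves like an identity mapping.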
+ Args: + fc_init_std (float): the expected standard deviation for fc layer. + zero_init_final_bn (bool): if True, zero initialize the final bn for + every bottleneck. + """ + for m in model.modules(): + if isinstance(m, nn.Conv3d): + """ + Follow the initialization method proposed in: + {He, Kaiming, et al. + "Delving deep into rectifiers: Surpassing human-level + performance on imagenet classification." + arXiv preprint arXiv:1502.01852 (2015)} + """ + c2_msra_fill(m) + elif isinstance(m, nn.BatchNorm3d): + if ( + hasattr(m, "transform_final_bn") + and m.transform_final_bn + and zero_init_final_bn + ): + batchnorm_weight = 0.0 + else: + batchnorm_weight = 1.0 + m.weight.data.fill_(batchnorm_weight) + m.bias.data.zero_() + if isinstance(m, nn.Linear): + m.weight.data.normal_(mean=0.0, std=fc_init_std) + m.bias.data.zero_() diff --git a/models/imagenet_models.py b/models/imagenet_models.py new file mode 100644 index 0000000..e60be01 --- /dev/null +++ b/models/imagenet_models.py @@ -0,0 +1,385 @@ +"""ResNet and BagNet implementations. +original codes +- https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py +- https://github.com/wielandbrendel/bag-of-local-features-models/blob/master/bagnets/pytorchnet.py +""" +import torch +import torch.nn as nn +import math +from torch.utils.model_zoo import load_url as load_state_dict_from_url + +MODEL_URLS = { + 'bagnet9': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet8-34f4ccd2.pth.tar', + 'bagnet17': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet16-105524de.pth.tar', + 'bagnet33': 'https://bitbucket.org/wielandbrendel/bag-of-feature-pretrained-models/raw/249e8fa82c0913623a807d9d35eeab9da7dcc2a8/bagnet32-2ddd53ed.pth.tar', + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', + 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', + 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', + 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', +} + + +class BasicBlock_(nn.Module): + expansion = 1 + __constants__ = ['downsample'] + + def __init__(self, inplanes, planes, stride=1, downsample=None, kernel_size=1): + super(BasicBlock_, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=stride, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + if identity.size(-1) != out.size(-1): + diff = identity.size(-1) - out.size(-1) + identity = identity[:, :, :-diff, :-diff] + + out += identity + out = self.relu(out) + + return out + + 
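+# Note on the BagNet blocks (BasicBlock_ above, Bottleneck_ below): the spatial
+# convolutions use no padding, so the block output can be smaller than the
+# identity; the identity's trailing rows/columns are cropped to match before the
+# residual addition. E.g. an unpadded 3x3 conv turns a 26x26 map into 24x24, so
+# identity[:, :, :-2, :-2] is added (diff = 2).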
+class Bottleneck_(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, kernel_size=1): + super(Bottleneck_, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size, stride=stride, + padding=0, bias=False) # changed padding from (kernel_size - 1) // 2 + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + if residual.size(-1) != out.size(-1): + diff = residual.size(-1) - out.size(-1) + residual = residual[:, :, :-diff, :-diff] + + out += residual + out = self.relu(out) + + return out + + +class BagNetDeep(nn.Module): + def __init__(self, block, layers, strides=[2, 2, 2, 1], kernel3=[0, 0, 0, 0], num_classes=1000, + feature_pos='post', avg_pool=True): + super(BagNetDeep, self).__init__() + self.inplanes = 64 + self.feature_pos = feature_pos + self.conv1 = nn.Conv2d(3, 64, kernel_size=1, stride=1, padding=0, bias=False) + self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0, bias=False) + self.bn1 = nn.BatchNorm2d(64, momentum=0.001) + self.relu = nn.ReLU(inplace=True) + self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], kernel3=kernel3[0], prefix='layer1') + self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], kernel3=kernel3[1], prefix='layer2') + self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], kernel3=kernel3[2], prefix='layer3') + self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], kernel3=kernel3[3], prefix='layer4') + self.avgpool = nn.AvgPool2d(1, stride=1) + self.fc = nn.Linear(512 * block.expansion, num_classes) + self.avg_pool = avg_pool + self.block = block + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_layer(self, block, planes, blocks, stride=1, kernel3=0, prefix=''): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + kernel = 1 if kernel3 == 0 else 3 + layers.append(block(self.inplanes, planes, stride, downsample, kernel_size=kernel)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + kernel = 1 if kernel3 <= i else 3 + layers.append(block(self.inplanes, planes, kernel_size=kernel)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x_ = nn.AvgPool2d(x.size()[2], stride=1)(x) + x = x_.view(x_.size(0), -1) + x = self.fc(x) + + return x, x_ + + +def bagnet18(feature_pos='post', num_classes=1000, rf=43): + model = BagNetDeep(BasicBlock_, [2, 2, 2, 2], strides=[2, 2, 2, 1], kernel3=[1, 0, 0, 0], + num_classes=num_classes, feature_pos=feature_pos) + return model + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + __constants__ = ['downsample'] + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + __constants__ = ['downsample'] + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = 
nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + def __init__(self, block, layers, num_classes=1000, feature_pos='post', zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None, rf=None): + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.feature_pos = feature_pos + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+        if zero_init_residual:
+            for m in self.modules():
+                if isinstance(m, Bottleneck):
+                    nn.init.constant_(m.bn3.weight, 0)
+                elif isinstance(m, BasicBlock):
+                    nn.init.constant_(m.bn2.weight, 0)
+
+    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
+        norm_layer = self._norm_layer
+        downsample = None
+        previous_dilation = self.dilation
+        if dilate:
+            self.dilation *= stride
+            stride = 1
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                conv1x1(self.inplanes, planes * block.expansion, stride),
+                norm_layer(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
+                            self.base_width, previous_dilation, norm_layer))
+        self.inplanes = planes * block.expansion
+        for _ in range(1, blocks):
+            layers.append(block(self.inplanes, planes, groups=self.groups,
+                                base_width=self.base_width, dilation=self.dilation,
+                                norm_layer=norm_layer))
+
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+        x = self.maxpool(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+
+        x_ = self.avgpool(x)
+        x = torch.flatten(x_, 1)
+        x = self.fc(x)
+
+        return x, x_
+
+
+def _resnet(arch, block, layers, pretrained, progress, rf, num_classes, feature_pos, **kwargs):
+    model = ResNet(block, layers, rf=rf, num_classes=num_classes, feature_pos=feature_pos, **kwargs)
+    if pretrained:
+        state_dict = load_state_dict_from_url(MODEL_URLS[arch],
+                                              progress=progress)
+        model.load_state_dict(state_dict, strict=False)
+    return model
+
+
+def resnet18(feature_pos='post', num_classes=1000, rf=43, pretrained=False, progress=True, **kwargs):
+    r"""ResNet-18 model from
+    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/abs/1512.03385>`_
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+        progress (bool): If True, displays a progress bar of the download to stderr
+    """
+    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, rf, num_classes, feature_pos,
+                   **kwargs)
diff --git a/models/mnist_models.py b/models/mnist_models.py
new file mode 100644
index 0000000..546fc23
--- /dev/null
+++ b/models/mnist_models.py
@@ -0,0 +1,60 @@
+"""ReBias
+Copyright (c) 2020-present NAVER Corp.
+MIT license
+
+Implementation for simple stacked convolutional networks.
+""" +import torch +import torch.nn as nn + + +class SimpleConvNet(nn.Module): + def __init__(self, num_classes=None, kernel_size=7, feature_pos='post'): + super(SimpleConvNet, self).__init__() + padding = kernel_size // 2 + layers = [ + nn.Conv2d(3, 16, kernel_size=kernel_size, padding=padding), + nn.BatchNorm2d(16), + nn.ReLU(inplace=True), + nn.Conv2d(16, 32, kernel_size=kernel_size, padding=padding), + nn.BatchNorm2d(32), + nn.ReLU(inplace=True), + nn.Conv2d(32, 64, kernel_size=kernel_size, padding=padding), + nn.BatchNorm2d(64), + nn.ReLU(inplace=True), + nn.Conv2d(64, 128, kernel_size=kernel_size, padding=padding), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True), + ] + self.extracter = nn.Sequential(*layers) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(128, 10) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + if feature_pos not in ['pre', 'post', 'logits']: + raise ValueError(feature_pos) + + self.feature_pos = feature_pos + + def forward(self, x, logits_only=False): + pre_gap_feats = self.extracter(x) + post_gap_feats = self.avgpool(pre_gap_feats) + post_gap_feats = torch.flatten(post_gap_feats, 1) + logits = self.fc(post_gap_feats) + + if logits_only: + return logits + + elif self.feature_pos == 'pre': + feats = pre_gap_feats + elif self.feature_pos == 'post': + feats = post_gap_feats + else: + feats = logits + return logits, feats diff --git a/models/rebias_models.py b/models/rebias_models.py new file mode 100644 index 0000000..7a7bf54 --- /dev/null +++ b/models/rebias_models.py @@ -0,0 +1,65 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. +MIT license + +ReBias model wrapper. +""" +import torch.nn as nn + + +class ReBiasModels(object): + """A container for the target network and the intentionally biased network. + """ + def __init__(self, f_net, g_nets): + self.f_net = f_net + self.g_nets = g_nets + + def to(self, device): + self.f_net.to(device) + for g_net in self.g_nets: + g_net.to(device) + + def to_parallel(self, device): + self.f_net = nn.DataParallel(self.f_net.to(device)) + for i, g_net in enumerate(self.g_nets): + self.g_nets[i] = nn.DataParallel(g_net.to(device)) + + def load_models(self, state_dict): + self.f_net.load_state_dict(state_dict['f_net']) + for g_net, _state_dict in zip(self.g_nets, state_dict['g_nets']): + g_net.load_state_dict(_state_dict) + + def train_f(self): + self.f_net.train() + + def eval_f(self): + self.f_net.eval() + + def train_g(self): + for g_net in self.g_nets: + g_net.train() + + def eval_g(self): + for g_net in self.g_nets: + g_net.eval() + + def train(self): + self.train_f() + self.train_g() + + def eval(self): + self.eval_f() + self.eval_g() + + def forward(self, x): + f_pred, f_feat = self.f_net(x) + g_preds, g_feats = [], [] + for g_net in self.g_nets: + _g_pred, _g_feat = g_net(x) + g_preds.append(_g_pred) + g_feats.append(_g_feat) + + return f_pred, g_preds, f_feat, g_feats + + def __call__(self, x): + return self.forward(x) diff --git a/optims/__init__.py b/optims/__init__.py new file mode 100644 index 0000000..8c0bf5e --- /dev/null +++ b/optims/__init__.py @@ -0,0 +1,37 @@ +"""ReBias +Copyright (c) 2020-present NAVER Corp. +MIT license + +Opitmizers for the training. 
+""" +from torch.optim import Adam +from torch.optim.lr_scheduler import StepLR, CosineAnnealingLR + +from adamp import AdamP + + +__optim__ = ['Adam', 'AdamP'] +__scheduler__ = ['StepLR', 'CosineAnnealingLR'] + +__all__ = ['Adam', 'AdamP', 'StepLR', 'CosineAnnealingLR', 'get_optim', 'get_scheduler'] + + +def get_optim(params, optim_name, optim_config=None): + if optim_name not in __optim__: + raise KeyError(optim_name) + + optim = globals()[optim_name] + if not optim_config: + optim_config = {'lr': 1e-2, 'weight_decay': 1e-4} + return optim(params, **optim_config) + + +def get_scheduler(optimizer, scheduler_name, scheduler_config=None): + if scheduler_name not in __scheduler__: + raise KeyError(scheduler_name) + + scheduler = globals()[scheduler_name] + + if not scheduler_config: + scheduler_config = {} + return scheduler(optimizer, **scheduler_config) diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..af96283 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +fire +munch +torch==1.1.0 +torchvision==0.2.2.post3 +adamp diff --git a/resources/Table1.png b/resources/Table1.png new file mode 100644 index 0000000000000000000000000000000000000000..c8cbd976415280e4f1cb1fbd8432b12884ecb348 GIT binary patch literal 62858 zcmaI8by!tf*FH?M32EtWq`NoWjY>$DfOLm+H%NDf9swyy>FyBe?(WWS?Q@=Uj=%4D zuGc@jV6oSlbB;Cg9{1Scs>(7ikO`5YprBsJ$x5n2LBaC?Kd*rhfd8GFQ;VUXpuH?5 zBvj=jB*;}A?cQ5jn?XU*e=;#Nd?m+3-)CfGXxKN*$bjtVrXCg+rEb{T**4tPN#14H zO`e^qtGj}avC`>>?bX!N2FhpM4;Yd^fAIKO{N3vbm*-oWqXvyY{iQ#&oBRg{4p>+! z#JH?X^~_8Z(`~3(fqr-?8qq6Ma-F&#g-~5!Xwyg>XcCa=GIZrniUV|VSFF_GE*K4L ztPNCC1*lmVIgDC_uq-qq9lx1=f^Y`WTEsq>^(I?NGta9 z^KHpjg{3c9k;p}WLz{=|8=Ema2$ztbd&dScu&(F%n!OFqG419vw z$-~y!$er2NiR!P1{QVqBGba;AOM7QaJ6m$dbB&DcT%3g|DIp8}^Y5>Hnz>v4yOOQb ze~twlkQMS5R(2LP)_jbO;X6NMO68iIo|N7IvOa6AN z_P@7saPa(o=Wl=c&z(Z7kQ4lNqQAuT=T$&nU}PcIe`F6vmc#z|9tuhnN={PjjXU%~ z7Ggb))Ya7>_jaZjI|afJ64ef&Y6^E`Dpa)<4CbS5^%~hrmlsxT-9N(k{V{Q>@!2Wf zcLnipUo0*=?9bJuW$zyCrtPMAjGiwDaxT7HmM>Wnc*{L}%)CY|E(!ucQ;@?2`C(wd z{MRr30^%#IE{Pg6Z~w1PK~orl!e-t`{f`Iy`{>*w42()`H>qMclz%V&kHw8&yZn!< z|L)JfQ9`uq{o>0sET(@i{*NbyDgF8%SARR?9vU=ehKr`De8q1U|62@{JZ};Iys8w? 
[62,858 bytes of base85-encoded binary image data for resources/Table1.png omitted]
diff --git a/trainer.py b/trainer.py
new file mode 100644
index 0000000..c7829d3
--- /dev/null
+++ b/trainer.py
@@ -0,0 +1,480 @@
+"""ReBias
+Copyright (c) 2020-present NAVER Corp.
+MIT license
+
+Unified implementation of the de-biasing minimax optimisation for various methods, including:
+- ReBias (ours, outer_criterion='RbfHSIC', inner_criterion='MinusRbfHSIC')
+- Vanilla and Biased baselines (f_lambda_outer=0, g_lambda_inner=0)
+- Learned Mixin (outer_criterion='LearnedMixin', g_lambda_inner=0, n_g_update=0)
+- RUBi (outer_criterion='RUBi', g_lambda_inner=0)
+
+This implementation also supports various configurations, such as:
+- an adaptive radius for RBF kernels (see `_set_adaptive_sigma`)
+- warm-up before the joint optimisation (n_g_pretrain_epochs, n_f_pretrain_epochs)
+- the feature position used to compute losses (feature_pos in f_config and g_config)
+- various biased-network configurations (n_g_nets, n_g_update, update_g_cls)
+
+For the configurations used in each experiment, please refer to the following files:
+- README.md
+- main_biased_mnist.py
+- main_imagenet.py
+- main_action.py
+"""
+import itertools
+import os
+
+import munch
+
+import torch
+import torch.nn as nn
+
+from criterions import get_criterion
+from criterions.sigma_utils import median_distance, feature_dimension
+from logger import PythonLogger
+from optims import get_optim, get_scheduler
+
+
+def flatten(list_of_lists):
+    return itertools.chain.from_iterable(list_of_lists)
+
+
+def cur_step(cur_epoch, idx, N, fmt=None):
+    _cur_step = cur_epoch + idx / N
+    if fmt:
+        return fmt.format(_cur_step)
+    else:
+        return _cur_step
+
+
+class Trainer(object):
+    """Base wrapper for the de-biasing minimax optimisation to solve
+    .. math:: min_g max_f L_f + lambda_1 * (L_debias(f, g) - L_g)
+
+    In practice, we optimise the following two minimisation problems sequentially:
+    .. math::
+        min L_f + f_lambda_outer * outer_criterion(f, g)
+        min L_g + g_lambda_inner * inner_criterion(f, g)
+
+    Thus, setting f_lambda_outer or g_lambda_inner to zero reduces the corresponding update to the classification loss alone.
+    In the experiments, ReBias sets f_lambda_outer = g_lambda_inner = 1, while the comparison methods set f_lambda_outer = 1 and g_lambda_inner = 0.
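+
+    For example, with the default ReBias setting, one training step on a batch
+    amounts to the following two losses (an illustrative sketch in pseudo-code;
+    the actual computation is in `_update_f` and `_update_g` below):
+
+    .. code-block:: python
+
+        f_loss = cross_entropy(f_preds, labels) + f_lambda_outer * RbfHSIC(f_feats, g_feats)
+        g_loss = cross_entropy(g_preds, labels) + g_lambda_inner * MinusRbfHSIC(g_feats, f_feats)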
+
+    Furthermore, the criterion functions for the comparison methods are implemented directly in `outer_criterion`, and these criteria also include the classification objective.
+    In this case, we optimise only the outer_criterion, without a separate cross-entropy loss.
+
+    Parameters
+    ----------
+    outer_criterion, inner_criterion: str
+        Names of the outer/inner criterions for the different methods, including
+        - ReBias (ours): RbfHSIC, MinusRbfHSIC
+        - Vanilla and Biased baselines: -, -
+        - Learned Mixin: LearnedMixin, -
+        - RUBi: RUBi, -
+        where `-` denotes no outer/inner optimisation.
+    outer_criterion_config, inner_criterion_config: dict
+        Configuration dicts used to instantiate the criterions, i.e., `criterion_fn(**config)`.
+    outer_criterion_detail, inner_criterion_detail: dict
+        Configuration dicts with further details for each criterion.
+        In practice, they only contain sigma configurations such as sigma_x_type and sigma_x_scale.
+        To set an ``adaptive radius'' for RBF kernels, use sigma_x_type='median' (see `_set_adaptive_sigma`).
+    f_config, g_config: dict
+        Configuration dicts for constructing the network objects.
+    f_lambda_outer: float
+        Control parameter for HSIC or other debiasing objective functions on the target network.
+        In the experiments, it is always set to one, except in the ``baseline'' (Vanilla, Biased) cases.
+    g_lambda_inner: float
+        Control parameter for HSIC or other debiasing objective functions on the biased network.
+        ReBias always uses one; otherwise it is set to zero.
+    n_g_update: int
+        The number of g updates per f update. Useful when g converges more slowly than expected.
+        In the experiments, it is always one.
+    update_g_cls: boolean
+        Flag for updating the g cross-entropy loss. If False, only the debiasing objective is optimised for g.
+    n_g_nets: int
+        The number of biased networks for the optimisation. The debiasing loss is the sum of the losses computed with each g.
+    n_g_pretrain_epochs, n_f_pretrain_epochs: int
+        The number of warm-up epochs for more stable training.
+        Not used for ReBias, but for comparison methods without a biased-network update (LearnedMixin).
+    train_loader: pytorch dataloader
+        Used for the adaptive kernel updates.
+    sigma_update_sampling_rate: float
+        Sampling rate for computing the adaptive kernel radius.
+        In the experiments, we use 25% of the training data points to compute the adaptive kernel radius.
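+
+    Example
+    -------
+    A minimal usage sketch, assuming a concrete subclass that implements
+    `_set_models`. The subclass name, evaluator wiring, and configuration values
+    below are illustrative only; see main_biased_mnist.py for the settings
+    actually used in the experiments::
+
+        trainer = BiasedMNISTTrainer(
+            outer_criterion='RbfHSIC',
+            inner_criterion='MinusRbfHSIC',
+            outer_criterion_detail={'sigma_x_type': 'median', 'sigma_x_scale': 1},
+            f_config={'num_classes': 10},
+            g_config={'num_classes': 10},
+            f_optim_config={'lr': 0.001},
+            g_optim_config={'lr': 0.001},
+            train_loader=train_loader)
+        trainer.evaluator = evaluator
+        trainer.train(train_loader,
+                      val_loaders={'biased': biased_loader, 'unbiased': unbiased_loader})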
+    """
+    def __init__(self,
+                 # criterion settings
+                 outer_criterion='RbfHSIC',
+                 inner_criterion='MinusRbfHSIC',
+                 outer_criterion_config={'sigma': 1.0},
+                 outer_criterion_detail={},
+                 inner_criterion_config={},
+                 inner_criterion_detail={},
+                 # network settings
+                 f_config={},
+                 g_config={},
+                 # optimiser settings
+                 f_lambda_outer=1,
+                 g_lambda_inner=1,
+                 n_g_update=1,
+                 update_g_cls=True,
+                 n_g_nets=1,
+                 optimizer='Adam',
+                 f_optim_config=None,
+                 g_optim_config=None,
+                 scheduler='StepLR',
+                 f_scheduler_config={'step_size': 20},
+                 g_scheduler_config={'step_size': 20},
+                 n_g_pretrain_epochs=0,
+                 n_f_pretrain_epochs=0,
+                 n_epochs=80,
+                 log_step=100,
+                 # adaptive sigma settings
+                 train_loader=None,
+                 sigma_update_sampling_rate=0.25,
+                 # others
+                 device='cuda',
+                 logger=None):
+
+        self.device = device
+        self.sigma_update_sampling_rate = sigma_update_sampling_rate
+
+        if logger is None:
+            logger = PythonLogger()
+        self.logger = logger
+        self.log_step = log_step
+
+        if f_config['num_classes'] != g_config['num_classes']:
+            raise ValueError('num_classes for f and g should be the same.')
+
+        self.num_classes = f_config['num_classes']
+        options = {
+            'outer_criterion': outer_criterion,
+            'inner_criterion': inner_criterion,
+            'outer_criterion_config': outer_criterion_config,
+            'outer_criterion_detail': outer_criterion_detail,
+            'inner_criterion_config': inner_criterion_config,
+            'inner_criterion_detail': inner_criterion_detail,
+            'f_config': f_config,
+            'g_config': g_config,
+            'f_lambda_outer': f_lambda_outer,
+            'g_lambda_inner': g_lambda_inner,
+            'n_g_update': n_g_update,
+            'update_g_cls': update_g_cls,
+            'n_g_nets': n_g_nets,
+            'optimizer': optimizer,
+            'f_optim_config': f_optim_config,
+            'g_optim_config': g_optim_config,
+            'scheduler': scheduler,
+            'f_scheduler_config': f_scheduler_config,
+            'g_scheduler_config': g_scheduler_config,
+            'n_g_pretrain_epochs': n_g_pretrain_epochs,
+            'n_f_pretrain_epochs': n_f_pretrain_epochs,
+            'n_epochs': n_epochs,
+        }
+
+        self.options = munch.munchify(options)
+        self.evaluator = None
+
+        self._set_models()
+        self._to_device()
+        self._to_parallel()
+        self._set_criterion(train_loader)
+        self._set_optimizer()
+
+        self.logger.log('Outer criterion: {}'.format(self.outer_criterion.__class__.__name__))
+        self.logger.log(self.options)
+
+    def _set_models(self):
+        raise NotImplementedError
+
+    def _to_device(self):
+        self.model.f_net = self.model.f_net.to(self.device)
+        for i, g_net in enumerate(self.model.g_nets):
+            self.model.g_nets[i] = g_net.to(self.device)
+
+    def _to_parallel(self):
+        self.model.f_net = torch.nn.DataParallel(self.model.f_net)
+        for i, g_net in enumerate(self.model.g_nets):
+            self.model.g_nets[i] = torch.nn.DataParallel(g_net)
+
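+    # [NOTE] Adaptive kernel radius: with sigma_x_type='median', the RBF kernel
+    # bandwidths are set from the median pairwise distance of the f / g features.
+    # A minimal sketch of the median heuristic (illustrative only; the actual
+    # computation lives in criterions.sigma_utils.median_distance and subsamples
+    # the loader according to `sigma_update_sampling_rate`):
+    #
+    #     dists = torch.cdist(feats, feats)
+    #     sigma = dists[dists > 0].median().item()
+    #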
+    def _set_adaptive_sigma(self, train_loader):
+        if self.options.outer_criterion_detail.get('sigma_x_type') == 'median':
+            self.logger.log('computing sigma from data median')
+            sigma_x, sigma_y = median_distance(self.model, train_loader, self.sigma_update_sampling_rate, device=self.device)
+        elif self.options.outer_criterion_detail.get('sigma_x_type') == 'dimension':
+            sigma_x, sigma_y = feature_dimension(self.model, train_loader, device=self.device)
+        else:
+            return
+        sigma_x_scale = self.options.outer_criterion_detail.get('sigma_x_scale', 1)
+        sigma_y_scale = self.options.outer_criterion_detail.get('sigma_y_scale', 1)
+        self.options.outer_criterion_config['sigma_x'] = sigma_x * sigma_x_scale
+        self.options.outer_criterion_config['sigma_y'] = sigma_y * sigma_y_scale
+
+        self.options.inner_criterion_config['sigma_x'] = sigma_x * sigma_x_scale
+        self.options.inner_criterion_config['sigma_y'] = sigma_y * sigma_y_scale
+        self.logger.log('current sigma: ({}) * {} ({}) * {}'.format(sigma_x,
+                                                                    sigma_x_scale,
+                                                                    sigma_y,
+                                                                    sigma_y_scale,
+                                                                    ))
+
+    def _set_criterion(self, train_loader):
+        self._set_adaptive_sigma(train_loader)
+        self.outer_criterion = get_criterion(self.options.outer_criterion)(**self.options.outer_criterion_config)
+        self.inner_criterion = get_criterion(self.options.inner_criterion)(**self.options.inner_criterion_config)
+        self.classification_criterion = nn.CrossEntropyLoss()
+
+    def _set_optimizer(self):
+        f_net_parameters = list(self.model.f_net.parameters())
+
+        if 'fc' in self.outer_criterion.__dict__:
+            """[NOTE] for comparison methods (LearnedMixin, RUBi)
+            whose criterion has its own trainable fc layer.
+            """
+            f_net_parameters += list(self.outer_criterion.fc.parameters())
+
+        self.f_optimizer = get_optim(f_net_parameters,
+                                     self.options.optimizer,
+                                     self.options.f_optim_config)
+        self.g_optimizer = get_optim(flatten([g_net.parameters()
+                                              for g_net in self.model.g_nets]),
+                                     self.options.optimizer,
+                                     self.options.g_optim_config)
+
+        self.f_lr_scheduler = get_scheduler(self.f_optimizer,
+                                            self.options.scheduler,
+                                            self.options.f_scheduler_config)
+        self.g_lr_scheduler = get_scheduler(self.g_optimizer,
+                                            self.options.scheduler,
+                                            self.options.g_scheduler_config)
+
+    def pretrain(self, dataloader, val_loaders=None):
+        for cur_epoch in range(self.options.n_g_pretrain_epochs):
+            if self.options.n_epochs == 0:
+                self.g_lr_scheduler.step()
+            for idx, (x, labels, _) in enumerate(dataloader):
+                x = x.to(self.device)
+                labels = labels.to(self.device)
+
+                loss_dict = {'step': cur_step(cur_epoch, idx, len(dataloader))}
+                self._update_g(x, labels, update_inner_loop=False,
+                               loss_dict=loss_dict, prefix='pretrain__')
+
+                if (idx + 1) % self.log_step == 0:
+                    self.logger.report(loss_dict,
+                                       prefix='[Pretrain G] Report @step: ')
+
+            self.evaluate_acc(cur_epoch + 1,
+                              f_acc=False,
+                              val_loaders=val_loaders)
+
+        for cur_epoch in range(self.options.n_f_pretrain_epochs):
+            if self.options.n_epochs == 0:
+                self.f_lr_scheduler.step()
+            for idx, (x, labels, _) in enumerate(dataloader):
+                x = x.to(self.device)
+                labels = labels.to(self.device)
+
+                loss_dict = {'step': cur_step(cur_epoch, idx, len(dataloader))}
+                self._update_f(x, labels, update_outer_loop=False,
+                               loss_dict=loss_dict, prefix='pretrain__')
+
+                if (idx + 1) % self.log_step == 0:
+                    self.logger.report(loss_dict,
+                                       prefix='[Pretrain F] Report @step: ')
+
+            self.evaluate_acc(cur_epoch + 1,
+                              f_acc=True,
+                              val_loaders=val_loaders)
+
+    def _update_g(self, x, labels, update_inner_loop=True, loss_dict=None, prefix=''):
+        if loss_dict is None:
+            loss_dict = {}
+
+        self.model.train()
+
+        g_loss = 0
+        for g_idx, g_net in enumerate(self.model.g_nets):
+            preds, g_feats = g_net(x)
+
+            _g_loss = 0
+            if self.options.update_g_cls:
+                _g_loss_cls = self.classification_criterion(preds, labels)
+                _g_loss += _g_loss_cls
+
+                loss_dict['{}g_{}_cls'.format(prefix, g_idx)] = _g_loss_cls.item()
+
+            if update_inner_loop and self.options.g_lambda_inner:
+                _, f_feats = self.model.f_net(x)
+                _g_loss_inner = self.inner_criterion(g_feats, f_feats, labels=labels)
+                _g_loss += self.options.g_lambda_inner * _g_loss_inner
+
+                loss_dict['{}g_{}_inner'.format(prefix, g_idx)] = _g_loss_inner.item()
+
+            g_loss += _g_loss
+
+        self.g_optimizer.zero_grad()
+        g_loss.backward()
+        self.g_optimizer.step()
+
+        loss_dict['{}g_loss'.format(prefix)] = g_loss.item()
+
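+    # [NOTE] One f step: the cross-entropy loss (skipped for LearnedMixin and
+    # RUBi, whose outer criteria already include the classification term) plus
+    # f_lambda_outer times the outer criterion, summed over all biased networks.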
+    def _update_f(self, x, labels, update_outer_loop=True, loss_dict=None, prefix=''):
+        if loss_dict is None:
+            loss_dict = {}
+
+        self.model.train()
+
+        f_loss = 0
+        preds, f_feats = self.model.f_net(x)
+
+        if self.options.outer_criterion not in ('LearnedMixin', 'RUBi'):
+            """[NOTE] Comparison methods (LearnedMixin, RUBi) do not compute f_loss_cls
+            """
+            f_loss_cls = self.classification_criterion(preds, labels)
+            f_loss += f_loss_cls
+            loss_dict['{}f_loss_cls'.format(prefix)] = f_loss_cls.item()
+
+        if update_outer_loop and self.options.f_lambda_outer:
+            f_loss_indep = 0
+            for g_idx, g_net in enumerate(self.model.g_nets):
+                _g_preds, _g_feats = g_net(x)
+
+                _f_loss_indep = self.outer_criterion(f_feats, _g_feats, labels=labels, f_pred=preds)
+                f_loss_indep += _f_loss_indep
+
+                loss_dict['{}f_loss_indep_g_{}'.format(prefix, g_idx)] = _f_loss_indep.item()
+
+            f_loss += self.options.f_lambda_outer * f_loss_indep
+            loss_dict['{}f_loss_indep'.format(prefix)] = f_loss_indep.item()
+
+        self.f_optimizer.zero_grad()
+        f_loss.backward()
+        self.f_optimizer.step()
+
+        loss_dict['{}f_loss'.format(prefix)] = f_loss.item()
+
+    def _train_epoch(self, dataloader, cur_epoch):
+        for idx, (x, labels, _) in enumerate(dataloader):
+            x = x.to(self.device)
+            labels = labels.to(self.device)
+
+            loss_dict = {'step': cur_step(cur_epoch, idx, len(dataloader))}
+            for _ in range(self.options.n_g_update):
+                self._update_g(x, labels, loss_dict=loss_dict, prefix='train__')
+            self._update_f(x, labels, loss_dict=loss_dict, prefix='train__')
+
+            if (idx + 1) % self.log_step == 0:
+                self.logger.report(loss_dict,
+                                   prefix='[Train] Report @step: ')
+
+    def train(self, tr_loader,
+              val_loaders=None,
+              val_epoch_step=20,
+              update_sigma_per_epoch=False,
+              save_dir='./checkpoints',
+              experiment=None):
+        if val_loaders:
+            if not isinstance(val_loaders, dict):
+                raise TypeError('val_loaders should be dict, not {}'
+                                .format(type(val_loaders)))
+            if 'unbiased' not in val_loaders:
+                raise ValueError('val_loaders should contain key "unbiased", cur keys({})'
+                                 .format(list(val_loaders.keys())))
+        os.makedirs(save_dir, exist_ok=True)
+
+        self.logger.log('start pretraining')
+        self.pretrain(tr_loader, val_loaders=val_loaders)
+
+        best_acc = 0
+        self.logger.log('start training')
+
+        for cur_epoch in range(self.options.n_epochs):
+            self._train_epoch(tr_loader, cur_epoch)
+            self.f_lr_scheduler.step()
+            self.g_lr_scheduler.step()
+            self.logger.log('F learning rate: {}, G learning rate: {}'.format(
+                self.f_lr_scheduler.get_lr(),
+                self.g_lr_scheduler.get_lr()
+            ))
+
+            metadata = {
+                'cur_epoch': cur_epoch + 1,
+                'best_acc': best_acc,
+            }
+
+            if val_loaders and (cur_epoch + 1) % val_epoch_step == 0:
+                scores = self.evaluate(cur_epoch + 1, val_loaders)
+                metadata['scores'] = scores
+
+                if scores['unbiased']['f_acc'] > best_acc:
+                    best_acc = metadata['best_acc'] = scores['unbiased']['f_acc']
+                    self.save_models(os.path.join(save_dir, 'best.pth'),
+                                     metadata=metadata)
+            self.save_models(os.path.join(save_dir, 'last.pth'),
+                             metadata=metadata)
+
+            if update_sigma_per_epoch:
+                self.logger.log('sigma update')
+                self._set_criterion(tr_loader)
+                sigma_x = self.options.inner_criterion_config['sigma_x']
+                sigma_y = self.options.inner_criterion_config['sigma_y']
+                self.logger.report({'step': cur_epoch + 1,
+                                    'sigma__f': sigma_x,
+                                    'sigma__g': sigma_y}, prefix='[Validation] Report @step: ')
+
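+    # [NOTE] `scores` is a nested dict keyed by loader name, e.g.
+    # {'unbiased': {'f_acc': ..., ...}, 'biased': {...}}; `train` above uses the
+    # 'unbiased' entry for checkpoint selection.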
+    def evaluate(self, step=0, val_loaders=None):
+        if not val_loaders:
+            return {}
+
+        scores = {}
+        for key, val_loader in val_loaders.items():
+            scores[key] = self.evaluator.evaluate_rebias(val_loader, self.model,
+                                                         outer_criterion=self.outer_criterion,
+                                                         inner_criterion=self.inner_criterion,
+                                                         num_classes=self.num_classes,
+                                                         key=key)
+
+        for key, score in scores.items():
+            msg_dict = {'val__{}_{}'.format(key, k): v for k, v in score.items()}
+            msg_dict['step'] = step
+            self.logger.report(msg_dict, prefix='[Validation] Report @step: ')
+
+        print(scores)
+        return scores
+
+    def evaluate_acc(self, step=0, f_acc=True, val_loaders=None):
+        if not val_loaders:
+            return {}
+
+        scores = {}
+        for key, val_loader in val_loaders.items():
+            if f_acc:
+                scores[key] = self.evaluator.evaluate_acc(val_loader, self.model.f_net)
+            else:
+                scores[key] = {}
+                for idx, g_net in enumerate(self.model.g_nets):
+                    scores[key][idx] = self.evaluator.evaluate_acc(val_loader, g_net)
+
+        for key, score in scores.items():
+            if f_acc:
+                msg_dict = {'pretrain__{}_f_acc'.format(key): score}
+            else:
+                msg_dict = {'pretrain__{}_g_{}_acc'.format(key, idx): _score for idx, _score in score.items()}
+
+            msg_dict['step'] = step
+            self.logger.report(msg_dict, prefix='[Pretrain Validation] Report @step: ')
+
+        return scores
+
+    def save_models(self, save_to, metadata=None):
+        state_dict = {
+            'f_net': self.model.f_net.state_dict(),
+            'g_nets': [g_net.state_dict() for g_net in self.model.g_nets],
+            'f_optimizer': self.f_optimizer.state_dict(),
+            'g_optimizer': self.g_optimizer.state_dict(),
+            'f_lr_scheduler': self.f_lr_scheduler.state_dict(),
+            'g_lr_scheduler': self.g_lr_scheduler.state_dict(),
+            'options': dict(self.options),
+            'metadata': metadata,
+        }
+        torch.save(state_dict, save_to)
+        self.logger.log('state dict is saved to {}, metadata: {}'.format(
+            save_to, metadata))