Commit b994cec

Black ALL THE THINGS (facebookresearch#1802)
* Add support for black.
* Trivial change to see if it's blacked.
* Also add a CI test.
* Don't force CI to install both.
* Updates.
* Slightly better output.
* And black this.
* Don't black parlai_internal
* Black all the things.
* Delete trailing whitespaces.
* Fix some mixed tabs/spaces.
1 parent 625b3db commit b994cec
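
The commit message mentions adding a CI test for formatting, but the CircleCI job itself is not part of this excerpt. As a rough sketch of what such a check typically looks like (the helper below is hypothetical and only assumes Black's standard --check and --exclude flags):

# Hypothetical formatting check in the spirit of the CI test the commit
# message describes; the actual CircleCI job is not shown in this excerpt.
import subprocess
import sys


def check_formatting():
    # `black --check` exits non-zero if any file would be reformatted;
    # parlai_internal is skipped ("Don't black parlai_internal").
    result = subprocess.run(['black', '--check', '--exclude', 'parlai_internal', '.'])
    return result.returncode == 0


if __name__ == '__main__':
    sys.exit(0 if check_formatting() else 1)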

439 files changed (+24158, -16071 lines)

Some diffs in this large commit are hidden by default; only a subset of the 439 changed files is shown below.

.circleci/triggers.py (+2 -4)

@@ -28,8 +28,7 @@ def detect_gpu():
     """Check if we should run GPU tests."""
     commit_msg = '[gpu]' in testing_utils.git_commit_messages()
     test_changed = any(
-        'tests/nightly/gpu' in fn
-        for fn in testing_utils.git_changed_files()
+        'tests/nightly/gpu' in fn for fn in testing_utils.git_changed_files()
     )
     return commit_msg or test_changed

@@ -48,8 +47,7 @@ def detect_mturk():
     """Check if we should run mturk tests."""
     commit_msg = '[mturk]' in testing_utils.git_commit_messages().lower()
     mturk_changed = any(
-        'parlai/mturk' in fn
-        for fn in testing_utils.git_changed_files()
+        'parlai/mturk' in fn for fn in testing_utils.git_changed_files()
     )
     return commit_msg or mturk_changed
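For context, detect_gpu and detect_mturk above gate optional CI jobs on the commit message and the set of changed files. A formatting job could be gated the same way; the sketch below is purely illustrative (detect_black and the '[black]' tag do not appear in this excerpt) and assumes the same testing_utils helpers used above:

def detect_black():
    """Hypothetical trigger in the style of detect_gpu/detect_mturk."""
    commit_msg = '[black]' in testing_utils.git_commit_messages().lower()
    py_changed = any(fn.endswith('.py') for fn in testing_utils.git_changed_files())
    return commit_msg or py_changed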

docs/source/conf.py (+12 -18)

@@ -37,10 +37,7 @@
 import sphinx_rtd_theme


-extensions = [
-    'sphinx.ext.autodoc',
-    'sphinx.ext.githubpages'
-]
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.githubpages']

 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -128,15 +125,12 @@
     # The paper size ('letterpaper' or 'a4paper').
     #
     # 'papersize': 'letterpaper',
-
     # The font size ('10pt', '11pt' or '12pt').
     #
     # 'pointsize': '10pt',
-
     # Additional stuff for the LaTeX preamble.
     #
     # 'preamble': '',
-
     # Latex figure (float) alignment
     #
     # 'figure_align': 'htbp',
@@ -145,20 +139,14 @@
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
-latex_documents = [
-    (master_doc, 'ParlAI.tex', 'ParlAI Documentation',
-     'FAIR', 'manual'),
-]
+latex_documents = [(master_doc, 'ParlAI.tex', 'ParlAI Documentation', 'FAIR', 'manual')]


 # -- Options for manual page output ---------------------------------------

 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    (master_doc, 'parlai', 'ParlAI Documentation',
-     [author], 1)
-]
+man_pages = [(master_doc, 'parlai', 'ParlAI Documentation', [author], 1)]


 # -- Options for Texinfo output -------------------------------------------
@@ -167,7 +155,13 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'ParlAI', 'ParlAI Documentation',
-     author, 'ParlAI', 'One line description of project.',
-     'Miscellaneous'),
+    (
+        master_doc,
+        'ParlAI',
+        'ParlAI Documentation',
+        author,
+        'ParlAI',
+        'One line description of project.',
+        'Miscellaneous',
+    )
 ]

docs/source/generate_task_READMEs.py (+1 -1)

@@ -12,7 +12,7 @@
     display_name = task_dict.get('display_name', None)
     task_detailed = task_dict.get('task', None)
     if ':' in task_detailed:
-        task = task_detailed[0:task_detailed.find(':')]
+        task = task_detailed[0 : task_detailed.find(':')]
     else:
         task = task_detailed
     tags = task_dict.get('tags', None)

docs/source/generate_zoo_list.py (+2 -3)

@@ -22,9 +22,8 @@
     if 'example' in model:
         example = model['example']
     else:
-        example = (
-            "python -m parlai.scripts.eval_model --model {} --task {} -mf {}"
-            .format(model['agent'], model['task'], model['path'])
+        example = "python -m parlai.scripts.eval_model --model {} --task {} -mf {}".format(
+            model['agent'], model['task'], model['path']
         )
     result = model.get('result', '').strip().split("\n")
     # strip leading whitespace from results

parlai/agents/bert_classifier/bert_classifier.py (+44 -27)

@@ -7,7 +7,7 @@
 from parlai.agents.bert_ranker.helpers import (
     BertWrapper,
     get_bert_optimizer,
-    MODEL_PATH
+    MODEL_PATH,
 )
 from parlai.core.utils import load_opt_file
 from parlai.core.torch_agent import History
@@ -18,11 +18,16 @@
 from collections import deque
 import os
 import torch
+
 try:
     from pytorch_pretrained_bert import BertModel
 except ImportError:
-    raise Exception(("BERT rankers needs pytorch-pretrained-BERT installed. \n "
-                     "pip install pytorch-pretrained-bert"))
+    raise Exception(
+        (
+            "BERT rankers needs pytorch-pretrained-BERT installed. \n "
+            "pip install pytorch-pretrained-bert"
+        )
+    )


 class BertClassifierHistory(History):
@@ -49,11 +54,13 @@ class BertClassifierAgent(TorchClassifierAgent):
     """
     Classifier based on Hugging Face BERT implementation.
     """
+
     def __init__(self, opt, shared=None):
         # download pretrained models
         download(opt['datapath'])
-        self.pretrained_path = os.path.join(opt['datapath'], 'models',
-                                            'bert_models', MODEL_PATH)
+        self.pretrained_path = os.path.join(
+            opt['datapath'], 'models', 'bert_models', MODEL_PATH
+        )
         opt['pretrained_path'] = self.pretrained_path
         self._upgrade_opt(opt)
         self.add_cls_token = opt.get('add_cls_token', True)
@@ -68,21 +75,34 @@ def history_class(cls):
     def add_cmdline_args(parser):
         TorchClassifierAgent.add_cmdline_args(parser)
         parser = parser.add_argument_group('BERT Classifier Arguments')
-        parser.add_argument('--type-optimization', type=str,
-                            default='all_encoder_layers',
-                            choices=['additional_layers', 'top_layer',
-                                     'top4_layers', 'all_encoder_layers',
-                                     'all'],
-                            help='which part of the encoders do we optimize '
-                                 '(defaults to all layers)')
-        parser.add_argument('--add-cls-token', type='bool', default=True,
-                            help='add [CLS] token to text vec')
-        parser.add_argument('--sep-last-utt', type='bool', default=False,
-                            help='separate the last utterance into a different'
-                                 'segment with [SEP] token in between')
-        parser.set_defaults(
-            dict_maxexs=0,  # skip building dictionary
+        parser.add_argument(
+            '--type-optimization',
+            type=str,
+            default='all_encoder_layers',
+            choices=[
+                'additional_layers',
+                'top_layer',
+                'top4_layers',
+                'all_encoder_layers',
+                'all',
+            ],
+            help='which part of the encoders do we optimize '
+            '(defaults to all layers)',
         )
+        parser.add_argument(
+            '--add-cls-token',
+            type='bool',
+            default=True,
+            help='add [CLS] token to text vec',
+        )
+        parser.add_argument(
+            '--sep-last-utt',
+            type='bool',
+            default=False,
+            help='separate the last utterance into a different'
+            'segment with [SEP] token in between',
+        )
+        parser.set_defaults(dict_maxexs=0)  # skip building dictionary

     @staticmethod
     def dictionary_class():
@@ -95,23 +115,20 @@ def _upgrade_opt(self, opt):
         old_opt = load_opt_file(model_opt)
         if 'add_cls_token' not in old_opt:
             # old model, make this default to False
-            warn_once(
-                'Old model: overriding `add_cls_token` to False.'
-            )
+            warn_once('Old model: overriding `add_cls_token` to False.')
             opt['add_cls_token'] = False
         return

     def build_model(self):
         num_classes = len(self.class_list)
         self.model = BertWrapper(
-            BertModel.from_pretrained(self.pretrained_path),
-            num_classes
+            BertModel.from_pretrained(self.pretrained_path), num_classes
         )

     def init_optim(self, params, optim_states=None, saved_optim_type=None):
-        self.optimizer = get_bert_optimizer([self.model],
-                                            self.opt['type_optimization'],
-                                            self.opt['learningrate'])
+        self.optimizer = get_bert_optimizer(
+            [self.model], self.opt['type_optimization'], self.opt['learningrate']
+        )

     def _set_text_vec(self, *args, **kwargs):
         obs = super()._set_text_vec(*args, **kwargs)

parlai/agents/bert_ranker/bert_dictionary.py (+11 -6)

@@ -5,11 +5,14 @@
 # LICENSE file in the root directory of this source tree.
 from parlai.core.dict import DictionaryAgent
 from parlai.zoo.bert.build import download
+
 try:
     from pytorch_pretrained_bert import BertTokenizer
 except ImportError:
-    raise ImportError('BERT rankers needs pytorch-pretrained-BERT installed. \n '
-                      'pip install pytorch-pretrained-bert')
+    raise ImportError(
+        'BERT rankers needs pytorch-pretrained-BERT installed. \n '
+        'pip install pytorch-pretrained-bert'
+    )

 from .helpers import VOCAB_PATH

@@ -19,21 +22,23 @@
 class BertDictionaryAgent(DictionaryAgent):
     """Allow to use the Torch Agent with the wordpiece dictionary of Hugging Face.
     """
+
     def __init__(self, opt):
         super().__init__(opt)
         # initialize from vocab path
         download(opt['datapath'])
-        vocab_path = os.path.join(opt['datapath'], 'models', 'bert_models',
-                                  VOCAB_PATH)
+        vocab_path = os.path.join(opt['datapath'], 'models', 'bert_models', VOCAB_PATH)
         self.tokenizer = BertTokenizer.from_pretrained(vocab_path)

         self.start_token = '[CLS]'
         self.end_token = '[SEP]'
         self.null_token = '[PAD]'
         self.start_idx = self.tokenizer.convert_tokens_to_ids(['[CLS]'])[
-            0]  # should be 101
+            0
+        ]  # should be 101
         self.end_idx = self.tokenizer.convert_tokens_to_ids(['[SEP]'])[
-            0]  # should be 102
+            0
+        ]  # should be 102
         self.pad_idx = self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0]  # should be 0
         # set tok2ind for special tokens
         self.tok2ind[self.start_token] = self.start_idx

parlai/agents/bert_ranker/bert_ranker.py (+7 -4)

@@ -11,8 +11,11 @@

 class BertRankerAgent(TorchAgent):
     """Abstract parent class for all Bert Ranker agents."""
+
     def __init__(self, opt, shared=None):
-        raise RuntimeError('You must specify which ranker to use. Choices: \n'
-                           '-m bert_ranker/bi_encoder_ranker \n'
-                           '-m bert_ranker/cross_encoder_ranker \n'
-                           '-m bert_ranker/both_encoder_ranker')
+        raise RuntimeError(
+            'You must specify which ranker to use. Choices: \n'
+            '-m bert_ranker/bi_encoder_ranker \n'
+            '-m bert_ranker/cross_encoder_ranker \n'
+            '-m bert_ranker/both_encoder_ranker'
+        )
