File tree 3 files changed +12
-2
lines changed
3 files changed +12
-2
lines changed Original file line number Diff line number Diff line change @@ -360,8 +360,9 @@ def get_config(config_name: str) -> ml_collections.ConfigDict:
360
360
# Stop training when this many consecutive evaluations yield no improvement.
361
361
config.early_stopping_patience = 250
362
362
363
- # Weight decay of optimizer
363
+ # Optimizer params
364
364
config.optimizer_weight_decay = 0.0
365
+ config.ema_momentum = 0.99
365
366
366
367
# An 'iter' refers to a group of train/tune steps run in succession.
367
368
config.steps_per_iter = 128
Original file line number Diff line number Diff line change 49
49
from deepvariant import keras_modeling
50
50
from official.modeling import optimization
51
51
52
- _CHECKPOINT_OPTIONS = tf.train.CheckpointOptions(enable_async=True)
52
+ _CHECKPOINT_OPTIONS = tf.train.CheckpointOptions()
53
53
54
54
_LEADER = flags .DEFINE_string (
55
55
'leader' ,
Original file line number Diff line number Diff line change @@ -289,6 +289,15 @@ sudo -H NEEDRESTART_MODE=a apt-get install "${APT_ARGS[@]}" libssl-dev libcurl4-
289
289
# for the debruijn graph
290
290
sudo -H NEEDRESTART_MODE=a apt-get install "${APT_ARGS[@]}" libboost-graph-dev > /dev/null
291
291
292
+ # Pin tf-models-official back to 2.11.6 to be closer to
293
+ # ${DV_GCP_OPTIMIZED_TF_WHL_VERSION} (which is 2.11.0).
294
+ # This is to avoid the issue:
295
+ # ValueError: Addons>LAMB has already been registered to <class 'tensorflow_addons.optimizers.lamb.LAMB'>
296
+ # However, it's important that the protobuf pinning happens after this!
297
+ # TODO: Remove this later once the first dependency can be changed
298
+ # to ${DV_GCP_OPTIMIZED_TF_WHL_VERSION}.
299
+ pip3 install "${PIP_ARGS[@]}" "tf-models-official==2.11.6"
300
+
292
301
# Just being safe, pin protobuf's version one more time.
293
302
pip3 install "${PIP_ARGS[@]}" 'protobuf==3.13.0'
294
303
You can’t perform that action at this time.
0 commit comments