3 files changed, +10 -8 lines changed

@@ -6,7 +6,7 @@ def get_default_config(args):
     config.SAVE_EVERY_EPOCHS = 1
     config.PATIENCE = 10
     config.BATCH_SIZE = 450
-    config.TEST_BATCH_SIZE = 256
+    config.TEST_BATCH_SIZE = 1
     config.READER_NUM_PARALLEL_BATCHES = 1
     config.SHUFFLE_BUFFER_SIZE = 10000
     config.CSV_BUFFER_SIZE = 100 * 1024 * 1024  # 100 MB
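
Context for this first hunk: TEST_BATCH_SIZE drops from 256 to 1, so evaluation now feeds one example per batch. Below is a minimal sketch of how such a config object might be built and consumed; the SimpleNamespace container and the iter_test_batches helper are illustrative assumptions, not code from this repository.

# Minimal sketch (assumed container): a namespace-style config holding the
# defaults from this hunk, plus a batching helper that uses TEST_BATCH_SIZE.
from types import SimpleNamespace

def get_default_config(args=None):
    config = SimpleNamespace()
    config.SAVE_EVERY_EPOCHS = 1
    config.PATIENCE = 10
    config.BATCH_SIZE = 450
    config.TEST_BATCH_SIZE = 1   # was 256 before this change
    config.READER_NUM_PARALLEL_BATCHES = 1
    config.SHUFFLE_BUFFER_SIZE = 10000
    config.CSV_BUFFER_SIZE = 100 * 1024 * 1024  # 100 MB
    return config

def iter_test_batches(examples, config):
    # With TEST_BATCH_SIZE = 1, each yielded batch holds a single example.
    for i in range(0, len(examples), config.TEST_BATCH_SIZE):
        yield examples[i:i + config.TEST_BATCH_SIZE]
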
@@ -21,10 +21,11 @@
 # recommended to use a multi-core machine for the preprocessing
 # step and set this value to the number of cores.
 # PYTHON - python3 interpreter alias.
-TRAIN_DIR=/data2/edinella/java-small-clean-seq-c/training
-VAL_DIR=/data2/edinella/java-small-clean-seq-c/validation
-TEST_DIR=/data2/edinella/java-small-clean-seq-c/test
-DATASET_NAME=java-small-clean-seq-c
+PREFIX=/data2/edinella/seq-lim-b/
+TRAIN_DIR=$PREFIX/training
+VAL_DIR=$PREFIX/validation
+TEST_DIR=$PREFIX/test
+DATASET_NAME=seq-lim-b
 MAX_DATA_CONTEXTS=1000
 MAX_CONTEXTS=200
 SUBTOKEN_VOCAB_SIZE=186277
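
This second hunk folds the four hard-coded dataset paths into a single PREFIX variable plus DATASET_NAME, so switching datasets now touches only two lines. A small sketch of how the new assignments expand, with values derived from the diff (the double slash caused by the trailing slash in PREFIX is generally harmless on POSIX paths):

# Sketch: expanding the preprocessing variables by plain string substitution,
# mirroring TRAIN_DIR=$PREFIX/training etc. from the shell script.
PREFIX = "/data2/edinella/seq-lim-b/"
DATASET_NAME = "seq-lim-b"

TRAIN_DIR = PREFIX + "/training"    # /data2/edinella/seq-lim-b//training
VAL_DIR = PREFIX + "/validation"    # /data2/edinella/seq-lim-b//validation
TEST_DIR = PREFIX + "/test"         # /data2/edinella/seq-lim-b//test

print("TRAIN_DIR =", TRAIN_DIR)
print("VAL_DIR   =", VAL_DIR)
print("TEST_DIR  =", TEST_DIR)
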
@@ -5,9 +5,10 @@
 # test_data: by default, points to the validation set, since this is the set that
 # will be evaluated after each training iteration. If you wish to test
 # on the final (held-out) test set, change 'val' to 'test'.
-type=java-small-clean-seq-c
-dataset_name=java-small-clean-seq-c
-data_dir=data/java-small-clean-seq-c
+
+type=seq-lim-b
+dataset_name=seq-lim-b
+data_dir=data/seq-lim-b
 data=${data_dir}/${dataset_name}
 test_data=${data_dir}/${dataset_name}.val.c2s
 model_dir=models/${type}
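
In this third hunk the derived variables at the bottom are unchanged, so after substitution the training script points at the following locations. The values below are computed from the assignments in the diff; the Python names simply mirror the shell variables, with type renamed to avoid shadowing the builtin.

# Sketch: the values the shell variables expand to after this change.
type_ = "seq-lim-b"            # "type" in the script; renamed here only for Python
dataset_name = "seq-lim-b"
data_dir = "data/seq-lim-b"

data = f"{data_dir}/{dataset_name}"               # data/seq-lim-b/seq-lim-b
test_data = f"{data_dir}/{dataset_name}.val.c2s"  # data/seq-lim-b/seq-lim-b.val.c2s
model_dir = f"models/{type_}"                     # models/seq-lim-b

print(data, test_data, model_dir, sep="\n")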