-
Notifications
You must be signed in to change notification settings - Fork 9
/
Copy pathrun.py
133 lines (110 loc) · 3.1 KB
/
run.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import argparse
import torch
from quixer.setup_training import get_train_evaluate
##################################################
# Default hyperparameters for each of the models #
##################################################

# Quixer (quantum transformer) training defaults.
quixer_hparams = dict(
    qubits=6,              # quantum register width
    layers=3,
    ansatz_layers=4,
    window=32,             # context-window length
    epochs=30,
    restart_epochs=30000,  # effectively disables warm restarts
    dropout=0.10,
    lr=0.002,
    lr_sched="cos",        # cosine learning-rate schedule
    wd=0.0001,             # weight decay
    eps=1e-10,             # optimizer epsilon
    batch_size=32,
    max_grad_norm=5.0,     # gradient-clipping threshold
    model="Quixer",
    print_iter=50,         # logging interval (iterations)
)
# LSTM baseline training defaults.
# NOTE: a "model" key is included for consistency with the other
# hyperparameter dicts (the run loop overwrites it anyway).
lstm_hparams = {
    "layers": 2,
    "window": 32,            # context-window length
    "residuals": False,      # no residual connections between LSTM layers
    "epochs": 30,
    "restart_epochs": 30000, # effectively disables warm restarts
    "dropout": 0.30,
    "lr": 0.002,
    "lr_sched": "cos",       # cosine learning-rate schedule
    "wd": 0.0001,            # weight decay
    "eps": 1e-10,            # optimizer epsilon
    "batch_size": 32,
    "max_grad_norm": 5.0,    # gradient-clipping threshold
    "model": "LSTM",
    "print_iter": 50,        # logging interval (iterations)
}
# FNet baseline training defaults.
fnet_hparams = dict(
    layers=2,
    window=32,             # context-window length
    epochs=30,
    restart_epochs=30000,  # effectively disables warm restarts
    dropout=0.10,
    lr=0.002,
    lr_sched="cos",        # cosine learning-rate schedule
    wd=0.0001,             # weight decay
    eps=1e-10,             # optimizer epsilon
    batch_size=32,
    max_grad_norm=5.0,     # gradient-clipping threshold
    model="FNet",
    print_iter=50,         # logging interval (iterations)
)
# Transformer baseline training defaults.
transformer_hparams = dict(
    layers=1,
    heads=1,               # single attention head
    window=32,             # context-window length
    epochs=30,
    restart_epochs=30000,  # effectively disables warm restarts
    dropout=0.10,
    lr=0.001,
    lr_sched="cos",        # cosine learning-rate schedule
    wd=0.0001,             # weight decay
    eps=1e-10,             # optimizer epsilon
    batch_size=32,
    max_grad_norm=5.0,     # gradient-clipping threshold
    model="Transformer",
    print_iter=50,         # logging interval (iterations)
)
##################################################

# Embedding dimensions swept for each model family.
classical_embedding_dimensions = [96, 128]
quantum_embedding_dimensions = [512]

# Maps each runnable model name to its (default hyperparameters,
# embedding-dimension sweep) pair.
model_map = {
    "Quixer": (quixer_hparams, quantum_embedding_dimensions),
    "Transformer": (transformer_hparams, classical_embedding_dimensions),
    "LSTM": (lstm_hparams, classical_embedding_dimensions),
    "FNet": (fnet_hparams, classical_embedding_dimensions),
}

# Model names accepted on the command line (a dict iterates its keys).
available_models = list(model_map)
# Parse command line arguments.
# Renamed the parser from `args` to `parser`: it is an ArgumentParser,
# not the parsed namespace.
parser = argparse.ArgumentParser(
    prog="Quixer", description="Runs the Quixer model and/or classical baselines"
)
parser.add_argument(
    "-m",
    "--model",
    # A list default keeps parsed.model a list whether or not -m is given
    # (with nargs="*" a plain-string default would be returned as-is).
    default=["Quixer"],
    choices=available_models,
    nargs="*",
    help="Model(s) to run.",
)
parser.add_argument("-d", "--device", default="cpu", help="Device to run training on.")
parsed = parser.parse_args()

device_name = parsed.device
# Defensive normalization: guarantee a list even if the default is ever
# changed back to a bare string (isinstance, not type(...) is list).
models_to_run = parsed.model if isinstance(parsed.model, list) else [parsed.model]
# Make cuDNN algorithms deterministic for reproducibility.
# NOTE(review): the seed sweep below draws from torch's *unseeded* global
# RNG, so the sampled seeds differ between invocations of this script —
# confirm that is intended alongside this determinism flag.
torch.backends.cudnn.deterministic = True

device = torch.device(device_name)
print(f"Running on device: {device}")

train_evaluate = get_train_evaluate(device)

# Run every requested model over its embedding-dimension sweep, with 10
# randomly drawn seeds per configuration.
for model_name in models_to_run:
    base_hparams, embedding_dimensions = model_map[model_name]
    for embedding_dimension in embedding_dimensions:
        for seed in torch.randint(high=1000000, size=(10,)).tolist():
            # Build a fresh per-run dict instead of mutating the shared
            # defaults, so runs cannot alias (and contaminate) one another
            # if train_evaluate retains a reference to its argument.
            run_hparams = {
                **base_hparams,
                "model": model_name,
                "dimension": embedding_dimension,
                "seed": seed,
            }
            train_evaluate(run_hparams)