metrics.py
import numpy as np
import torch
from scipy import linalg
from scipy.stats import entropy
from torchvision.models.inception import inception_v3
from torch import nn
from torch.nn import functional as F

EPSILON = 1e-20


class Score:
    @staticmethod
    def inception_score(imgs, cuda=True, batch_size=32, resize=True, splits=1):
        """Compute the Inception Score of the generated images `imgs`.

        imgs       -- Torch dataset of (3xHxW) images normalized to the range [-1, 1]
        cuda       -- whether or not to run on the GPU
        batch_size -- batch size for feeding into Inception v3
        resize     -- bilinearly resize images to 299x299 before scoring
        splits     -- number of splits over which the score is averaged
        """
        N = len(imgs)
        assert batch_size > 0
        # assert N > batch_size

        # Set up dtype
        if cuda:
            dtype = torch.cuda.FloatTensor
        else:
            if torch.cuda.is_available():
                print("WARNING: You have a CUDA device, so you should probably set cuda=True")
            dtype = torch.FloatTensor

        # Set up dataloader
        dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)

        # Load the pretrained Inception v3 model
        inception_model = inception_v3(pretrained=True, transform_input=False).type(dtype)
        inception_model.eval()
        up = nn.Upsample(size=(299, 299), mode='bilinear', align_corners=False).type(dtype)

        def get_pred(x):
            # Class probabilities p(y|x) from Inception v3
            with torch.no_grad():
                if resize:
                    x = up(x)
                x = inception_model(x)
                return F.softmax(x, dim=1).cpu().numpy()

        # Get predictions for every image
        preds = np.zeros((N, 1000))
        for i, batch in enumerate(dataloader, 0):
            batch = batch.type(dtype)
            batch_size_i = batch.size()[0]
            preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batch)

        # Compute the mean KL divergence between p(y|x) and the marginal p(y),
        # then exponentiate: IS = exp(E_x[KL(p(y|x) || p(y))])
        split_scores = []
        for k in range(splits):
            part = preds[k * (N // splits): (k + 1) * (N // splits), :]
            py = np.mean(part, axis=0)
            scores = []
            for i in range(part.shape[0]):
                pyx = part[i, :]
                scores.append(entropy(pyx, py))
            split_scores.append(np.exp(np.mean(scores)))

        return np.mean(split_scores), np.std(split_scores)
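
    # A minimal usage sketch (an illustration, not part of the original module).
    # `fake_images` is a hypothetical tensor of generator samples with the assumed
    # shape (N, 3, H, W) and values in [-1, 1]; a tensor is indexable, so it can be
    # passed directly as the dataset:
    #
    #     fake_images = torch.rand(256, 3, 64, 64) * 2 - 1
    #     is_mean, is_std = Score.inception_score(fake_images, cuda=False,
    #                                             batch_size=32, splits=4)
    #     print(f"Inception Score: {is_mean:.3f} +/- {is_std:.3f}")
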
    @staticmethod
    def frechet_inception_distance(real_data, fake_data):
        """Compute the mean per-sample Frechet distance between real and fake images.

        real_data, fake_data -- tensors of shape (N, C, H, W)

        Statistics are taken over the pixels of each sample (each pixel treated
        as a C-dimensional observation), not over Inception features.
        """
        # Move channels last: (N, C, H, W) -> (N, H, W, C)
        real_np = real_data.detach().permute(0, 2, 3, 1).cpu().numpy()
        fake_np = fake_data.detach().permute(0, 2, 3, 1).cpu().numpy()

        scores = []
        for real_img, fake_img in zip(real_np, fake_np):
            # Flatten the spatial dimensions so each pixel is a C-dimensional sample
            x = real_img.reshape(-1, real_img.shape[-1])
            x_w = fake_img.reshape(-1, fake_img.shape[-1])
            m, m_w = x.mean(0), x_w.mean(0)
            C = np.cov(x, rowvar=False)
            C_w = np.cov(x_w, rowvar=False)
            C_C_w_sqrt = linalg.sqrtm(C.dot(C_w), True).real
            # ||m - m_w||^2 + Tr(C + C_w - 2 (C C_w)^{1/2})
            score = m.dot(m) + m_w.dot(m_w) - 2 * m_w.dot(m) + \
                np.trace(C + C_w - 2 * C_C_w_sqrt)
            scores.append(np.sqrt(score))
        return np.mean(scores)
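

if __name__ == "__main__":
    # Minimal smoke test, a sketch rather than part of the original module: random
    # tensors stand in for real and generated batches, and the assumed shape
    # (N, 3, H, W) with values in [-1, 1] matches what the methods above expect.
    real_batch = torch.rand(16, 3, 64, 64) * 2 - 1
    fake_batch = torch.rand(16, 3, 64, 64) * 2 - 1

    fid = Score.frechet_inception_distance(real_batch, fake_batch)
    print(f"Per-sample Frechet distance: {fid:.4f}")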