-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
8 changed files
with
949 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,135 @@ | ||
#引入相关库 | ||
import torch | ||
import torch.optim as optim | ||
from torch.autograd import Variable | ||
import torch.nn as nn | ||
from torch.utils.data import DataLoader | ||
from torchvision import datasets, transforms | ||
|
||
# Hyperparameters
EPOCH = 20        # number of full passes over the training set
pre_epoch = 0     # epochs already completed (resume offset)
BATCH_SIZE = 64   # mini-batch size
LR = 0.01         # learning rate
class LeNet(nn.Module):
    """LeNet-5 style CNN for single-channel 28x28 images, 10 output classes."""

    def __init__(self):
        super(LeNet, self).__init__()
        # Conv block 1: 1 -> 6 channels, 5x5 kernel, padding 2 keeps 28x28;
        # 2x2 max-pool halves it to 14x14.
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 6, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        # Conv block 2: 6 -> 16 channels, 5x5 kernel (14 -> 10);
        # 2x2 max-pool halves it to 5x5.
        self.conv2 = nn.Sequential(
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        # Fully-connected head: 16*5*5 -> 120 -> 84 -> 10 class logits
        # (10 outputs, one per class 0-9).
        self.fc1 = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.BatchNorm1d(120),
            nn.ReLU(),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(120, 84),
            nn.BatchNorm1d(84),
            nn.ReLU(),
            nn.Linear(84, 10),
        )

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input (batch, 1, 28, 28)."""
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)  # flatten to (batch, 16*5*5)
        return self.fc2(self.fc1(flat))
# Select the GPU when available, otherwise fall back to CPU.
# (The original assigned this identical expression twice in a row;
# the redundant duplicate is removed.)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Dataset loading.
# NOTE(review): (0.1307,), (0.3081,) are the classic MNIST mean/std;
# FashionMNIST's own statistics differ slightly. Kept as-is to preserve behavior.
_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
# Training split: shuffled each epoch.
train_loader = torch.utils.data.DataLoader(
    datasets.FashionMNIST('./data2', train=True, download=True,
                          transform=_transform),
    batch_size=BATCH_SIZE, shuffle=True)
# Test split (the original comment wrongly labeled this "training data").
# Shuffling is unnecessary for evaluation, so it is disabled for reproducible logs;
# overall accuracy is unaffected by batch order.
test_loader = torch.utils.data.DataLoader(
    datasets.FashionMNIST('./data2', train=False, transform=_transform),
    batch_size=BATCH_SIZE, shuffle=False)
# Instantiate the network and move it to the selected device.
model = LeNet()
model = model.to(device)

# Cross-entropy loss — standard choice for multi-class classification.
criterion = nn.CrossEntropyLoss()
# Adam optimizer. (The original comment wrongly described this as
# mini-batch momentum-SGD with L2 weight decay; no weight decay is set.)
optimizer = optim.Adam(model.parameters(), lr=LR)
# Training / evaluation driver.
if __name__ == "__main__":
    best_acc = 85  # accuracy threshold a checkpoint must beat to be saved
    print("Start Training, LetNet5(Fashin-Minist)!")
    # f: per-epoch test accuracy log; f2: per-batch training log.
    with open("MINIST-FASHIONacc.txt", "w") as f, \
            open("MINIST-FASHIONlog.txt", "w") as f2:
        for epoch in range(pre_epoch, EPOCH):
            print('\nEpoch: %d' % (epoch + 1))
            model.train()
            sum_loss = 0.0
            correct = 0.0
            total = 0.0
            length = len(train_loader)  # batches per epoch; hoisted out of the loop
            for i, data in enumerate(train_loader, 0):
                # Prepare the batch. (Deprecated no-op Variable() wrappers removed.)
                inputs, labels = data
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()
                # forward + backward + update
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                # Log running loss and accuracy once per batch.
                sum_loss += loss.item()
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += predicted.eq(labels.data).cpu().sum()
                print('[epoch:%d, iter:%d] Loss: %.03f | Acc: %.3f%% '
                      % (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), 100. * correct / total))
                f2.write('%03d %05d |Loss: %.03f | Acc: %.3f%% '
                         % (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), 100. * correct / total))
                f2.write('\n')
                f2.flush()

            # Evaluate on the test set after every epoch.
            print("Waiting Test!")
            model.eval()  # once per epoch is enough; per-batch eval() calls removed
            with torch.no_grad():
                correct = 0
                total = 0
                for data in test_loader:
                    images, labels = data
                    images, labels = images.to(device), labels.to(device)
                    outputs = model(images)
                    # Predicted class = index of the highest logit.
                    _, predicted = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum()
                print('测试分类准确率为:%.3f%%' % (100 * correct / total))
                acc = 100. * correct / total
                # Append this epoch's test accuracy to the accuracy log.
                f.write("EPOCH=%03d,Accuracy= %.3f%%" % (epoch + 1, acc))
                f.write('\n')
                f.flush()
                # Record the best accuracy and checkpoint the model on improvement.
                if acc > best_acc:
                    with open("MINIST-FASHIONbest_acc.txt", "w") as f3:
                        f3.write("EPOCH=%d,best_acc= %.3f%%" % (epoch + 1, acc))
                    best_acc = acc
                    print('Saving model......')
                    # NOTE(review): saves the whole module object; loading requires
                    # the LeNet class definition — a state_dict would be more portable.
                    torch.save(model, 'MINIST-FASHION_%03d.pth' % (epoch + 1))
        print("Training Finished, TotalEPOCH=%d" % EPOCH)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,135 @@ | ||
#引入相关库 | ||
import torch | ||
import torch.optim as optim | ||
from torch.autograd import Variable | ||
import torch.nn as nn | ||
from torch.utils.data import DataLoader | ||
from torchvision import datasets, transforms | ||
|
||
# Hyperparameters
EPOCH = 20        # number of full passes over the training set
pre_epoch = 0     # epochs already completed (resume offset)
BATCH_SIZE = 64   # mini-batch size
LR = 0.001        # learning rate
class LeNet(nn.Module):
    """LeNet-5 style CNN for single-channel 28x28 images, 10 output classes."""

    def __init__(self):
        super(LeNet, self).__init__()
        # Conv block 1: 1 -> 6 channels, 5x5 kernel, padding 2 keeps 28x28;
        # 2x2 max-pool halves it to 14x14.
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 6, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        # Conv block 2: 6 -> 16 channels, 5x5 kernel (14 -> 10);
        # 2x2 max-pool halves it to 5x5.
        self.conv2 = nn.Sequential(
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        # Fully-connected head: 16*5*5 -> 120 -> 84 -> 10 class logits
        # (10 outputs, one per digit 0-9).
        self.fc1 = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.BatchNorm1d(120),
            nn.ReLU(),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(120, 84),
            nn.BatchNorm1d(84),
            nn.ReLU(),
            nn.Linear(84, 10),
        )

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input (batch, 1, 28, 28)."""
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)  # flatten to (batch, 16*5*5)
        return self.fc2(self.fc1(flat))
# Select the GPU when available, otherwise fall back to CPU.
# (The original assigned this identical expression twice in a row;
# the redundant duplicate is removed.)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Dataset loading: MNIST, normalized with the dataset's published
# mean/std (0.1307, 0.3081).
_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
# Training split: shuffled each epoch.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=True, download=True,
                   transform=_transform),
    batch_size=BATCH_SIZE, shuffle=True)
# Test split (the original comment wrongly labeled this "training data").
# Shuffling is unnecessary for evaluation, so it is disabled for reproducible logs;
# overall accuracy is unaffected by batch order.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=False, transform=_transform),
    batch_size=BATCH_SIZE, shuffle=False)
# Instantiate the network and move it to the selected device.
model = LeNet()
model = model.to(device)

# Cross-entropy loss — standard choice for multi-class classification.
criterion = nn.CrossEntropyLoss()
# Adam optimizer. (The original comment wrongly described this as
# mini-batch momentum-SGD with L2 weight decay; no weight decay is set.)
optimizer = optim.Adam(model.parameters(), lr=LR)
||
# Training / evaluation driver.
if __name__ == "__main__":
    best_acc = 85  # accuracy threshold a checkpoint must beat to be saved
    print("Start Training, LetNet5-Minist!")
    # f: per-epoch test accuracy log; f2: per-batch training log.
    with open("LetNet5-Ministacc.txt", "w") as f, \
            open("LetNet5-Ministlog.txt", "w") as f2:
        for epoch in range(pre_epoch, EPOCH):
            print('\nEpoch: %d' % (epoch + 1))
            model.train()
            sum_loss = 0.0
            correct = 0.0
            total = 0.0
            length = len(train_loader)  # batches per epoch; hoisted out of the loop
            for i, data in enumerate(train_loader, 0):
                # Prepare the batch. (Deprecated no-op Variable() wrappers removed.)
                inputs, labels = data
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()
                # forward + backward + update
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                # Log running loss and accuracy once per batch.
                sum_loss += loss.item()
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += predicted.eq(labels.data).cpu().sum()
                print('[epoch:%d, iter:%d] Loss: %.03f | Acc: %.3f%% '
                      % (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), 100. * correct / total))
                f2.write('%03d %05d |Loss: %.03f | Acc: %.3f%% '
                         % (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), 100. * correct / total))
                f2.write('\n')
                f2.flush()

            # Evaluate on the test set after every epoch.
            print("Waiting Test!")
            model.eval()  # once per epoch is enough; per-batch eval() calls removed
            with torch.no_grad():
                correct = 0
                total = 0
                for data in test_loader:
                    images, labels = data
                    images, labels = images.to(device), labels.to(device)
                    outputs = model(images)
                    # Predicted class = index of the highest logit.
                    _, predicted = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum()
                print('测试分类准确率为:%.3f%%' % (100 * correct / total))
                acc = 100. * correct / total
                # Append this epoch's test accuracy to the accuracy log.
                f.write("EPOCH=%03d,Accuracy= %.3f%%" % (epoch + 1, acc))
                f.write('\n')
                f.flush()
                # Record the best accuracy and checkpoint the model on improvement.
                if acc > best_acc:
                    with open("LetNet5-Ministbest_acc.txt", "w") as f3:
                        f3.write("EPOCH=%d,best_acc= %.3f%%" % (epoch + 1, acc))
                    best_acc = acc
                    print('Saving model......')
                    # NOTE(review): saves the whole module object; loading requires
                    # the LeNet class definition — a state_dict would be more portable.
                    torch.save(model, 'LetNet5-Minist_%03d.pth' % (epoch + 1))
        print("Training Finished, TotalEPOCH=%d" % EPOCH)
Oops, something went wrong.