-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtrainer.py
112 lines (84 loc) · 4.04 KB
/
trainer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn.functional as F
class trainer:
    """Drives training, validation and final testing of a torch model.

    Metric histories are kept per epoch in ``train_loss``/``train_acc`` and
    ``val_loss``/``val_acc`` (the test pass also appends to the val lists,
    matching the original design).
    """

    def __init__(self, model, epoch, learning_rate, optimizer, loss):
        """Store training components.

        Args:
            model: torch.nn.Module to train.
            epoch: number of training epochs to run.
            learning_rate: kept for reference; the optimizer already carries it.
            optimizer: torch optimizer bound to ``model.parameters()``.
            loss: criterion callable, e.g. ``nn.CrossEntropyLoss()``.
        """
        self.model = model
        self.epoch = epoch
        self.learning_rate = learning_rate
        self.optimizer = optimizer
        self.criterion = loss
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Bug fix: batches are moved to self.device in the epoch runners, but the
        # model never was — on a CUDA machine that raised a device-mismatch error.
        self.model.to(self.device)
        self.train_total_step = None
        self.val_total_step = None
        # Per-epoch metric history.
        self.val_loss = []
        self.val_acc = []
        self.train_loss = []
        self.train_acc = []

    def train(self, train_dataset, valid_dataset, test_dataset):
        """Train for ``self.epoch`` epochs, validating after each, then test once."""
        for epoch in range(self.epoch):
            self.model.train()
            avg_loss, avg_acc = self.run_train_epoch(
                train_dataset=train_dataset, running_loss=0.0, correct=0)
            self.model.eval()
            avg_loss_val, avg_acc_val = self.run_eval_epoch(
                valid_dataset=valid_dataset, loss_val=0.0, correct_val=0)
            print('[epoch %d] loss: %.5f accuracy: %.4f val loss: %.5f val accuracy: %.4f'
                  % (epoch + 1, avg_loss, avg_acc, avg_loss_val, avg_acc_val))
        # Evaluate the held-out test set once, after training completes.
        avg_loss_test, avg_acc_test = self.test_model(test_dataset=test_dataset)
        print('Loss in the test : {} , accuracy on test data : {}'.format(avg_loss_test, avg_acc_test))

    def run_train_epoch(self, train_dataset, running_loss=0.0, correct=0):
        """Run one optimization pass over ``train_dataset``.

        Args:
            train_dataset: iterable of ``(batch, labels)`` pairs with a ``len()``.
            running_loss: starting loss accumulator (default 0.0).
            correct: starting correct-prediction accumulator (default 0).

        Returns:
            (avg_loss, avg_acc): mean per-batch loss and fraction of samples
            predicted correctly. Also appended to the train histories.
        """
        total = 0  # samples seen; needed so accuracy is a true fraction
        for batch, labels in train_dataset:
            batch = batch.float()
            batch, labels = batch.to(self.device), labels.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.model(batch)
            loss = self.criterion(outputs, labels)
            loss.backward()
            self.optimizer.step()
            # Training statistics: argmax over class dimension.
            _, predicted = torch.max(outputs, 1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)
            running_loss += loss.item()
        # Bug fix: the original divided `correct` (a sample count) by the number
        # of batches, so "accuracy" could exceed 1. max(..., 1) guards an empty loader.
        avg_loss = running_loss / max(len(train_dataset), 1)
        avg_acc = correct / max(total, 1)
        self.train_loss.append(avg_loss)
        self.train_acc.append(avg_acc)
        return avg_loss, avg_acc

    def run_eval_epoch(self, valid_dataset, loss_val=0.0, correct_val=0):
        """Run one gradient-free pass over ``valid_dataset``.

        Returns:
            (avg_loss_val, avg_acc_val): mean per-batch loss and accuracy
            fraction. Also appended to the val histories.
        """
        total = 0
        with torch.no_grad():  # no gradients needed for evaluation
            for batch, labels in valid_dataset:
                batch = batch.float()
                batch, labels = batch.to(self.device), labels.to(self.device)
                outputs = self.model(batch)
                loss = self.criterion(outputs, labels)
                _, predicted = torch.max(outputs, 1)
                correct_val += (predicted == labels).sum().item()
                total += labels.size(0)
                loss_val += loss.item()
        # Same denominator fix as run_train_epoch: batches for loss, samples for accuracy.
        avg_loss_val = loss_val / max(len(valid_dataset), 1)
        avg_acc_val = correct_val / max(total, 1)
        self.val_loss.append(avg_loss_val)
        self.val_acc.append(avg_acc_val)
        return avg_loss_val, avg_acc_val

    def test_model(self, test_dataset):
        """Evaluate on ``test_dataset``; returns ``(avg_loss_test, avg_acc_test)``.

        Metrics are appended to the val histories (loss to ``val_loss``,
        accuracy to ``val_acc``), keeping the original storage location but
        fixing its bugs.
        """
        self.model.eval()
        # Bug fix: the original referenced these accumulators without ever
        # initializing them, raising NameError on the first batch.
        loss_test = 0.0
        correct_test = 0
        total = 0
        with torch.no_grad():  # consistency with run_eval_epoch; was missing here
            for batch, labels in test_dataset:
                batch = batch.float()
                batch, labels = batch.to(self.device), labels.to(self.device)
                outputs = self.model(batch)
                loss = self.criterion(outputs, labels)
                _, predicted = torch.max(outputs, 1)
                correct_test += (predicted == labels).sum().item()
                total += labels.size(0)
                loss_test += loss.item()
        avg_loss_test = loss_test / max(len(test_dataset), 1)
        avg_acc_test = correct_test / max(total, 1)
        # Bug fix: the original appended accuracy into val_loss and loss into
        # val_acc (swapped). Store each metric in its matching list.
        self.val_loss.append(avg_loss_test)
        self.val_acc.append(avg_acc_test)
        return avg_loss_test, avg_acc_test