-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathNN_angle_predictor.py
More file actions
129 lines (102 loc) · 3.59 KB
/
NN_angle_predictor.py
File metadata and controls
129 lines (102 loc) · 3.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
from NN_util_funcs import *
from NN_data_generator import data_generator
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import numpy as np
import matplotlib.pyplot as plt
import os
# Prevent OpenMP "duplicate runtime" aborts (common when torch and
# matplotlib both ship their own libiomp).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Restrict CUDA to the first visible GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Where the trained angle network's weights are saved / restored.
angle_network_path = './angle_network.pt'

use_cuda = torch.cuda.is_available()

# Draw a random 31-bit value and widen it to uint64, since
# torch.manual_seed() expects a 64-bit seed.  The top 32 bits are all
# zero; expanding the seed to fill the full 64-bit range could be tried.
seed = np.uint64(random.randint(0, 2147483647))
# seed = np.uint64(1810914062)  # uncomment to reproduce a specific run
torch.manual_seed(seed)

device = torch.device("cuda" if use_cuda else "cpu")
# --- training / validation data -------------------------------------------
# gen_PID_data returns four arrays; only the angle_* pairs are consumed by
# the training below (the middle/outer pair presumably carries PWM data —
# see the disabled PWM plot further down).
PID_data_gen = data_generator()
(angle_train_in, train_in,
 angle_train_out, train_out) = PID_data_gen.gen_PID_data(num_points=20000)
(angle_valid_in, valid_in,
 angle_valid_out, valid_out) = PID_data_gen.gen_PID_data(num_points=1000)

# --- hyper-parameters ------------------------------------------------------
batch_size = 1000
learning_rate = 5e-3
max_iters = 750

# Split the training set into mini-batches (presumably shuffled once here;
# NOTE(review): batches are not re-shuffled between epochs — confirm intended).
angle_batches = get_random_batches(angle_train_in, angle_train_out, batch_size)
batch_num = len(angle_batches)
epoch = max_iters  # alias used by the training loop
log_interval = 5
# Small fully-connected regressor: 4 inputs -> 6 hidden units (ReLU) -> 1
# output.  .double() puts the weights in float64, presumably to match the
# dtype of the numpy training arrays fed in below.
angle_network = nn.Sequential(
    nn.Linear(4, 6),
    nn.ReLU(),
    nn.Linear(6, 1),
).double()
# To resume from a previous run, restore the saved weights:
# angle_network.load_state_dict(torch.load(angle_network_path))
optimizer = torch.optim.Adam(angle_network.parameters(), lr=learning_rate)

# Per-epoch bookkeeping, filled in by the training loop.
training_loss = np.zeros(max_iters)
training_acc = np.zeros(max_iters)
# --- training loop ---------------------------------------------------------
# Each epoch revisits the same pre-built mini-batches.  Loss is MSE between
# the network's flattened output and the target angles; per-epoch accuracy
# is averaged over batches.
# Fixes vs. the original: removed the unused `counter` local, the unused
# `loss_calc` return value, the redundant `yb_numpy = yb.copy()` (the
# torch.tensor() call below already copies `yb`, so the numpy array stays
# intact), and dead commented-out per-sample code.  Behavior is unchanged.
for ep in range(epoch):
    total_loss = 0
    for xb, yb in angle_batches:
        xb_t = torch.tensor(xb)
        yb_t = torch.tensor(yb)  # copies, so `yb` remains valid numpy data
        y_pred = torch.flatten(angle_network(xb_t))
        loss = F.mse_loss(y_pred, yb_t)
        total_loss += loss.item()
        # acc is presumably the fraction of predictions within
        # acceptable_diff of the target — confirm against NN_util_funcs.
        _, acc = compute_loss_and_acc(yb, y_pred.detach().numpy(),
                                      acceptable_diff=0.02)
        training_acc[ep] += acc / batch_num
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    if ep % 10 == 0:
        print(ep, 'Loss:', total_loss)
        print('\tAcc:', training_acc[ep])
    training_loss[ep] = total_loss
print("Done training")
torch.save(angle_network.state_dict(), angle_network_path)

# Training-loss curve; y-axis clipped to [0, 0.1] to expose late-epoch detail.
plt.plot(np.arange(0, epoch), training_loss)
plt.xlabel("Epoch")
plt.ylabel("Training loss")
plt.ylim(0.0, 0.1)
plt.show()

# Run the trained network over the validation inputs.
y_est = angle_network(torch.tensor(angle_valid_in)).detach().numpy()

# Predicted vs. actual angle.  The "Ideal" diagonal spans the overlap of the
# two value ranges, so perfect predictions would sit exactly on the line.
# (A matching plot for the PWM outputs against valid_out existed here and
# was removed as dead code.)
min_val = max(np.min(angle_valid_out), np.min(y_est))
max_val = min(np.max(angle_valid_out), np.max(y_est))
plt.scatter(angle_valid_out, y_est)
plt.plot([min_val, max_val], [min_val, max_val], color='navajowhite', label='Ideal')
plt.xlabel("Actual Angle")
plt.ylabel("NN Angle Estimate")
plt.title("Angle output")
plt.legend()
plt.show()

print("Seed used:", seed)