#!/usr/bin/env python
# Generated Python code from a maj file
###########
# Imports #
###########
import torch.nn as nn
import torch.optim as optim
import torch
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
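
# Optional and not part of the generated file: seeding the RNG makes weight
# initialization and DataLoader shuffling reproducible across runs (passing
# random_state=0 to train_test_split below would pin the split as well).
# torch.manual_seed(0)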
#############
# New model #
#############
class Model_model1(nn.Module):
    def __init__(self):
        super(Model_model1, self).__init__()
        # 4 input features -> 64 -> 32 -> 3 output classes
        self.linear0 = nn.Linear(4, 64)
        self.leakyRelu1 = nn.LeakyReLU()
        self.linear2 = nn.Linear(64, 32)
        self.leakyRelu3 = nn.LeakyReLU()
        self.linear4 = nn.Linear(32, 3)
        # Final activation is applied to the output scores even though
        # CrossEntropyLoss expects raw logits; unusual, but it still trains
        self.leakyRelu5 = nn.LeakyReLU()

    def forward(self, x):
        x = self.linear0(x)
        x = self.leakyRelu1(x)
        x = self.linear2(x)
        x = self.leakyRelu3(x)
        x = self.linear4(x)
        x = self.leakyRelu5(x)
        return x
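
# Quick shape check, for illustration only (not part of the generated file):
# a batch of 8 rows with 4 features maps to 8 rows of 3 class scores.
# >>> Model_model1()(torch.randn(8, 4)).shape
# torch.Size([8, 3])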
########
# Main #
########
model_model1 = Model_model1()
learning_rate_model_model1 = 0.001 # Default learning_rate
batch_size_model_model1 = 16 # Default batch_size
epochs_model_model1 = 10 # Default epochs
criterion_model1 = nn.CrossEntropyLoss()
optimizer_model1 = optim.Adam(model_model1.parameters(), lr=learning_rate_model_model1)
# Read dataset: every column except the last is a feature, the last is the label
data_dataset1 = pd.read_csv('test.csv')
X_dataset1 = torch.tensor(data_dataset1.iloc[:, :-1].values, dtype=torch.float32)
# Encode the (possibly string) labels as integer class indices for CrossEntropyLoss
label_encoder_model1 = LabelEncoder()
y_encoded_model1 = label_encoder_model1.fit_transform(data_dataset1.iloc[:, -1].values)
y_dataset1 = torch.tensor(y_encoded_model1, dtype=torch.long)
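
# The CSV layout is implied rather than documented: given nn.Linear(4, 64) and
# the 3-unit output layer, test.csv is assumed to hold 4 numeric feature
# columns followed by one label column with 3 classes, e.g. (hypothetical rows):
#   5.1,3.5,1.4,0.2,setosa
#   7.0,3.2,4.7,1.4,versicolor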
def accuracy(y_true, y_pred):
    # Fraction of predictions that match the true labels
    correct = (y_true == y_pred).sum().item()
    total = y_true.size(0)
    return correct / total

def recall(y_true, y_pred):
    # Binary-style recall with class 1 treated as the positive class
    TP = (y_pred == 1) & (y_true == 1)
    FN = (y_pred != 1) & (y_true == 1)
    if (TP.sum().item() + FN.sum().item()) == 0:
        return 0.0
    recall_score = TP.sum().item() / (TP.sum().item() + FN.sum().item())
    return recall_score
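
# The recall() above treats class 1 as the positive class, while the model has
# 3 output classes. A macro-averaged variant would look like this sketch (an
# illustration added here, not produced by the generator):
def macro_recall(y_true, y_pred, num_classes=3):
    # Average per-class recall over the classes that actually appear in y_true
    scores = []
    for c in range(num_classes):
        tp = ((y_pred == c) & (y_true == c)).sum().item()
        fn = ((y_pred != c) & (y_true == c)).sum().item()
        if tp + fn > 0:
            scores.append(tp / (tp + fn))
    return sum(scores) / len(scores) if scores else 0.0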
# Split data: with test_size=0.8, only 20% of the rows are used for training
# and 80% are held out for validation, an unusually small training share
X_train_dataset1, X_val_dataset1, y_train_dataset1, y_val_dataset1 = train_test_split(X_dataset1, y_dataset1, test_size=0.8, shuffle=True)
# Create DataLoaders
train_dataset_dataset1 = TensorDataset(X_train_dataset1, y_train_dataset1)
val_dataset_dataset1 = TensorDataset(X_val_dataset1, y_val_dataset1)
train_loader_dataset1 = DataLoader(train_dataset_dataset1, batch_size=batch_size_model_model1, shuffle=True)
val_loader_dataset1 = DataLoader(val_dataset_dataset1, batch_size=batch_size_model_model1)
def train_model1():
    for epoch in range(epochs_model_model1):
        model_model1.train()
        for inputs, targets in train_loader_dataset1:
            optimizer_model1.zero_grad()
            outputs = model_model1(inputs)
            loss = criterion_model1(outputs, targets)
            loss.backward()
            optimizer_model1.step()
        # Reports the loss of the last mini-batch of the epoch
        print(f'Epoch {epoch + 1}/{epochs_model_model1}, Loss: {loss.item():.4f}')

# Train
train_model1()
def evaluate_model1():
    model_model1.eval()
    val_loss = 0.0
    correct_predictions = 0.0
    total_predictions = 0
    all_targets = []
    all_predicted = []
    with torch.no_grad():
        for inputs, targets in val_loader_dataset1:
            outputs = model_model1(inputs)
            loss = criterion_model1(outputs, targets)
            val_loss += loss.item()
            # Predicted class = index of the largest output score
            _, predicted = torch.max(outputs, 1)
            correct_predictions += (predicted == targets).sum().item()
            total_predictions += targets.size(0)
            all_targets.append(targets)
            all_predicted.append(predicted)
    val_loss /= len(val_loader_dataset1)
    all_targets = torch.cat(all_targets)
    all_predicted = torch.cat(all_predicted)
    print('[*] Metrics:')
    print(f' └──[+] Validation Loss: {val_loss:.4f}')  # Default metric
    accuracy_value = accuracy(all_targets, all_predicted)
    print(f' └──[+] Accuracy: {accuracy_value:.4f}')
    recall_value = recall(all_targets, all_predicted)
    print(f' └──[+] Recall: {recall_value:.4f}')

# Evaluate
evaluate_model1()
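
# A typical next step, not emitted by the generator (the file name below is
# illustrative): persist the trained weights for later reuse.
# torch.save(model_model1.state_dict(), 'model_model1.pt')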