#!/usr/bin/env python
# Generated python code from maj file
###########
# Imports #
###########
import torch.nn as nn
import torch.optim as optim
import torch
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
#############
# New model #
#############
class Model_model1(nn.Module):
    def __init__(self):
        super(Model_model1, self).__init__()
        self.linear0 = nn.Linear(4, 64)

    def forward(self, x):
        # Single linear layer mapping the 4 input features to 64 logits
        # (assumes the label column encodes at most 64 classes).
        return self.linear0(x)
########
# Main #
########
model_model1 = Model_model1()
learning_rate_model_model1 = 0.01
batch_size_model_model1 = 32
epochs_model_model1 = 30
criterion_model1 = nn.CrossEntropyLoss()
optimizer_model1 = optim.Adam(model_model1.parameters(), lr=learning_rate_model_model1)
# Read dataset
data_dataset1 = pd.read_csv('test.csv')
X_dataset1 = torch.tensor(data_dataset1.iloc[:, :-1].values, dtype=torch.float32)
label_encoder_model1 = LabelEncoder()
y_encoded_model1 = label_encoder_model1.fit_transform(data_dataset1.iloc[:, -1].values)
y_dataset1 = torch.tensor(y_encoded_model1, dtype=torch.long)
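# Note: the model's first layer is nn.Linear(4, 64), so 'test.csv' is assumed to hold
# four numeric feature columns followed by a single label column in the last position,
# for example (illustrative values only):
#   f0,f1,f2,f3,label
#   5.1,3.5,1.4,0.2,classA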
def accuracy(y_true, y_pred):
    # Fraction of predictions that match the true labels.
    correct = (y_true == y_pred).sum().item()
    total = y_true.size(0)
    return correct / total

def recall(y_true, y_pred):
    # Binary recall, treating class 1 as the positive class.
    TP = (y_pred == 1) & (y_true == 1)
    FN = (y_pred == 0) & (y_true == 1)
    if (TP.sum().item() + FN.sum().item()) == 0:
        return 0
    recall_score = TP.sum().item() / (TP.sum().item() + FN.sum().item())
    return recall_score
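# Quick sanity check for the metric helpers (illustrative tensors, not part of the generated script):
#   accuracy(torch.tensor([1, 0, 1]), torch.tensor([1, 1, 1]))  # -> 0.666...
#   recall(torch.tensor([1, 0, 1]), torch.tensor([1, 1, 1]))    # -> 1.0 (no false negatives)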
# Split data (test_size=0.8 keeps 20% of the rows for training and holds out 80% for validation)
X_train_dataset1, X_val_dataset1, y_train_dataset1, y_val_dataset1 = train_test_split(X_dataset1, y_dataset1, test_size=0.8, shuffle=True)
# Create data loaders
train_dataset_dataset1 = TensorDataset(X_train_dataset1, y_train_dataset1)
val_dataset_dataset1 = TensorDataset(X_val_dataset1, y_val_dataset1)
train_loader_dataset1 = DataLoader(train_dataset_dataset1, batch_size=batch_size_model_model1, shuffle=True)
val_loader_dataset1 = DataLoader(val_dataset_dataset1, batch_size=batch_size_model_model1)
def train_model1():
    for epoch in range(epochs_model_model1):
        model_model1.train()
        for inputs, targets in train_loader_dataset1:
            optimizer_model1.zero_grad()
            outputs = model_model1(inputs)
            loss = criterion_model1(outputs, targets)
            loss.backward()
            optimizer_model1.step()
        print(f' └──[+] Epoch {epoch + 1}/{epochs_model_model1}, Loss: {loss.item():.4f}')
# Train
train_model1()
def evaluate_model1():
    model_model1.eval()
    val_loss = 0.0
    correct_predictions = 0
    total_predictions = 0
    all_targets = []
    all_predicted = []
    with torch.no_grad():
        for inputs, targets in val_loader_dataset1:
            outputs = model_model1(inputs)
            loss = criterion_model1(outputs, targets)
            val_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            correct_predictions += (predicted == targets).sum().item()
            total_predictions += targets.size(0)
            all_targets.append(targets)
            all_predicted.append(predicted)
    val_loss /= len(val_loader_dataset1)
    all_targets = torch.cat(all_targets)
    all_predicted = torch.cat(all_predicted)
    print('[*] Evaluation metrics:')
    print(f' └──[+] Validation Loss: {val_loss:.4f}') # Default metric
    accuracy_value = accuracy(all_targets, all_predicted)
    print(f' └──[+] Accuracy: {accuracy_value:.4f}')
    recall_value = recall(all_targets, all_predicted)
    print(f' └──[+] Recall: {recall_value:.4f}')
# Evaluate
evaluate_model1()
torch.save(model_model1.state_dict(), 'model_model1.pth')
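# Illustrative sketch (not emitted by the maj generator): reload the saved weights and run
# a single prediction. The sample row and its class decoding are assumptions for demonstration.
model_reloaded_model1 = Model_model1()
model_reloaded_model1.load_state_dict(torch.load('model_model1.pth'))
model_reloaded_model1.eval()
with torch.no_grad():
    sample = X_dataset1[0].unsqueeze(0)  # one feature row, shape (1, 4)
    logits = model_reloaded_model1(sample)
    predicted_label = label_encoder_model1.inverse_transform(torch.argmax(logits, dim=1).numpy())
    print(f'[*] Example prediction for the first row: {predicted_label[0]}')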