#!/usr/bin/env python
# Generated python code from maj file

###########
# Imports #
###########
import torch.nn as nn
import torch.optim as optim
import torch
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset

#############
# New model #
#############
class Model_model3(nn.Module):
    """Feed-forward classifier: 4 inputs -> 128 hidden (sigmoid) -> 3 class probabilities."""

    def __init__(self):
        super(Model_model3, self).__init__()
        self.linear0 = nn.Linear(4, 128)
        self.sigmoid1 = nn.Sigmoid()
        self.linear2 = nn.Linear(128, 3)
        # dim=1 makes the class axis explicit for the (batch, classes) output
        # and silences the deprecation warning raised by nn.Softmax() with no
        # dim argument; for 2-D input the implicit choice was already dim 1.
        self.softmax3 = nn.Softmax(dim=1)
        # NOTE(review): this model is trained with nn.CrossEntropyLoss (see the
        # main section), which applies log-softmax internally. Feeding it
        # softmax-ed probabilities applies softmax twice and flattens the
        # gradients; the conventional fix is to return raw logits from
        # forward() and softmax only at inference time. Kept as-is so callers
        # that expect probabilities keep working.

    def forward(self, x):
        """Return class probabilities of shape (batch, 3) for input of shape (batch, 4)."""
        x = self.linear0(x)
        x = self.sigmoid1(x)
        x = self.linear2(x)
        x = self.softmax3(x)
        return x

########
# Main #
########
# Training hyper-parameters for model3.
learning_rate_model_model3 = 0.01
batch_size_model_model3 = 32
epochs_model_model3 = 15

# Model, loss function and optimizer.
model_model3 = Model_model3()
criterion_model3 = nn.CrossEntropyLoss()
optimizer_model3 = optim.Adam(
    model_model3.parameters(),
    lr=learning_rate_model_model3,
)

# Read the dataset: every column except the last is a feature, the last
# column holds the class label.
data_dataset2 = pd.read_csv('test.csv')
feature_values = data_dataset2.iloc[:, :-1].values
raw_labels = data_dataset2.iloc[:, -1].values

X_dataset2 = torch.tensor(feature_values, dtype=torch.float32)

# Map the raw labels onto contiguous integer ids, as required by
# nn.CrossEntropyLoss.
label_encoder_model3 = LabelEncoder()
y_encoded_model3 = label_encoder_model3.fit_transform(raw_labels)
y_dataset2 = torch.tensor(y_encoded_model3, dtype=torch.long)

# Shuffle and split into train / validation subsets.
# NOTE(review): test_size=0.6 keeps only 40% of the data for training —
# unusual, but preserved as-is.
split_result = train_test_split(
    X_dataset2,
    y_dataset2,
    test_size=0.6,
    shuffle=True,
)
X_train_dataset2, X_val_dataset2, y_train_dataset2, y_val_dataset2 = split_result

# Wrap the split tensors in DataLoaders for mini-batch iteration.
train_dataset_dataset2 = TensorDataset(X_train_dataset2, y_train_dataset2)
train_loader_dataset2 = DataLoader(
    train_dataset_dataset2,
    batch_size=batch_size_model_model3,
    shuffle=True,
)

val_dataset_dataset2 = TensorDataset(X_val_dataset2, y_val_dataset2)
val_loader_dataset2 = DataLoader(
    val_dataset_dataset2,
    batch_size=batch_size_model_model3,
)

def train_model3():
    """Train model_model3 for epochs_model_model3 epochs over train_loader_dataset2.

    Uses the module-level model, optimizer and criterion. Prints the loss of
    the last mini-batch of each epoch (not an epoch average).
    """
    print(f'[*] Training metrics : ')
    for epoch in range(epochs_model_model3):
        model_model3.train()
        # Track the last batch loss explicitly so an empty DataLoader no
        # longer raises NameError at the print below (original read `loss`
        # after the loop, unbound when the loader yields no batches).
        last_loss = float('nan')
        for inputs, targets in train_loader_dataset2:
            optimizer_model3.zero_grad()
            outputs = model_model3(inputs)
            loss = criterion_model3(outputs, targets)
            loss.backward()
            optimizer_model3.step()
            last_loss = loss.item()
        print(f' └──[+] Epoch {epoch + 1}/{epochs_model_model3}, Loss: {last_loss:.4f}')

# Kick off training as soon as the script reaches this point.
train_model3()

def evaluate_model3():
    """Evaluate model_model3 on val_loader_dataset2 without gradients.

    Prints the mean validation loss (averaged over batches) and the overall
    accuracy. Uses the module-level model and criterion.
    """
    model_model3.eval()
    val_loss = 0.0
    correct_predictions = 0.0
    total_predictions = 0
    with torch.no_grad():
        for inputs, targets in val_loader_dataset2:
            outputs = model_model3(inputs)
            loss = criterion_model3(outputs, targets)
            val_loss += loss.item()
            # Predicted class = argmax over the class axis.
            _, predicted = torch.max(outputs, 1)
            correct_predictions += (predicted == targets).sum().item()
            total_predictions += targets.size(0)
    val_loss /= len(val_loader_dataset2)
    print(f'[*] Evaluation metrics : ')
    # Fixed typo in the original output: "Validaion" -> "Validation".
    print(f' └──[+] Validation Loss: {val_loss:.4f}') # Default metric
    # The original computed correct/total counts but never reported them;
    # surface the accuracy here. Guard against an empty validation loader.
    accuracy = correct_predictions / total_predictions if total_predictions else float('nan')
    print(f' └──[+] Accuracy: {accuracy:.4f}')

# Run the evaluation right after training completes.
evaluate_model3()