#!/usr/bin/env python
# Generated Python code from maj file

###########
# Imports #
###########
import torch.nn as nn
import torch.optim as optim
import torch
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset

#############
# New model #
#############
class Model_model10(nn.Module):
    def __init__(self):
        super(Model_model10, self).__init__()
        self.linear0 = nn.Linear(4, 32)
        self.leakyRelu1 = nn.LeakyReLU()
        self.linear2 = nn.Linear(32, 3)
        # Note: nn.CrossEntropyLoss applies log-softmax internally, so an
        # explicit Softmax before it is usually redundant; dim=1 silences the
        # implicit-dimension deprecation warning.
        self.softmax3 = nn.Softmax(dim=1)

    def forward(self, x):
        x = self.linear0(x)
        x = self.leakyRelu1(x)
        x = self.linear2(x)
        x = self.softmax3(x)
        return x

########
# Main #
########
model_model10 = Model_model10()
learning_rate_model_model10 = 0.001 # Default learning_rate
batch_size_model_model10 = 16 # Default batch_size
epochs_model_model10 = 10 # Default epochs
criterion_model10 = nn.CrossEntropyLoss()
optimizer_model10 = optim.Adam(model_model10.parameters(), lr=learning_rate_model_model10)

# Read dataset
data_data10 = pd.read_csv('test.csv')
X_data10 = torch.tensor(data_data10.iloc[:, :-1].values, dtype=torch.float32)
label_encoder_model10 = LabelEncoder()
y_encoded_model10 = label_encoder_model10.fit_transform(data_data10.iloc[:, -1].values)
y_data10 = torch.tensor(y_encoded_model10, dtype=torch.long)

def precision(y_true, y_pred):
    # Binary precision for the positive class (label 1): TP / (TP + FP).
    TP = (y_pred == 1) & (y_true == 1)
    FP = (y_pred == 1) & (y_true == 0)
    if (TP.sum().item() + FP.sum().item()) == 0:
        return 0
    precision_score = TP.sum().item() / (TP.sum().item() + FP.sum().item())
    return precision_score
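
# Example usage of precision() with hypothetical tensors (illustration only,
# not values from the dataset above):
#   precision(torch.tensor([1, 0, 1]), torch.tensor([1, 1, 1]))  # -> 2/3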

# Split data (test_size=0.8 keeps 20% for training, 80% for validation)
X_train_data10, X_val_data10, y_train_data10, y_val_data10 = train_test_split(X_data10, y_data10, test_size=0.8, shuffle=False)

# Create DataLoaders
train_dataset_data10 = TensorDataset(X_train_data10, y_train_data10)
val_dataset_data10 = TensorDataset(X_val_data10, y_val_data10)
train_loader_data10 = DataLoader(train_dataset_data10, batch_size=batch_size_model_model10, shuffle=False)
val_loader_data10 = DataLoader(val_dataset_data10, batch_size=batch_size_model_model10)

def train_model10():
    print('[*] Training metrics:')
    for epoch in range(epochs_model_model10):
        model_model10.train()
        for inputs, targets in train_loader_data10:
            optimizer_model10.zero_grad()
            outputs = model_model10(inputs)
            loss = criterion_model10(outputs, targets)
            loss.backward()
            optimizer_model10.step()
        print(f' └──[+] Epoch {epoch + 1}/{epochs_model_model10}, Loss: {loss.item():.4f}')

# Train
train_model10()
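
# Evaluation sketch (illustrative, not part of the generated code): the
# validation loader and the precision() helper above are defined but never
# used, so this is one possible way to wire them together after training.
# It assumes class index 1 is the "positive" class, matching the binary
# precision() metric.
def evaluate_model10():
    model_model10.eval()
    all_preds, all_targets = [], []
    with torch.no_grad():
        for inputs, targets in val_loader_data10:
            outputs = model_model10(inputs)
            all_preds.append(torch.argmax(outputs, dim=1))
            all_targets.append(targets)
    y_pred = torch.cat(all_preds)
    y_true = torch.cat(all_targets)
    print(f'[*] Validation precision (class 1): {precision(y_true, y_pred):.4f}')

# evaluate_model10()  # uncomment to evaluate on the validation split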