#!/usr/bin/env python
# Python code generated from a maj file
###########
# Imports #
###########
from torch import load as load_file
import torch
import torch.nn as nn  # required for nn.CrossEntropyLoss below
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
# Assumes 'model.pth' stores a full pickled model; newer PyTorch releases may require weights_only=False here
model_pretrainedModel = load_file('model.pth')
learning_rate_model_pretrainedModel = 0.001 # Default learning_rate
batch_size_model_pretrainedModel = 16 # Default batch_size
epochs_model_pretrainedModel = 10 # Default epochs
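# Note: learning_rate and epochs are generated defaults that this evaluation-only
# script never uses; only batch_size feeds into the DataLoaders below.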
criterion_pretrainedModel = nn.CrossEntropyLoss()
# Read dataset (all columns except the last are features; the last column holds the labels)
data_testDataset = pd.read_csv('test.csv')
X_testDataset = torch.tensor(data_testDataset.iloc[:, :-1].values, dtype=torch.float32)
label_encoder_pretrainedModel = LabelEncoder()
y_encoded_pretrainedModel = label_encoder_pretrainedModel.fit_transform(data_testDataset.iloc[:, -1].values)
y_testDataset = torch.tensor(y_encoded_pretrainedModel, dtype=torch.long)
# Split data (test_size=0.8: 20% train / 80% validation, no shuffling)
X_train_testDataset, X_val_testDataset, y_train_testDataset, y_val_testDataset = train_test_split(X_testDataset, y_testDataset, test_size=0.8, shuffle=False)
# Create DataLoaders
train_dataset_testDataset = TensorDataset(X_train_testDataset, y_train_testDataset)
val_dataset_testDataset = TensorDataset(X_val_testDataset, y_val_testDataset)
train_loader_testDataset = DataLoader(train_dataset_testDataset, batch_size=batch_size_model_pretrainedModel, shuffle=False)
val_loader_testDataset = DataLoader(val_dataset_testDataset, batch_size=batch_size_model_pretrainedModel)
def evaluate_pretrainedModel():
    """Evaluate the pretrained model on the validation split and report the loss."""
    model_pretrainedModel.eval()
    val_loss = 0.0
    correct_predictions = 0.0
    total_predictions = 0
    all_targets = []
    all_predicted = []
    with torch.no_grad():
        for inputs, targets in val_loader_testDataset:
            outputs = model_pretrainedModel(inputs)
            loss = criterion_pretrainedModel(outputs, targets)
            val_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            correct_predictions += (predicted == targets).sum().item()
            total_predictions += targets.size(0)
            all_targets.append(targets)
            all_predicted.append(predicted)
    val_loss /= len(val_loader_testDataset)
    # Aggregate targets and predictions in case additional metrics are needed later
    all_targets = torch.cat(all_targets)
    all_predicted = torch.cat(all_predicted)
    print('[*] Evaluation metrics:')
    print(f' └──[+] Validation Loss: {val_loss:.4f}') # Default metric
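
# The generated code only defines the evaluation routine; the guarded call below is
# an assumption so the script actually runs the evaluation when executed directly.
if __name__ == '__main__':
    evaluate_pretrainedModel()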