#!/usr/bin/env python
# Generated python code from maj file
###########
# Imports #
###########
import torch.nn as nn
import torch.optim as optim
import torch
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
#############
# New model #
#############
class Model_model4(nn.Module):
    def __init__(self):
        super(Model_model4, self).__init__()
        self.linear0 = nn.Linear(4, 64)
        self.tanh1 = nn.Tanh()
        self.linear2 = nn.Linear(64, 3)
        self.softmax3 = nn.Softmax(dim=1)

    def forward(self, x):
        x = self.linear0(x)
        x = self.tanh1(x)
        x = self.linear2(x)
        x = self.softmax3(x)
        return x
########
# Main #
########
model_model4 = Model_model4()
learning_rate_model_model4 = 0.001 # Default learning_rate
batch_size_model_model4 = 16 # Default batch_size
epochs_model_model4 = 10 # Default epochs
optimizer_model4 = optim.Adam(model_model4.parameters(), lr=learning_rate_model_model4)
# Assumed loss: CrossEntropyLoss for the 3-class integer targets (criterion_model4 is used in train_model4() below)
criterion_model4 = nn.CrossEntropyLoss()
# Read dataset
data_dataset4 = pd.read_csv('test.csv')
# Encode the string labels in the last column as integer class indices
label_encoder_model4 = LabelEncoder()
y_encoded_model4 = label_encoder_model4.fit_transform(data_dataset4.iloc[:, -1].values)
y_dataset4 = torch.tensor(y_encoded_model4, dtype=torch.long)
# Normalize the raw feature values, then convert to a float tensor,
# so that TensorDataset below receives tensors rather than a numpy array
scaler = StandardScaler()
X_dataset4 = torch.tensor(scaler.fit_transform(data_dataset4.iloc[:, :-1].values), dtype=torch.float32)
# Split data (test_size=0.7 holds out 70% of the samples for validation)
X_train_dataset4, X_val_dataset4, y_train_dataset4, y_val_dataset4 = train_test_split(X_dataset4, y_dataset4, test_size=0.7, shuffle=True)
# Create DataLoaders
train_dataset_dataset4 = TensorDataset(X_train_dataset4, y_train_dataset4)
val_dataset_dataset4 = TensorDataset(X_val_dataset4, y_val_dataset4)
train_loader_dataset4 = DataLoader(train_dataset_dataset4, batch_size=batch_size_model_model4, shuffle=True)
val_loader_dataset4 = DataLoader(val_dataset_dataset4, batch_size=batch_size_model_model4)
def train_model4():
    for epoch in range(epochs_model_model4):
        model_model4.train()
        for inputs, targets in train_loader_dataset4:
            optimizer_model4.zero_grad()
            outputs = model_model4(inputs)
            loss = criterion_model4(outputs, targets)
            loss.backward()
            optimizer_model4.step()
        print(f' └──[+] Epoch {epoch + 1}/{epochs_model_model4}, Loss: {loss.item():.4f}')
# Train
train_model4()
torch.save(model_model4.state_dict(), 'model_model4.pth')
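# --- Optional validation pass: a minimal sketch, not part of the generated maj output.
# val_loader_dataset4 is built above but never used; this assumed evaluation step
# reports accuracy on the held-out split using the already-trained model.
def evaluate_model4():
    model_model4.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, targets in val_loader_dataset4:
            outputs = model_model4(inputs)
            predictions = outputs.argmax(dim=1)
            correct += (predictions == targets).sum().item()
            total += targets.size(0)
    print(f' └──[+] Validation accuracy: {correct / total:.4f}')

evaluate_model4()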