Refactor training loop from script to class #130

Closed · wants to merge 3 commits
Changes from all commits
src/main.py: 100 changes (59 additions, 41 deletions)
@@ -1,48 +1,66 @@
 from PIL import Image
 import torch
 import torch.nn as nn
 import torch.optim as optim
 from torchvision import datasets, transforms
 from torch.utils.data import DataLoader
 import numpy as np
 
-# Step 1: Load MNIST Data and Preprocess
-transform = transforms.Compose([
-    transforms.ToTensor(),
-    transforms.Normalize((0.5,), (0.5,))
-])
-from torchvision import datasets, transforms
-
-trainset = datasets.MNIST('.', download=True, train=True, transform=transform)
-trainloader = DataLoader(trainset, batch_size=64, shuffle=True)
-
-# Step 2: Define the PyTorch Model
-class Net(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.fc1 = nn.Linear(28 * 28, 128)
-        self.fc2 = nn.Linear(128, 64)
-        self.fc3 = nn.Linear(64, 10)
-
-    def forward(self, x):
-        x = x.view(-1, 28 * 28)
-        x = nn.functional.relu(self.fc1(x))
-        x = nn.functional.relu(self.fc2(x))
-        x = self.fc3(x)
-        return nn.functional.log_softmax(x, dim=1)
-
-# Step 3: Train the Model
-model = Net()
-optimizer = optim.SGD(model.parameters(), lr=0.01)
-criterion = nn.NLLLoss()
-
-# Training loop
-epochs = 3
-for epoch in range(epochs):
-    for images, labels in trainloader:
-        optimizer.zero_grad()
-        output = model(images)
-        loss = criterion(output, labels)
-        loss.backward()
-        optimizer.step()
-
-torch.save(model.state_dict(), "mnist_model.pth")
+class MNISTTrainer:
+    def __init__(self):
+        self.transform = transforms.Compose(
+            [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
+        )
+        self.optimizer = None
+        self.criterion = nn.NLLLoss()
+        self.epochs = 3
+
+    def load_data(self):
+        """Load and preprocess MNIST data."""
+        trainset = datasets.MNIST(
+            ".", download=True, train=True, transform=self.transform
+        )
+        trainloader = DataLoader(trainset, batch_size=64, shuffle=True)
+        return trainloader
+
+    def define_model(self):
+        """Define the PyTorch Model."""
+
+        class Net(nn.Module):
+            def __init__(self):
+                super().__init__()
+                self.fc1 = nn.Linear(28 * 28, 128)
+                self.fc2 = nn.Linear(128, 64)
+                self.fc3 = nn.Linear(64, 10)
+
+            def forward(self, x):
+                x = x.view(-1, 28 * 28)
+                x = nn.functional.relu(self.fc1(x))
+                x = nn.functional.relu(self.fc2(x))
+                x = self.fc3(x)
+                return nn.functional.log_softmax(x, dim=1)
+
+        model = Net()
+        self.optimizer = optim.SGD(model.parameters(), lr=0.01)
+        return model
+
+    def train_model(self, model, trainloader):
+        """Train the model."""
+        for epoch in range(self.epochs):
+            for images, labels in trainloader:
+                self.optimizer.zero_grad()
+                output = model(images)
+                loss = self.criterion(output, labels)
+                loss.backward()
+                self.optimizer.step()
+
+    def save_model(self, model):
+        """Save the trained model."""
+        torch.save(model.state_dict(), "mnist_model.pth")
+
+
+# Create an instance of MNISTTrainer and call the methods in the correct order
+trainer = MNISTTrainer()
+trainloader = trainer.load_data()
+model = trainer.define_model()
+trainer.train_model(model, trainloader)
+trainer.save_model(model)
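
Review note, not part of the diff: main.py still imports PIL.Image and numpy without using them in the training path, and the PR saves weights to mnist_model.pth but includes no code that loads them back. The sketch below shows one hypothetical way a separate script could reuse those weights for single-image inference; the predict() helper and the standalone copy of Net are illustrative assumptions, not code from this repository.

# Hypothetical inference sketch (illustration only, not part of PR #130).
# It mirrors the Net architecture nested in MNISTTrainer.define_model() and
# loads the weights that save_model() writes to mnist_model.pth.
from PIL import Image
import torch
import torch.nn as nn
from torchvision import transforms


class Net(nn.Module):
    """Same layers as the Net defined inside MNISTTrainer.define_model()."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)

    def forward(self, x):
        x = x.view(-1, 28 * 28)
        x = nn.functional.relu(self.fc1(x))
        x = nn.functional.relu(self.fc2(x))
        return nn.functional.log_softmax(self.fc3(x), dim=1)


def predict(image_path):
    """Classify one image file using the weights saved by MNISTTrainer."""
    model = Net()
    model.load_state_dict(torch.load("mnist_model.pth"))
    model.eval()

    # Same preprocessing that MNISTTrainer.__init__ configures for training.
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
    )
    image = Image.open(image_path).convert("L").resize((28, 28))
    batch = transform(image).unsqueeze(0)  # shape (1, 1, 28, 28)

    with torch.no_grad():
        log_probs = model(batch)
    return int(log_probs.argmax(dim=1).item())

The sketch redefines Net rather than importing it because define_model() creates the class locally, and because importing main.py would re-run the module-level trainer calls at the bottom of the file; wrapping those calls in an if __name__ == "__main__": guard would be a natural follow-up to this refactor.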