Skip to content

Commit

Permalink
update loss_fns
Browse files Browse the repository at this point in the history
  • Loading branch information
SiddeshSambasivam committed Jun 27, 2021
1 parent 4403b59 commit 0b61720
Show file tree
Hide file tree
Showing 6 changed files with 80 additions and 14 deletions.
22 changes: 20 additions & 2 deletions matterix/functions.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
import numpy as np
from .tensor import Tensor, TensorableType, enforceTensor

# TODO: tanh, relu
# TODO: tanh


def sigmoid(x: TensorableType):
def sigmoid(x: TensorableType) -> Tensor:
"""Returns the sigmoid of a given tensor"""

x = enforceTensor(x)
Expand All @@ -25,3 +25,21 @@ def backward_fn():
output.backward_fn = backward_fn

return output


def relu(x: TensorableType) -> Tensor:
    """Applies the rectified linear unit, element-wise max(0, x).

    The returned tensor participates in autograd: gradient flows back
    only through positions where the input was strictly positive.
    """
    x = enforceTensor(x)

    out = Tensor(np.maximum(x.data, 0), requires_grad=x.requires_grad)
    out.save_for_backward([x])

    def backward_fn():
        if not x.requires_grad:
            return
        # d/dx relu(x) is 1 where x > 0 and 0 elsewhere; chain it
        # with the gradient flowing in from downstream.
        x.grad.data += (x.data > 0) * out.grad.data

    out.backward_fn = backward_fn
    return out
22 changes: 22 additions & 0 deletions matterix/loss.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
from numpy import diff
from numpy.random import logseries
from .tensor import Tensor

# TODO: RMSE, MAE, Binary cross-entropy, Categorical cross-entropy, kullback leibler divergence loss


def MSE(y_train: Tensor, y_pred: Tensor) -> Tensor:

diff = y_train - y_pred
loss = (diff * diff).sum() * (1.0 / diff.numel())

return loss


def RMSE(y_train: Tensor, y_pred: Tensor) -> Tensor:

diff = y_train - y_pred
mse = ((diff * diff).sum()) * (1.0 / diff.numel())
rmse = mse ** (1.0 / 2.0)

return rmse
1 change: 0 additions & 1 deletion matterix/loss_fn.py

This file was deleted.

2 changes: 1 addition & 1 deletion matterix/ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ def backward_fn():


@registerFn(Tensor, "__pow__")
def pow(a: TensorableType, pow: int) -> Tensor:
def pow(a: TensorableType, pow: float) -> Tensor:

a = enforceTensor(a)

Expand Down
30 changes: 24 additions & 6 deletions matterix/optim.py
Original file line number Diff line number Diff line change
@@ -1,22 +1,40 @@
# TODO: SGD, Adam, RMSProp
from .tensor import Tensor
from .nn import Module

# Model (params)
# -> Optimizer (which updates the parameters)
# -> Needs to be reflected in the Model (params)
# TODO: Adam, RMSProp


class SGD:
    """
    Runs stochastic gradient descent
    Parameters
    ----------
    Arg: model (Module)
        model which needs to be optimized
    Arg: parameters (dict)
        dict of all the parameters which needs to be optimized in the model
    Arg: lr (float)
        Learning rate. Size of each gradient step
    """

    def __init__(self, model: "Module", parameters: dict, lr: float = 0.001) -> None:
        self.model = model
        self.params = parameters
        self.lr = lr

    def step(self) -> None:
        """Updates the parameters of the model"""

        for k, v in self.params.items():
            # In-place update: step each parameter against its gradient.
            v -= v.grad * self.lr
            self.params[k] = v

        # Reflect the updated parameters back on the model instance so
        # subsequent forward passes use the new values.
        self.model.__dict__.update(self.params)

    def zero_grad(self) -> None:
        """Sets the gradients of all the parameters to zero"""
        self.model.zero_grad()
17 changes: 13 additions & 4 deletions matterix/tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,21 @@ def enforceTensor(_input: TensorableType) -> "Tensor":
return Tensor(_input)


def enforceNumpy(_input: ArrayableType, dtype=np.float64) -> np.ndarray:
def enforceNumpy(_input: ArrayableType, dtype=np.float32) -> np.ndarray:
"""Converts the input to numpy array. This is called only during input validation"""

if _input is None:
raise TypeError("No input data provided. Tensor cannot be empty.")

if not isinstance(_input, np.ndarray):
if type(_input) in [list, float, np.float32, np.float64]:
if type(_input) in [
list,
float,
np.float32,
np.float64,
np.float16,
np.float128,
]:
return np.array(_input, dtype=dtype)
raise ValueError("Tensor only accepts float, list and numpy array as data.")

Expand Down Expand Up @@ -56,9 +63,11 @@ class Tensor:
"""

def __init__(self, data: ArrayableType, requires_grad: bool = False) -> None:
def __init__(
self, data: ArrayableType, requires_grad: bool = False, dtype=np.float64
) -> None:

self.data = enforceNumpy(data)
self.data = enforceNumpy(data, dtype=dtype)
self.ctx: List["Tensor"] = []
self.grad = Tensor(np.zeros_like(self.data)) if requires_grad == True else None
self.backward_fn = lambda: None
Expand Down

0 comments on commit 0b61720

Please sign in to comment.