ADD: add MLP example
T-K-233 committed Jun 9, 2024
1 parent 674a576 commit cb939ee
Showing 11 changed files with 208 additions and 27 deletions.
22 changes: 22 additions & 0 deletions example/mlp/CMakeLists.txt
@@ -0,0 +1,22 @@
cmake_minimum_required(VERSION 3.15)

set(PROJECT_NAME "mlp")

set(PROJECT_INCLUDES
  inc
)

set(PROJECT_SOURCES
  main.c
)

project(${PROJECT_NAME})


add_executable(${PROJECT_NAME} ${PROJECT_SOURCES})
target_include_directories(${PROJECT_NAME} PUBLIC ${PROJECT_INCLUDES})

add_subdirectory(../../nn ./build/nn)
target_link_libraries(${PROJECT_NAME} PUBLIC nn)


29 changes: 29 additions & 0 deletions example/mlp/README.md
@@ -0,0 +1,29 @@
# MLP Example

A simple example demonstrating a one-layer MLP: `output = ReLU(input @ fc1_weight.T + fc1_bias)`

## Initial setup

```bash
mkdir ./example/mlp/build/
cd ./example/mlp/build/
cmake ..
```

## Generating model weights

```bash
cd ./example/mlp/
python ./scripts/run.py
```

The script will generate a `model.pth` file and a `model.bin` file. `model.bin` holds the raw float32 weights: the `fc1.weight` matrix (row-major) followed by the `fc1.bias` vector.
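For a quick sanity check, the file can be dumped back as raw float32 values. A minimal sketch (hypothetical helper, not part of this example; assumes the layout above with `DIM = 3`):

```c
// dump model.bin as float32 values: first 9 are fc1.weight, last 3 are fc1.bias
#include <stdio.h>

int main(void) {
  FILE *f = fopen("model.bin", "rb");
  if (!f) { perror("model.bin"); return 1; }
  float v;
  for (int i = 0; fread(&v, sizeof v, 1, f) == 1; i += 1) {
    printf("%2d: %f\n", i, v);
  }
  fclose(f);
  return 0;
}
```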

## Compiling and running the program

```bash
cd ./example/mlp/build/
cmake --build . && ./mlp
```
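The program prints the `fc1` weight and bias tensors at startup, then the input vector `[1, 2, 3]` and the resulting output tensor; the values should agree with what `scripts/run.py` prints for the same input.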


79 changes: 79 additions & 0 deletions example/mlp/main.c
@@ -0,0 +1,79 @@
/**
 * @file main.c
 *
 * A simple example demonstrating a one-layer MLP: output = ReLU(input @ fc1_weight.T + fc1_bias)
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "nn.h"

#define DIM 3


// load the weight data block from the model.bin file
INCLUDE_FILE(".rodata", "../model.bin", weights);
extern uint8_t weights_data[];
extern size_t weights_start[];
extern size_t weights_end[];
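// INCLUDE_FILE embeds the file via the assembler's .incbin directive and
// exports <symbol>_start / <symbol>_end labels around the blob (the classic
// trick from http://elm-chan.org/junk/32bit/binclude.html), so the size of
// the embedded data in bytes is weights_end - weights_start.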

typedef struct {
  Tensor *input;
  Tensor *fc1_weight;
  Tensor *fc1_bias;
  Tensor *output;
} Model;

/**
 * Initialize the required tensors for the model
 */
void init(Model *model) {
  uint8_t *array_pointer = weights_data;

  model->input = NN_tensor(2, (size_t[]){1, DIM}, DTYPE_F32, NULL);
  model->fc1_weight = NN_tensor(2, (size_t[]){DIM, DIM}, DTYPE_F32, array_pointer);
  array_pointer += DIM * DIM * sizeof(float);
  model->fc1_bias = NN_tensor(2, (size_t[]){1, DIM}, DTYPE_F32, array_pointer);
  array_pointer += DIM * sizeof(float);

  model->output = NN_tensor(2, (size_t[]){1, DIM}, DTYPE_F32, NULL);

  printf("fc1_weight: \n");
  NN_printf(model->fc1_weight);
  printf("fc1_bias: \n");
  NN_printf(model->fc1_bias);
}

/**
 * Forward pass of the model
 */
void forward(Model *model) {
  NN_linear_F32(model->output, model->input, model->fc1_weight, model->fc1_bias);
  NN_relu_F32(model->output, model->output);
}
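
/**
 * For reference only (an illustrative sketch, not part of the nn library):
 * plain-C math the forward pass above is expected to perform. NN_linear_F32
 * presumably computes y = x @ W^T + b with W stored row-major as
 * [out_features, in_features] (PyTorch's nn.Linear layout, which is what
 * scripts/run.py serializes), and NN_relu_F32 clamps negatives to zero.
 */
static void reference_forward(const float *x, const float *w, const float *b, float *y) {
  for (size_t i = 0; i < DIM; i += 1) {
    float acc = b[i];
    for (size_t j = 0; j < DIM; j += 1) {
      acc += x[j] * w[i * DIM + j];
    }
    y[i] = acc > 0.f ? acc : 0.f;
  }
}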

int main(void) {
  size_t size = (size_t)weights_end - (size_t)weights_start;
  printf("weight size: %d\n", (int)size);

  Model *model = malloc(sizeof(Model));

  init(model);

  ((float *)model->input->data)[0] = 1.;
  ((float *)model->input->data)[1] = 2.;
  ((float *)model->input->data)[2] = 3.;

  forward(model);

  printf("input:\n");
  NN_printf(model->input);

  printf("output:\n");
  NN_printf(model->output);

  return 0;
}
1 change: 1 addition & 0 deletions example/mlp/model.bin
Binary file not shown.
Binary file added example/mlp/model.pth
Binary file not shown.
59 changes: 59 additions & 0 deletions example/mlp/scripts/run.py
@@ -0,0 +1,59 @@
import numpy as np
import torch
import torch.nn as nn

torch.manual_seed(0)

class Simple(nn.Module):
    """
    Simple model with one linear layer
    """

    def __init__(self, dim: int):
        super().__init__()
        self.fc1 = nn.Linear(dim, dim)

    def forward(self, x: torch.Tensor):
        y = nn.functional.relu(self.fc1(x))
        return y

# Create model
model = Simple(dim=3)

# Save model
torch.save(model, "model.pth")

# Load model
# model = torch.load("model.pth")

# store model weights as binary file
model_structure = list(model.named_modules())

w1 = model.state_dict().get("fc1.weight").contiguous().numpy()
b1 = model.state_dict().get("fc1.bias").contiguous().numpy()

print("w1:\n", w1)
print("b1:\n", b1)

w1_flat = w1.astype(np.float32).flatten()
b1_flat = b1.astype(np.float32).flatten()

with open("model.bin", "wb") as f:
    f.write(w1_flat.tobytes())
    f.write(b1_flat.tobytes())
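
# binary layout consumed by example/mlp/main.c:
# fc1.weight ([dim, dim], row-major float32) followed by fc1.bias ([dim] float32)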



# Test model
test_input = np.array([
    [1.0, 2.0, 3.0],
], dtype=np.float32)
test_tensor = torch.tensor(test_input, dtype=torch.float32)

output = model(test_tensor)
print("input:")
print(test_input)

print("output:")
# print(test_input @ w1.T + b1)
print(output)
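# note: given the same model.bin and the same [1, 2, 3] input, the "output"
# printed by example/mlp/main.c should match the tensor printed above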
16 changes: 1 addition & 15 deletions example/ppo/main.c
@@ -9,21 +9,7 @@
// extern char _binary_test_end[];


-// http://elm-chan.org/junk/32bit/binclude.html
-#define IMPORT_BIN(section, filename, symbol) asm (\
-".section "#section"\n" /* Change section */\
-".balign 4\n" /* Word alignment */\
-".global "#symbol"\n" /* Export the object address */\
-".global "#symbol"_start\n" /* Export the object address */\
-#symbol"_start:\n" /* Define the object label */\
-#symbol":\n" /* Define the object label */\
-".incbin \""filename"\"\n" /* Import the file */\
-".global "#symbol"_end\n" /* Export the object address */\
-#symbol"_end:\n" /* Define the object label */\
-".balign 4\n" /* Word alignment */\
-".section \".text\"\n") /* Restore section */
-
-IMPORT_BIN(".rodata", "../hack_policy.bin", externdata);
+INCLUDE_FILE(".rodata", "../hack_policy.bin", externdata);



2 changes: 2 additions & 0 deletions example/simple/main.c
@@ -17,6 +17,8 @@ extern uint8_t weights_data[];
extern size_t weights_start[];
extern size_t weights_end[];

+#define DIM 3
+

// Tensors can be defined either globally or locally
Tensor A;
2 changes: 1 addition & 1 deletion example/simple/model.bin
Binary file not shown.
Binary file modified example/simple/model.pth
Binary file not shown.
25 changes: 14 additions & 11 deletions example/simple/scripts/run.py
@@ -2,44 +2,47 @@
import torch
import torch.nn as nn

+torch.manual_seed(0)

class Simple(nn.Module):
    """
    Simple model with one linear layer
    """

-    def __init__(self, dim: int = 2):
+    def __init__(self, dim: int):
        super().__init__()
        self.fc1 = nn.Linear(dim, dim)

    def forward(self, x: torch.Tensor):
-        y = self.fc1(x)
+        y = nn.functional.relu(self.fc1(x))
        return y

# Create model
model = Simple(dim=3)

# Save model
-# torch.save(model, "model.pth")
+torch.save(model, "model.pth")

# Load model
-model = torch.load("model.pth")
+# model = torch.load("model.pth")

# store model weights as binary file
model_structure = list(model.named_modules())

-w1 = model.state_dict().get("fc1.weight").numpy().T
-b1 = model.state_dict().get("fc1.bias").numpy()
+w1 = model.state_dict().get("fc1.weight").contiguous().numpy()
+b1 = model.state_dict().get("fc1.bias").contiguous().numpy()

print("w1:\n", w1)
print("b1:\n", b1)

-w1 = w1.astype(np.float32).flatten()
-b1 = b1.astype(np.float32).flatten()
+w1_flat = w1.astype(np.float32).flatten()
+b1_flat = b1.astype(np.float32).flatten()

with open("model.bin", "wb") as f:
-    f.write(b1.tobytes())
-    f.write(w1.tobytes())
+    f.write(w1_flat.tobytes())
+    f.write(b1_flat.tobytes())



# Test model
test_input = np.array([
@@ -54,4 +57,4 @@ def forward(self, x: torch.Tensor):
print(output)

print("raw result:")
-print(test_input @ w1 + b1)
+print(test_input @ w1.T + b1)
