ADD: add matvec test
T-K-233 committed Jun 8, 2024
1 parent 82ba2d4 commit 54a2105
Showing 3 changed files with 59 additions and 13 deletions.
4 changes: 0 additions & 4 deletions nn/src/matmul/nn_matmul_rvv.c
@@ -23,9 +23,6 @@ void NN_matmul_F32_RVV(Tensor *out, Tensor *a, Tensor *b) {
   vfloat32m1_t vec_zero = __riscv_vfmv_v_f_f32m1(0, vlmax);
   for (size_t i = 0; i < a->shape[0]; i += 1) {
     for (size_t j = 0; j < b->shape[1]; j += 1) {
-      // a_ptr = a->data + a->strides[0] * i;
-      // b_ptr = b->data + b->strides[1] * j;
-
       uint8_t *a_ptr_v = a_ptr;
       uint8_t *b_ptr_v = b_ptr;
 
@@ -54,4 +51,3 @@ void NN_matmul_F32_RVV(Tensor *out, Tensor *a, Tensor *b) {
     b_ptr -= b->strides[1] * b->shape[1];
   }
 }
-
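For context, NN_matmul_F32_RVV appears to compute each output element as a strip-mined RVV dot product, which is why the kernel carries the a_ptr/b_ptr stride bookkeeping touched in the hunk above. Below is a minimal, self-contained sketch of that dot-product pattern for a plain row-major matrix-vector product; it is illustrative only (matvec_f32_rvv is not a function in this repo) and assumes the RVV 1.0 intrinsics from <riscv_vector.h>:

#include <stddef.h>
#include <riscv_vector.h>

// Illustrative sketch only: y = A * x for row-major A of shape rows x cols.
static void matvec_f32_rvv(float *y, const float *a, const float *x,
                           size_t rows, size_t cols) {
  for (size_t i = 0; i < rows; i += 1) {
    const float *a_ptr = a + i * cols;   // walks along row i of A
    const float *x_ptr = x;              // walks along the input vector
    size_t k = cols;
    // Element 0 of vec_s carries the running sum across strip-mined iterations.
    vfloat32m1_t vec_s = __riscv_vfmv_v_f_f32m1(0, __riscv_vsetvlmax_e32m1());
    while (k > 0) {
      size_t vl = __riscv_vsetvl_e32m1(k);
      vfloat32m1_t vec_a = __riscv_vle32_v_f32m1(a_ptr, vl);
      vfloat32m1_t vec_x = __riscv_vle32_v_f32m1(x_ptr, vl);
      vfloat32m1_t vec_prod = __riscv_vfmul_vv_f32m1(vec_a, vec_x, vl);
      // Unordered sum reduction: vec_s[0] += vec_prod[0] + ... + vec_prod[vl-1]
      vec_s = __riscv_vfredusum_vs_f32m1_f32m1(vec_prod, vec_s, vl);
      a_ptr += vl;
      x_ptr += vl;
      k -= vl;
    }
    y[i] = __riscv_vfmv_f_s_f32m1_f32(vec_s);
  }
}

Accumulating through the scalar operand of vfredusum keeps the result correct when vl shrinks on the final strip, regardless of tail policy; the repo's matmul kernel additionally walks b with its tensor strides, hence the pointer adjustments shown in the hunk above.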
45 changes: 43 additions & 2 deletions test/src/main.c
@@ -74,7 +74,36 @@ int main() {
 
   // matvec
   {
     // array gen
+    Tensor *H = NN_rand(2, (size_t[]){N, M}, DTYPE_F32);
+    Tensor *V = NN_rand(2, (size_t[]){M, 1}, DTYPE_F32);
+    Tensor *W = NN_rand(2, (size_t[]){1, M}, DTYPE_F32);
+
+    printf("matvec:\t\t");
+    Tensor *golden_vec = NN_tensor(2, (size_t[]){N, 1}, DTYPE_F32, NULL);
+    Tensor *actual_vec = NN_tensor(2, (size_t[]){N, 1}, DTYPE_F32, NULL);
+
+    NN_matmul_F32(golden_vec, H, V);
+    cycles = READ_CSR("mcycle");
+    NN_matmul_F32_RVV(actual_vec, H, V);
+    cycles = READ_CSR("mcycle") - cycles;
+    printf("%s (%lu)\n", compare_2d(golden_vec, actual_vec, N, 1) ? "pass" : "fail", cycles);
+
+    printf("matvec_t:\t");
+    NN_transpose_F32(H, H);
+
+    NN_matmul_F32(golden_vec, W, H);
+    cycles = READ_CSR("mcycle");
+    NN_matmul_F32_RVV(actual_vec, W, H);
+    cycles = READ_CSR("mcycle") - cycles;
+    printf("%s (%lu)\n", compare_2d(golden_vec, actual_vec, N, 1) ? "pass" : "fail", cycles);
+
+    NN_freeTensorData(H);
+    NN_deleteTensor(H);
+    NN_freeTensorData(V);
+    NN_deleteTensor(V);
+    NN_freeTensorData(W);
+    NN_deleteTensor(W);
   }
 
   // max and min
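The timing in the new test brackets each kernel call with reads of the mcycle CSR. A plausible shape for the READ_CSR macro is sketched below as an assumption for illustration (the repo's own header defines the real one), together with the measurement pattern used above:

// Assumed sketch of a RISC-V CSR read macro; the actual definition lives elsewhere in the repo.
#define READ_CSR(reg) ({ unsigned long __tmp;            \
  asm volatile ("csrr %0, " reg : "=r"(__tmp));          \
  __tmp; })

// Measurement pattern from the test above:
//   cycles = READ_CSR("mcycle");
//   NN_matmul_F32_RVV(actual_vec, H, V);
//   cycles = READ_CSR("mcycle") - cycles;

Reading mcycle directly generally requires machine mode or delegated counter access, which bare-metal test setups like this typically have.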
@@ -196,15 +225,27 @@ int main() {
     int out_features = 4;
     int in_features = 3;
 
+    float weights[] = {-0.00432252, 0.30971584, -0.47518533, -0.4248946, -0.22236897, 0.15482073, -0.01143914, 0.45777494, -0.0512364, 0.15277413, -0.1744828, -0.11348708};
+    float bias[] = {-0.5515707, -0.38236874, -0.23799711, 0.02138712};
+
+
     Tensor *x = NN_ones(2, (size_t[]){batch, in_features}, DTYPE_F32);
-    Tensor *w = NN_ones(2, (size_t[]){out_features, in_features}, DTYPE_F32);
-    Tensor *b = NN_ones(2, (size_t[]){batch, out_features}, DTYPE_F32);
+    Tensor *w = NN_tensor(2, (size_t[]){out_features, in_features}, DTYPE_F32, weights);
+    Tensor *b = NN_tensor(2, (size_t[]){1, out_features}, DTYPE_F32, bias);
 
     Tensor *y = NN_tensor(2, (size_t[]){batch, out_features}, DTYPE_F32, NULL);
 
     // NN_linear_F32(y, x, w, b);
     NN_linear_F32_RVV(y, x, w, b);
 
     printf("linear:\t\t");
+    printf("weights:\n");
+    NN_printf(w);
+    printf("bias:\n");
+    NN_printf(b);
+    printf("input:\n");
+    NN_printf(x);
+    printf("output:\n");
+    NN_printf(y);
   }
 
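For checking the printed tensors by hand: torch.nn.Linear (and the test.py script below) computes y = x @ W.T + b with W of shape (out_features, in_features). A scalar reference in that spirit is sketched here; linear_f32_ref is a hypothetical helper for illustration, not part of the library, and row-major float buffers are assumed:

#include <stddef.h>

// Hypothetical scalar reference: y[n][o] = b[o] + sum_i x[n][i] * w[o][i]
static void linear_f32_ref(float *y, const float *x, const float *w, const float *b,
                           size_t batch, size_t in_features, size_t out_features) {
  for (size_t n = 0; n < batch; n += 1) {
    for (size_t o = 0; o < out_features; o += 1) {
      float acc = b ? b[o] : 0.0f;
      for (size_t i = 0; i < in_features; i += 1) {
        acc += x[n * in_features + i] * w[o * in_features + i];
      }
      y[n * out_features + o] = acc;
    }
  }
}

With batch = 1 and x filled with ones by NN_ones, each output element should simply be the corresponding row sum of weights[] plus bias[], which makes the printed output easy to verify against test.py.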
23 changes: 16 additions & 7 deletions test/src/test.py
@@ -1,13 +1,22 @@
 import torch
 
-batch = 5
-out_features = 2
+# seed random number generator
+torch.manual_seed(0)
+
+batch = 1
+out_features = 4
 in_features = 3
 
-x = torch.ones((batch, in_features))
-w = torch.ones((out_features, in_features))
-b = torch.zeros((out_features))
+l = torch.nn.Linear(in_features, out_features)
+
+print(l.state_dict()["weight"].numpy().flatten())
+print(l.state_dict()["bias"].numpy().flatten())
+
+
+input = torch.ones(batch, in_features)
 
-y = x @ w.T + b
+output = l.forward(input)
 
-print(y)
+print(output)