
Commit

REFACTOR: clean up
T-K-233 committed Jul 20, 2024
1 parent e41e34b commit 6dfb30d
Showing 2 changed files with 0 additions and 139 deletions.
87 changes: 0 additions & 87 deletions nn/functional/nn_math.h → nn/nn_math.h
@@ -128,67 +128,6 @@
// #endif
// }

// inline static void NN__scale_F32(const int n, float *y, const float v) {
// #if defined(GGML_USE_ACCELERATE)
// vDSP_vsmul(y, 1, &v, y, 1, n);
// #elif defined(GGML_SIMD)
// const int np = (n & ~(GGML_F32_STEP - 1));

// GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);

// GGML_F32_VEC ay[GGML_F32_ARR];

// for (int i = 0; i < np; i += GGML_F32_STEP) {
// for (int j = 0; j < GGML_F32_ARR; j++) {
// ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
// ay[j] = GGML_F32_VEC_MUL(ay[j], vx);

// GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
// }
// }

// // leftovers
// for (int i = np; i < n; i += 1) {
// y[i] *= v;
// }
// #else
// // scalar
// for (int i = 0; i < n; i += 1) {
// y[i] *= v;
// }
// #endif
// }

// inline static void NN__scale_f16(const int n, float16_t * y, const float v) {
// #if defined(GGML_SIMD)
// const int np = (n & ~(GGML_F16_STEP - 1));

// GGML_F16_VEC vx = GGML_F16_VEC_SET1(v);

// GGML_F16_VEC ay[GGML_F16_ARR];

// for (int i = 0; i < np; i += GGML_F16_STEP) {
// for (int j = 0; j < GGML_F16_ARR; j++) {
// ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
// ay[j] = GGML_F16_VEC_MUL(ay[j], vx);

// GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j);
// }
// }

// // leftovers
// for (int i = np; i < n; i += 1) {
// y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v);
// }
// #else
// // scalar
// for (int i = 0; i < n; i += 1) {
// y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v);
// }
// #endif
// }


// inline static void NN__step_F32 (const int n, float *y, const float *x) { for (int i = 0; i < n; i += 1) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
// inline static void NN__tanh_F32 (const int n, float *y, const float *x) { for (int i = 0; i < n; i += 1) y[i] = tanhf(x[i]); }
// inline static void NN__elu_F32 (const int n, float *y, const float *x) { for (int i = 0; i < n; i += 1) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
@@ -503,32 +442,6 @@
// dx[i] = ggml_silu_backward_F32(x[i], dy[i]);
// }
// }


// inline static void NN__sum_f32_ggf(const int n, float *s, const float *x) {
// float sum = 0.0;
// for (int i = 0; i < n; i += 1) {
// sum += (float)x[i];
// }
// *s = sum;
// }

// inline static void NN__sum_f16_ggf(const int n, float *s, const float16_t * x) {
// float sum = 0.0f;
// for (int i = 0; i < n; i += 1) {
// sum += GGML_FP16_TO_FP32(x[i]);
// }
// *s = sum;
// }

// inline static void NN__sum_bf16_ggf(const int n, float *s, const bfloat16_t * x) {
// float sum = 0.0f;
// for (int i = 0; i < n; i += 1) {
// sum += GGML_BF16_TO_FP32(x[i]);
// }
// *s = sum;
// }

// inline static void NN__argmax_F32(const int n, int * s, const float *x) {
// float max = -INFINITY;
// int idx = 0;
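The commented-out SIMD kernels removed above (NN__scale_F32, NN__scale_f16) share the same GGML-style structure: round n down to a multiple of the vector step, walk that prefix in vector-sized blocks, then finish the leftover tail with plain scalar code. A minimal sketch of that pattern in plain C, using an illustrative block size instead of the GGML SIMD macros and mirroring the scalar branch of the removed NN__scale_F32 (the function name and BLOCK constant here are mine, not from the repository):

#define BLOCK 4  // illustrative block size; GGML derives this from the SIMD register width

// Scale y[0..n) by v in place: blocked prefix first, scalar leftovers after,
// mirroring the structure of the removed NN__scale_F32.
static inline void scale_f32_blocked(const int n, float *y, const float v) {
    const int np = n & ~(BLOCK - 1);      // largest multiple of BLOCK that fits in n
    for (int i = 0; i < np; i += BLOCK) {
        for (int j = 0; j < BLOCK; j += 1) {
            y[i + j] *= v;                // a SIMD build would use vector load/mul/store here
        }
    }
    for (int i = np; i < n; i += 1) {     // leftovers
        y[i] *= v;
    }
}

In the removed code, the hardware-specific branches (vDSP_vsmul under GGML_USE_ACCELERATE, the GGML_F32_VEC/GGML_F16_VEC macros under GGML_SIMD) only replace the inner block with real vector instructions; the loop structure is the same.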
52 changes: 0 additions & 52 deletions nn/nn_todo
@@ -1,15 +1,3 @@
#ifndef __NN_H
#define __NN_H

#include <math.h>
#include <float.h>
#include <stddef.h>

typedef struct {
size_t rows;
size_t cols;
float *data;
} Matrix;

/*
* ====== Math Functions ======
@@ -20,39 +8,6 @@ void NN_initMatrix(Matrix *m, size_t rows, size_t cols) {
m->data = malloc(rows * cols * sizeof(float));
}

void NN_matmul(Matrix *out, Matrix *a, Matrix *b) {
NN_assert(a->cols == b->rows, "matmul: dimension mismatch");
NN_assert(out->rows == a->rows, "matmul: dimension mismatch");
NN_assert(out->cols == b->cols, "matmul: dimension mismatch");
for (size_t i = 0; i < a->rows; i += 1) {
for (size_t j = 0; j < b->cols; j += 1) {
float sum = 0;
for (size_t k = 0; k < a->cols; k += 1) {
sum += a->data[i * a->cols + k] * b->data[k * b->cols + j];
}
out->data[i * out->cols + j] = sum;
}
}
}

void NN_matadd(Matrix *out, Matrix *a, Matrix *b) {
NN_assert(a->rows == b->rows, "matadd: dimension mismatch");
NN_assert(a->cols == b->cols, "matadd: dimension mismatch");
for (size_t i = 0; i < a->rows; i += 1) {
for (size_t j = 0; j < a->cols; j += 1) {
out->data[i * out->cols + j] = a->data[i * a->cols + j] + b->data[i * b->cols + j];
}
}
}

void NN_transpose(Matrix *out, Matrix *a) {
for (size_t i = 0; i < a->rows; i += 1) {
for (size_t j = 0; j < a->cols; j += 1) {
out->data[j * out->cols + i] = a->data[i * a->cols + j];
}
}
}

void NN_concatenate(Matrix *out, Matrix *a, Matrix *b) {
for (size_t i = 0; i < a->cols; i += 1) {
out->data[i] = a->data[i];
@@ -78,11 +33,6 @@ size_t NN_argmax(Matrix *a) {
* ====== Operators ======
*/

void NN_linear(Matrix *out, Matrix *weight, Matrix *bias, Matrix *input) {
NN_matmul(out, input, weight);
NN_matadd(out, out, bias);
}

void NN_logSoftmax(Matrix *out, Matrix *a) {
float sum = 0;
for (size_t i = 0; i < a->cols; i += 1) {
@@ -93,5 +43,3 @@ void NN_logSoftmax(Matrix *out, Matrix *a) {
}
}


#endif // __NN_H
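For reference, the deleted NN_linear was simply the deleted NN_matmul followed by NN_matadd on the row-major Matrix struct from the top of the file, i.e. out = input * weight + bias. A self-contained sketch of that composition (struct and layout copied from the removed file; the fused function name and the omission of the NN_assert shape checks are mine):

#include <stddef.h>

typedef struct {
    size_t rows;
    size_t cols;
    float *data;   // row-major: element (i, j) lives at data[i * cols + j]
} Matrix;

// out = input * weight + bias, fusing the removed NN_matmul and NN_matadd.
// Shapes assumed: input is (n, k), weight is (k, m), bias and out are (n, m).
static void linear_f32(Matrix *out, const Matrix *weight, const Matrix *bias, const Matrix *input) {
    for (size_t i = 0; i < input->rows; i += 1) {
        for (size_t j = 0; j < weight->cols; j += 1) {
            float sum = 0.0f;
            for (size_t k = 0; k < input->cols; k += 1) {
                sum += input->data[i * input->cols + k] * weight->data[k * weight->cols + j];
            }
            out->data[i * out->cols + j] = sum + bias->data[i * bias->cols + j];
        }
    }
}

In the removed file this was spelled NN_linear(out, weight, bias, input), with the dimension checks living in NN_matmul and NN_matadd rather than in the fused routine.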
