Skip to content

Commit

Permalink
ADD: add tensor creation helpers
Browse files Browse the repository at this point in the history
  • Loading branch information
T-K-233 committed Oct 15, 2024
1 parent 67a6f15 commit 9b25b5f
Show file tree
Hide file tree
Showing 2 changed files with 242 additions and 0 deletions.
1 change: 1 addition & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ target_link_options(target-riscv INTERFACE -T ${LINKER_SCRIPT})

set(cpu_impl
./src/print.c
./src/cpu/creation.c
./src/cpu/add.c
./src/cpu/addmm.c
./src/cpu/addscalar.c
Expand Down
241 changes: 241 additions & 0 deletions src/cpu/creation.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,241 @@
#include "nn.h"

/**
 * Allocates a 0-dimensional (scalar) f16 tensor initialized to `data`.
 *
 * @param data  initial scalar value
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor0D_F16 *nn_tensor0d_f16(float16_t data) {
  Tensor0D_F16 *tensor = malloc(sizeof *tensor);
  if (tensor == NULL) {
    return NULL;
  }
  tensor->data = data;
  // FIX: original function fell off the end without returning (undefined behavior)
  return tensor;
}

/**
 * Allocates a 0-dimensional (scalar) f32 tensor initialized to `data`.
 *
 * @param data  initial scalar value
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor0D_F32 *nn_tensor0d_f32(float data) {
  Tensor0D_F32 *tensor = malloc(sizeof *tensor);
  if (tensor == NULL) {
    return NULL;
  }
  tensor->data = data;
  // FIX: original function fell off the end without returning (undefined behavior)
  return tensor;
}

/**
 * Allocates a 1D f16 tensor of shape[0] elements.
 *
 * @param shape  shape[0] is the element count
 * @param data   optional source buffer of shape[0] elements to copy; NULL leaves
 *               the buffer uninitialized
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor1D_F16 *nn_tensor1d_f16(size_t shape[1], const float16_t *data) {
  Tensor1D_F16 *tensor = malloc(sizeof *tensor);
  if (tensor == NULL) {
    return NULL;
  }
  tensor->shape[0] = shape[0];

  size_t n_bytes = shape[0] * sizeof(float16_t);
  tensor->data = malloc(n_bytes);
  if (tensor->data == NULL && n_bytes > 0) {
    // FIX: don't leak the header when the data buffer allocation fails
    free(tensor);
    return NULL;
  }
  if (data != NULL) {
    memcpy(tensor->data, data, n_bytes);
  }
  // FIX: original function fell off the end without returning (undefined behavior)
  return tensor;
}

/**
 * Allocates a 1D f32 tensor of shape[0] elements.
 *
 * @param shape  shape[0] is the element count
 * @param data   optional source buffer of shape[0] elements to copy; NULL leaves
 *               the buffer uninitialized
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor1D_F32 *nn_tensor1d_f32(size_t shape[1], const float *data) {
  Tensor1D_F32 *tensor = malloc(sizeof *tensor);
  if (tensor == NULL) {
    return NULL;
  }
  tensor->shape[0] = shape[0];

  size_t n_bytes = shape[0] * sizeof(float);
  tensor->data = malloc(n_bytes);
  if (tensor->data == NULL && n_bytes > 0) {
    // FIX: don't leak the header when the data buffer allocation fails
    free(tensor);
    return NULL;
  }
  if (data != NULL) {
    memcpy(tensor->data, data, n_bytes);
  }
  // FIX: original function fell off the end without returning (undefined behavior)
  return tensor;
}

/**
 * Allocates a 2D f16 tensor of shape[0] x shape[1] elements (row-major buffer).
 *
 * @param shape  shape[0] and shape[1] are the two dimensions
 * @param data   optional source buffer of shape[0]*shape[1] elements to copy;
 *               NULL leaves the buffer uninitialized
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor2D_F16 *nn_tensor2d_f16(size_t shape[2], const float16_t *data) {
  Tensor2D_F16 *tensor = malloc(sizeof *tensor);
  if (tensor == NULL) {
    return NULL;
  }
  tensor->shape[0] = shape[0];
  tensor->shape[1] = shape[1];

  size_t n_bytes = shape[0] * shape[1] * sizeof(float16_t);
  tensor->data = malloc(n_bytes);
  if (tensor->data == NULL && n_bytes > 0) {
    // FIX: don't leak the header when the data buffer allocation fails
    free(tensor);
    return NULL;
  }
  if (data != NULL) {
    memcpy(tensor->data, data, n_bytes);
  }
  // FIX: original function fell off the end without returning (undefined behavior)
  return tensor;
}

/**
 * Allocates a 2D f32 tensor of shape[0] x shape[1] elements (row-major buffer).
 *
 * @param shape  shape[0] and shape[1] are the two dimensions
 * @param data   optional source buffer of shape[0]*shape[1] elements to copy;
 *               NULL leaves the buffer uninitialized
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor2D_F32 *nn_tensor2d_f32(size_t shape[2], const float *data) {
  Tensor2D_F32 *tensor = malloc(sizeof *tensor);
  if (tensor == NULL) {
    return NULL;
  }
  tensor->shape[0] = shape[0];
  tensor->shape[1] = shape[1];

  size_t n_bytes = shape[0] * shape[1] * sizeof(float);
  tensor->data = malloc(n_bytes);
  if (tensor->data == NULL && n_bytes > 0) {
    // FIX: don't leak the header when the data buffer allocation fails
    free(tensor);
    return NULL;
  }
  if (data != NULL) {
    memcpy(tensor->data, data, n_bytes);
  }
  // FIX: original function fell off the end without returning (undefined behavior)
  return tensor;
}

/**
 * Creates a 0-dimensional f16 tensor holding zero.
 * Thin wrapper over nn_tensor0d_f16; caller owns the result.
 */
__attribute__((weak)) Tensor0D_F16 *nn_zeros0d_f16() {
  return nn_tensor0d_f16(0);
}

/**
 * Creates a 0-dimensional f32 tensor holding zero.
 * Thin wrapper over nn_tensor0d_f32; caller owns the result.
 */
__attribute__((weak)) Tensor0D_F32 *nn_zeros0d_f32() {
  return nn_tensor0d_f32(0);
}

/**
 * Creates a 1D f16 tensor of shape[0] elements, all set to 0.
 *
 * @param shape  shape[0] is the element count
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor1D_F16 *nn_zeros1d_f16(size_t shape[1]) {
  Tensor1D_F16 *tensor = nn_tensor1d_f16(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  for (size_t i = 0; i < shape[0]; i += 1) {
    tensor->data[i] = 0;
  }
  return tensor;
}

/**
 * Creates a 1D f32 tensor of shape[0] elements, all set to 0.
 *
 * @param shape  shape[0] is the element count
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor1D_F32 *nn_zeros1d_f32(size_t shape[1]) {
  Tensor1D_F32 *tensor = nn_tensor1d_f32(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  for (size_t i = 0; i < shape[0]; i += 1) {
    tensor->data[i] = 0;
  }
  return tensor;
}

/**
 * Creates a 2D f16 tensor of shape[0] x shape[1] elements, all set to 0.
 *
 * @param shape  the two dimensions
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor2D_F16 *nn_zeros2d_f16(size_t shape[2]) {
  Tensor2D_F16 *tensor = nn_tensor2d_f16(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  size_t n = shape[0] * shape[1];
  for (size_t i = 0; i < n; i += 1) {
    tensor->data[i] = 0;
  }
  return tensor;
}

/**
 * Creates a 2D f32 tensor of shape[0] x shape[1] elements, all set to 0.
 *
 * @param shape  the two dimensions
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor2D_F32 *nn_zeros2d_f32(size_t shape[2]) {
  Tensor2D_F32 *tensor = nn_tensor2d_f32(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  size_t n = shape[0] * shape[1];
  for (size_t i = 0; i < n; i += 1) {
    tensor->data[i] = 0;
  }
  return tensor;
}

/**
 * Creates a 0-dimensional f16 tensor holding one.
 * Thin wrapper over nn_tensor0d_f16; caller owns the result.
 */
__attribute__((weak)) Tensor0D_F16 *nn_ones0d_f16() {
  return nn_tensor0d_f16(1);
}

/**
 * Creates a 0-dimensional f32 tensor holding one.
 * Thin wrapper over nn_tensor0d_f32; caller owns the result.
 */
__attribute__((weak)) Tensor0D_F32 *nn_ones0d_f32() {
  return nn_tensor0d_f32(1);
}

/**
 * Creates a 1D f16 tensor of shape[0] elements, all set to 1.
 *
 * @param shape  shape[0] is the element count
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor1D_F16 *nn_ones1d_f16(size_t shape[1]) {
  Tensor1D_F16 *tensor = nn_tensor1d_f16(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  for (size_t i = 0; i < shape[0]; i += 1) {
    tensor->data[i] = 1;
  }
  return tensor;
}

/**
 * Creates a 1D f32 tensor of shape[0] elements, all set to 1.
 *
 * @param shape  shape[0] is the element count
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor1D_F32 *nn_ones1d_f32(size_t shape[1]) {
  Tensor1D_F32 *tensor = nn_tensor1d_f32(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  for (size_t i = 0; i < shape[0]; i += 1) {
    tensor->data[i] = 1;
  }
  return tensor;
}

/**
 * Creates a 2D f16 tensor of shape[0] x shape[1] elements, all set to 1.
 *
 * @param shape  the two dimensions
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor2D_F16 *nn_ones2d_f16(size_t shape[2]) {
  Tensor2D_F16 *tensor = nn_tensor2d_f16(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  size_t n = shape[0] * shape[1];
  for (size_t i = 0; i < n; i += 1) {
    tensor->data[i] = 1;
  }
  return tensor;
}

/**
 * Creates a 2D f32 tensor of shape[0] x shape[1] elements, all set to 1.
 *
 * @param shape  the two dimensions
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor2D_F32 *nn_ones2d_f32(size_t shape[2]) {
  Tensor2D_F32 *tensor = nn_tensor2d_f32(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  size_t n = shape[0] * shape[1];
  for (size_t i = 0; i < n; i += 1) {
    tensor->data[i] = 1;
  }
  return tensor;
}

/**
 * Creates a 0-dimensional f16 tensor holding `data`.
 * Thin wrapper over nn_tensor0d_f16; caller owns the result.
 */
__attribute__((weak)) Tensor0D_F16 *nn_full0d_f16(float16_t data) {
  return nn_tensor0d_f16(data);
}

/**
 * Creates a 0-dimensional f32 tensor holding `data`.
 * Thin wrapper over nn_tensor0d_f32; caller owns the result.
 */
__attribute__((weak)) Tensor0D_F32 *nn_full0d_f32(float data) {
  return nn_tensor0d_f32(data);
}

/**
 * Creates a 1D f16 tensor of shape[0] elements, all set to `data`.
 *
 * @param shape  shape[0] is the element count
 * @param data   fill value
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor1D_F16 *nn_full1d_f16(size_t shape[1], float16_t data) {
  Tensor1D_F16 *tensor = nn_tensor1d_f16(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  for (size_t i = 0; i < shape[0]; i += 1) {
    tensor->data[i] = data;
  }
  return tensor;
}

/**
 * Creates a 1D f32 tensor of shape[0] elements, all set to `data`.
 *
 * @param shape  shape[0] is the element count
 * @param data   fill value
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor1D_F32 *nn_full1d_f32(size_t shape[1], float data) {
  Tensor1D_F32 *tensor = nn_tensor1d_f32(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  for (size_t i = 0; i < shape[0]; i += 1) {
    tensor->data[i] = data;
  }
  return tensor;
}

/**
 * Creates a 2D f16 tensor of shape[0] x shape[1] elements, all set to `data`.
 *
 * @param shape  the two dimensions
 * @param data   fill value
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor2D_F16 *nn_full2d_f16(size_t shape[2], float16_t data) {
  Tensor2D_F16 *tensor = nn_tensor2d_f16(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  size_t n = shape[0] * shape[1];
  for (size_t i = 0; i < n; i += 1) {
    tensor->data[i] = data;
  }
  return tensor;
}

/**
 * Creates a 2D f32 tensor of shape[0] x shape[1] elements, all set to `data`.
 *
 * @param shape  the two dimensions
 * @param data   fill value
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor2D_F32 *nn_full2d_f32(size_t shape[2], float data) {
  Tensor2D_F32 *tensor = nn_tensor2d_f32(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  size_t n = shape[0] * shape[1];
  for (size_t i = 0; i < n; i += 1) {
    tensor->data[i] = data;
  }
  return tensor;
}

/**
 * Creates a 0-dimensional f16 tensor holding a pseudo-random value.
 * NOTE(review): stores the raw rand() result (converted via as_f16), not a
 * value normalized to [0, 1) — confirm this matches the intended contract.
 * Caller owns the result.
 */
__attribute__((weak)) Tensor0D_F16 *nn_rand0d_f16() {
  return nn_tensor0d_f16(as_f16(rand()));
}

/**
 * Creates a 0-dimensional f32 tensor holding a pseudo-random value.
 * NOTE(review): stores the raw rand() integer in [0, RAND_MAX] as a float,
 * not a value normalized to [0, 1) — confirm this matches the intended contract.
 * Caller owns the result.
 */
__attribute__((weak)) Tensor0D_F32 *nn_rand0d_f32() {
  return nn_tensor0d_f32(rand());
}

/**
 * Creates a 1D f16 tensor of shape[0] elements filled with pseudo-random values.
 * NOTE(review): each element is the raw rand() result converted via as_f16, not
 * normalized to [0, 1) — confirm this matches the intended contract.
 *
 * @param shape  shape[0] is the element count
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor1D_F16 *nn_rand1d_f16(size_t shape[1]) {
  Tensor1D_F16 *tensor = nn_tensor1d_f16(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  for (size_t i = 0; i < shape[0]; i += 1) {
    tensor->data[i] = as_f16(rand());
  }
  return tensor;
}

/**
 * Creates a 1D f32 tensor of shape[0] elements filled with pseudo-random values.
 * NOTE(review): each element is the raw rand() integer in [0, RAND_MAX], not
 * normalized to [0, 1) — confirm this matches the intended contract.
 *
 * @param shape  shape[0] is the element count
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor1D_F32 *nn_rand1d_f32(size_t shape[1]) {
  Tensor1D_F32 *tensor = nn_tensor1d_f32(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  for (size_t i = 0; i < shape[0]; i += 1) {
    tensor->data[i] = rand();
  }
  return tensor;
}

/**
 * Creates a 2D f16 tensor of shape[0] x shape[1] elements filled with
 * pseudo-random values.
 * NOTE(review): each element is the raw rand() result converted via as_f16, not
 * normalized to [0, 1) — confirm this matches the intended contract.
 *
 * @param shape  the two dimensions
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor2D_F16 *nn_rand2d_f16(size_t shape[2]) {
  Tensor2D_F16 *tensor = nn_tensor2d_f16(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  size_t n = shape[0] * shape[1];
  for (size_t i = 0; i < n; i += 1) {
    tensor->data[i] = as_f16(rand());
  }
  return tensor;
}

/**
 * Creates a 2D f32 tensor of shape[0] x shape[1] elements filled with
 * pseudo-random values.
 * NOTE(review): each element is the raw rand() integer in [0, RAND_MAX], not
 * normalized to [0, 1) — confirm this matches the intended contract.
 *
 * @param shape  the two dimensions
 * @return newly allocated tensor, or NULL on allocation failure; caller owns and frees it
 */
__attribute__((weak)) Tensor2D_F32 *nn_rand2d_f32(size_t shape[2]) {
  Tensor2D_F32 *tensor = nn_tensor2d_f32(shape, NULL);
  if (tensor == NULL) {
    return NULL;  // FIX: propagate allocation failure instead of dereferencing NULL
  }
  size_t n = shape[0] * shape[1];
  for (size_t i = 0; i < n; i += 1) {
    tensor->data[i] = rand();
  }
  return tensor;
}

0 comments on commit 9b25b5f

Please sign in to comment.