From 9b25b5f2d91e76afe0ce204ca3ee319e1683d982 Mon Sep 17 00:00:00 2001
From: "-T.K.-"
Date: Tue, 15 Oct 2024 11:10:51 -0700
Subject: [PATCH] ADD: add tensor creation helpers

---
 CMakeLists.txt     |   1 +
 src/cpu/creation.c | 258 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 259 insertions(+)
 create mode 100644 src/cpu/creation.c

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2e10587..221efdf 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -89,6 +89,7 @@ target_link_options(target-riscv INTERFACE -T ${LINKER_SCRIPT})
 
 set(cpu_impl
   ./src/print.c
+  ./src/cpu/creation.c
   ./src/cpu/add.c
   ./src/cpu/addmm.c
   ./src/cpu/addscalar.c
diff --git a/src/cpu/creation.c b/src/cpu/creation.c
new file mode 100644
index 0000000..457cf6b
--- /dev/null
+++ b/src/cpu/creation.c
@@ -0,0 +1,258 @@
+#include "nn.h"
+
+/* Tensor creation helpers for the CPU backend.
+ *
+ * Every constructor heap-allocates the tensor struct (and, for 1-D and 2-D
+ * tensors, a data buffer sized from shape); the caller owns the returned
+ * pointer. Passing data == NULL leaves the buffer uninitialized so the
+ * zeros/ones/full/rand variants can fill it in place. Allocation failures
+ * are not checked (NOTE(review): confirm acceptable for the target).
+ */
+
+__attribute__((weak)) Tensor0D_F16 *nn_tensor0d_f16(float16_t data) {
+  Tensor0D_F16 *tensor = (Tensor0D_F16 *)malloc(sizeof(Tensor0D_F16));
+  tensor->data = data;
+  return tensor;
+}
+
+__attribute__((weak)) Tensor0D_F32 *nn_tensor0d_f32(float data) {
+  Tensor0D_F32 *tensor = (Tensor0D_F32 *)malloc(sizeof(Tensor0D_F32));
+  tensor->data = data;
+  return tensor;
+}
+
+__attribute__((weak)) Tensor1D_F16 *nn_tensor1d_f16(size_t shape[1], const float16_t *data) {
+  Tensor1D_F16 *tensor = (Tensor1D_F16 *)malloc(sizeof(Tensor1D_F16));
+  tensor->shape[0] = shape[0];
+
+  size_t n_bytes = shape[0] * sizeof(float16_t);
+  tensor->data = (float16_t *)malloc(n_bytes);
+  if (data != NULL) {
+    memcpy(tensor->data, data, n_bytes);
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor1D_F32 *nn_tensor1d_f32(size_t shape[1], const float *data) {
+  Tensor1D_F32 *tensor = (Tensor1D_F32 *)malloc(sizeof(Tensor1D_F32));
+  tensor->shape[0] = shape[0];
+
+  size_t n_bytes = shape[0] * sizeof(float);
+  tensor->data = (float *)malloc(n_bytes);
+  if (data != NULL) {
+    memcpy(tensor->data, data, n_bytes);
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor2D_F16 *nn_tensor2d_f16(size_t shape[2], const float16_t *data) {
+  Tensor2D_F16 *tensor = (Tensor2D_F16 *)malloc(sizeof(Tensor2D_F16));
+  tensor->shape[0] = shape[0];
+  tensor->shape[1] = shape[1];
+
+  size_t n_bytes = shape[0] * shape[1] * sizeof(float16_t);
+  tensor->data = (float16_t *)malloc(n_bytes);
+  if (data != NULL) {
+    memcpy(tensor->data, data, n_bytes);
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor2D_F32 *nn_tensor2d_f32(size_t shape[2], const float *data) {
+  Tensor2D_F32 *tensor = (Tensor2D_F32 *)malloc(sizeof(Tensor2D_F32));
+  tensor->shape[0] = shape[0];
+  tensor->shape[1] = shape[1];
+
+  size_t n_bytes = shape[0] * shape[1] * sizeof(float);
+  tensor->data = (float *)malloc(n_bytes);
+  if (data != NULL) {
+    memcpy(tensor->data, data, n_bytes);
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor0D_F16 *nn_zeros0d_f16() {
+  Tensor0D_F16 *tensor = nn_tensor0d_f16(0);
+  return tensor;
+}
+
+__attribute__((weak)) Tensor0D_F32 *nn_zeros0d_f32() {
+  Tensor0D_F32 *tensor = nn_tensor0d_f32(0);
+  return tensor;
+}
+
+__attribute__((weak)) Tensor1D_F16 *nn_zeros1d_f16(size_t shape[1]) {
+  Tensor1D_F16 *tensor = nn_tensor1d_f16(shape, NULL);
+  size_t n = shape[0];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = 0;
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor1D_F32 *nn_zeros1d_f32(size_t shape[1]) {
+  Tensor1D_F32 *tensor = nn_tensor1d_f32(shape, NULL);
+  size_t n = shape[0];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = 0;
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor2D_F16 *nn_zeros2d_f16(size_t shape[2]) {
+  Tensor2D_F16 *tensor = nn_tensor2d_f16(shape, NULL);
+  size_t n = shape[0] * shape[1];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = 0;
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor2D_F32 *nn_zeros2d_f32(size_t shape[2]) {
+  Tensor2D_F32 *tensor = nn_tensor2d_f32(shape, NULL);
+  size_t n = shape[0] * shape[1];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = 0;
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor0D_F16 *nn_ones0d_f16() {
+  Tensor0D_F16 *tensor = nn_tensor0d_f16(1);
+  return tensor;
+}
+
+__attribute__((weak)) Tensor0D_F32 *nn_ones0d_f32() {
+  Tensor0D_F32 *tensor = nn_tensor0d_f32(1);
+  return tensor;
+}
+
+__attribute__((weak)) Tensor1D_F16 *nn_ones1d_f16(size_t shape[1]) {
+  Tensor1D_F16 *tensor = nn_tensor1d_f16(shape, NULL);
+  size_t n = shape[0];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = 1;
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor1D_F32 *nn_ones1d_f32(size_t shape[1]) {
+  Tensor1D_F32 *tensor = nn_tensor1d_f32(shape, NULL);
+  size_t n = shape[0];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = 1;
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor2D_F16 *nn_ones2d_f16(size_t shape[2]) {
+  Tensor2D_F16 *tensor = nn_tensor2d_f16(shape, NULL);
+  size_t n = shape[0] * shape[1];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = 1;
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor2D_F32 *nn_ones2d_f32(size_t shape[2]) {
+  Tensor2D_F32 *tensor = nn_tensor2d_f32(shape, NULL);
+  size_t n = shape[0] * shape[1];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = 1;
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor0D_F16 *nn_full0d_f16(float16_t data) {
+  Tensor0D_F16 *tensor = nn_tensor0d_f16(data);
+  return tensor;
+}
+
+__attribute__((weak)) Tensor0D_F32 *nn_full0d_f32(float data) {
+  Tensor0D_F32 *tensor = nn_tensor0d_f32(data);
+  return tensor;
+}
+
+__attribute__((weak)) Tensor1D_F16 *nn_full1d_f16(size_t shape[1], float16_t data) {
+  Tensor1D_F16 *tensor = nn_tensor1d_f16(shape, NULL);
+  size_t n = shape[0];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = data;
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor1D_F32 *nn_full1d_f32(size_t shape[1], float data) {
+  Tensor1D_F32 *tensor = nn_tensor1d_f32(shape, NULL);
+  size_t n = shape[0];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = data;
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor2D_F16 *nn_full2d_f16(size_t shape[2], float16_t data) {
+  Tensor2D_F16 *tensor = nn_tensor2d_f16(shape, NULL);
+  size_t n = shape[0] * shape[1];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = data;
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor2D_F32 *nn_full2d_f32(size_t shape[2], float data) {
+  Tensor2D_F32 *tensor = nn_tensor2d_f32(shape, NULL);
+  size_t n = shape[0] * shape[1];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = data;
+  }
+  return tensor;
+}
+
+/* NOTE(review): rand helpers store raw rand() values (0..RAND_MAX), not
+ * values normalized to [0, 1) -- confirm this is the intended contract. */
+__attribute__((weak)) Tensor0D_F16 *nn_rand0d_f16() {
+  Tensor0D_F16 *tensor = nn_tensor0d_f16(as_f16(rand()));
+  return tensor;
+}
+
+__attribute__((weak)) Tensor0D_F32 *nn_rand0d_f32() {
+  Tensor0D_F32 *tensor = nn_tensor0d_f32(rand());
+  return tensor;
+}
+
+__attribute__((weak)) Tensor1D_F16 *nn_rand1d_f16(size_t shape[1]) {
+  Tensor1D_F16 *tensor = nn_tensor1d_f16(shape, NULL);
+  size_t n = shape[0];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = as_f16(rand());
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor1D_F32 *nn_rand1d_f32(size_t shape[1]) {
+  Tensor1D_F32 *tensor = nn_tensor1d_f32(shape, NULL);
+  size_t n = shape[0];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = rand();
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor2D_F16 *nn_rand2d_f16(size_t shape[2]) {
+  Tensor2D_F16 *tensor = nn_tensor2d_f16(shape, NULL);
+  size_t n = shape[0] * shape[1];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = as_f16(rand());
+  }
+  return tensor;
+}
+
+__attribute__((weak)) Tensor2D_F32 *nn_rand2d_f32(size_t shape[2]) {
+  Tensor2D_F32 *tensor = nn_tensor2d_f32(shape, NULL);
+  size_t n = shape[0] * shape[1];
+  for (size_t i = 0; i < n; i += 1) {
+    tensor->data[i] = rand();
+  }
+  return tensor;
+}