Merge pull request #312 from wildkid1024/master
Added new ops, with support for low-level op operations.
Showing 19 changed files with 761 additions and 63 deletions.
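For context, the new ops are exposed as a thin Python layer over the pyfastllm bindings; a minimal sketch of the intended call style, using only names that appear in this diff (the linear case from the new test script):

import fastllm

x = fastllm.Tensor(fastllm.float32, [1, 2], [1, 2])
w = fastllm.Tensor(fastllm.float32, [3, 2], [3, 4, 5, 5, 6, 7])
b = fastllm.Tensor(fastllm.float32, [3], [0, 1, 1])
print(fastllm.ops.linear(x, w, b))  # low-level op call, wrapping fastllm.linear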
New file: test script exercising the new low-level ops against NumPy reference implementations.
@@ -0,0 +1,93 @@
import fastllm
import numpy as np


def np_rms_norm(inputs, weights, eps):
    # NumPy reference for RMSNorm: scale by the root mean square over the last axis
    channel = inputs.shape[-1]
    sqrt_mean = np.sqrt(np.sum(inputs**2, axis=-1, keepdims=True) / channel + eps)
    return inputs / sqrt_mean * weights


def np_layer_norm(inputs, gamma, beta, axis=-1):
    # NumPy reference for LayerNorm over the given axis
    assert axis < len(inputs.shape), "axis should be less than the number of input dims"
    mean = np.mean(inputs, axis=axis, keepdims=True)
    var = np.var(inputs, axis=axis, keepdims=True)
    output = (inputs - mean) / np.sqrt(var) * gamma + beta
    return output


def np_linear(inputs, weights, bias):
    # NumPy reference for a linear layer: y = x @ W.T + b
    output = np.matmul(inputs, weights.T) + bias
    return output


def np_softmax(inputs, axis=None):
    # numerically stable softmax
    maxv = inputs.max(axis, keepdims=True)
    exp_v = np.exp(inputs - maxv)
    exp_sum = np.sum(exp_v, axis=axis, keepdims=True)
    return exp_v / exp_sum


def np_silu(inputs):
    # SiLU (swish): x * sigmoid(x)
    return inputs / (1 + np.exp(-inputs))


def np_attention(q, k, v, mask=None, group=None, scale=None):
    # simplified scaled dot-product attention; mask and group are accepted for
    # signature parity with fastllm.ops.attention but ignored here
    qk = np_softmax(q @ k.T * scale, axis=-1)
    attn = qk @ v
    return attn


def test_linear():
    inputs = np.array([[1, 2]])
    weight = np.array([[3, 4, 5, 5, 6, 7]]).reshape([3, 2])
    bias = np.array([0, 1, 1])
    np_output = np_linear(inputs, weight, bias)
    print(np_output)

    input = fastllm.Tensor(fastllm.float32, [1, 2], [1, 2])
    weights = fastllm.Tensor(fastllm.float32, [3, 2], [3, 4, 5, 5, 6, 7])
    bias = fastllm.Tensor(fastllm.float32, [3], [0, 1, 1])
    out = fastllm.ops.linear(input, weights, bias)
    print(out)


def test_rms_norm():
    inputs = np.array([1, 5]).reshape([1, 2])
    weights = np.array([1, 3]).reshape([1, 2])
    eps = 1e-6

    np_out = np_rms_norm(inputs, weights, eps)
    print(np_out)

    input = fastllm.Tensor(fastllm.float32, [1, 2], [1, 5])
    weights = fastllm.Tensor(fastllm.float32, [1, 2], [1, 3])
    out = fastllm.ops.rms_norm(input, weights, eps=1e-6)
    print(out)


def test_silu():
    # note: this currently exercises softmax; the silu path is left commented out
    inputs = np.array([1, 5]).reshape([1, 2])
    output = np_softmax(inputs)
    # output = np_silu(inputs)
    print(output)

    inputs = fastllm.Tensor(fastllm.float32, [1, 2], [1, 5])
    out = fastllm.ops.activation(input=inputs, activate_type="softmax")
    # out = fastllm.ops.activation(input=inputs, activate_type="silu")
    print(out)


def test_attention():
    q = np.array([1, 2, 3, 4, 5, 6]).reshape([2, 3])
    k = np.array([5, 6, 7, 8, 9, 10]).reshape([2, 3])
    v = np.array([1, 1, 1, 2, 1, 3]).reshape([2, 3])
    scale = 1 / np.sqrt(q.shape[-1])
    output = np_attention(q, k, v, scale=scale)
    print(output)

    q = fastllm.Tensor(fastllm.float32, [1, 2, 3], [1, 2, 3, 4, 5, 6])
    k = fastllm.Tensor(fastllm.float32, [1, 2, 3], [5, 6, 7, 8, 9, 10])
    v = fastllm.Tensor(fastllm.float32, [1, 2, 3], [1, 1, 1, 2, 1, 3])
    mask = fastllm.Tensor()
    output = fastllm.ops.attention(q, k, v, mask, group=1, scale=scale, attentionType=0)
    print(output)


test_attention()
test_silu()
test_linear()
test_rms_norm()
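As a sanity check on the NumPy reference path above, the rms_norm case can be worked by hand (values follow from the test inputs; the fastllm op output should match within floating-point tolerance):

# test_rms_norm inputs: x = [[1, 5]], w = [[1, 3]], eps = 1e-6
# sqrt_mean = sqrt((1**2 + 5**2) / 2 + 1e-6) ≈ 3.6055514
# np_rms_norm(x, w, eps) ≈ [[1 / 3.6055514, 5 / 3.6055514 * 3]] ≈ [[0.27735, 4.16025]]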
Modified file: package __init__, wiring pyfastllm and the new ops module into one namespace.
@@ -1,2 +1,14 @@
import os
import sys
import ctypes
import glob

_BASE_DIR = os.path.dirname(__file__)
sys.path.append(_BASE_DIR)
# libs = glob.glob("*.so")
# for lib in libs: _cdll = ctypes.cdll.LoadLibrary(lib)

from pyfastllm import *
from . import utils
from . import functions as ops
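With this __init__ in place, the native pyfastllm symbols and the new Python-level wrappers share a single namespace; a small illustration using only names that appear elsewhere in this diff:

import fastllm

print(fastllm.float32)       # dtype re-exported from pyfastllm
print(fastllm.ops.rms_norm)  # wrapper defined in the new functions module
print(fastllm.utils)         # helper submodule imported above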
New file: functions package __init__, re-exporting the op wrappers.
@@ -0,0 +1 @@
from .fastllm_ops import *
Empty file.
New file: fastllm_ops, Python wrappers around the pyfastllm operators.
@@ -0,0 +1,78 @@
import pyfastllm as fastllm


def embedding(data: fastllm.Tensor):
    # TODO: add input checks
    return fastllm.embedding(data)


def rms_norm(input: fastllm.Tensor, weight: fastllm.Tensor, eps: float, output: fastllm.Tensor = None):
    output = fastllm.rms_norm(input, weight, eps)
    return output


def layer_norm(input: fastllm.Tensor,
               gamma: fastllm.Tensor,
               beta: fastllm.Tensor,
               axis: int = -1):
    output = fastllm.layer_norm(input, gamma, beta, axis)
    return output


def linear(input: fastllm.Tensor,
           weight: fastllm.Tensor,
           bias: fastllm.Tensor):
    output = fastllm.linear(input, weight, bias)
    return output


def matmul(input0: fastllm.Tensor,
           input1: fastllm.Tensor,
           alpha: fastllm.Tensor):
    output = fastllm.matmul(input0, input1, alpha)
    return output


def attention(q: fastllm.Tensor,
              k: fastllm.Tensor,
              v: fastllm.Tensor,
              mask: fastllm.Tensor,
              group: int,
              scale: float,
              attentionType: int):
    output = fastllm.attention(q, k, v, mask, group, scale, attentionType)
    return output


def activation(input: fastllm.Tensor, axis=-1, activate_type="silu"):
    # dispatch to the matching pyfastllm activation; softmax additionally takes an axis
    assert activate_type in ("softmax", "silu", "gelu", "swiglu")
    func = getattr(fastllm, activate_type)
    if activate_type == "softmax":
        return func(input, axis)
    return func(input)


def mul(input: fastllm.Tensor, v: int):
    output = fastllm.mul(input, v)
    return output


def matmul_transB():
    # placeholder; the corresponding binding is not wrapped yet
    pass


def add(input0: fastllm.Tensor, input1: fastllm.Tensor):
    output = fastllm.add(input0, input1)
    return output


# The ops below are placeholders; the corresponding bindings are not wrapped yet.
def AttentionMask():
    pass


def AlibiMask():
    pass


def topk():
    pass


def RotatePosition2D():
    pass


def NearlyRotatePosition2D():
    pass


def LlamaRotatePosition2D():
    pass


def RepeatPenalty():
    pass
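The layer_norm wrapper above is not exercised by the new test script; a hedged usage sketch following the same calling convention as the other ops (shapes and values are illustrative, not taken from the PR):

import fastllm

x = fastllm.Tensor(fastllm.float32, [1, 3], [1, 2, 3])
gamma = fastllm.Tensor(fastllm.float32, [1, 3], [1, 1, 1])
beta = fastllm.Tensor(fastllm.float32, [1, 3], [0, 0, 0])
print(fastllm.ops.layer_norm(x, gamma, beta, axis=-1))  # wraps fastllm.layer_norm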