Commit 1ac4720
update autograd
Elvin-Ma committed Aug 20, 2023
1 parent ab651c6 commit 1ac4720
Showing 4 changed files with 125 additions and 39 deletions.
59 changes: 52 additions & 7 deletions 0-torch_base/op_demo.py
@@ -118,7 +118,7 @@ def batch_morm_demo():

# Create the batch-normalization layer
batch_norm = nn.BatchNorm2d(num_features, eps=eps, momentum=momentum)
# Define the layer-normalization parameters
# Define the layer-normalization (LayerNorm) parameters
normalized_shape = [16, 8, 8]

# Create the layer-normalization layer
@@ -136,6 +136,7 @@ def batch_morm_demo():

# Apply layer normalization
layer_norm_output = layer_norm(input)
print(layer_norm_output.shape)
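A minimal sketch (not part of this commit), assuming an input of shape (N, 16, 8, 8): LayerNorm normalizes each sample over the trailing normalized_shape dims, whereas BatchNorm2d normalizes each channel over (N, H, W).

import torch
import torch.nn as nn

x = torch.randn(4, 16, 8, 8)
ln = nn.LayerNorm([16, 8, 8])
y = ln(x)
# every sample is normalized independently: per-sample mean ~0, std ~1
print(y[0].mean().item(), y[0].std().item())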


def conv_demo():
@@ -150,31 +151,39 @@ def conv_demo():
print("output shape", output.shape)

def linear_demo():
m = nn.Linear(20, 30) # initialize: [m,k]*[k,n] --> [m,n]
m = nn.Linear(20, 30) # initialize: [*,k]*[k,n] --> [*,n] --> weight and bias are initialized for us automatically
input = torch.randn(512, 20) # set up the input
output = m(input) # run
layer_0 = m(input) # run; the weight is applied with a transpose

m_2 = nn.Linear(30, 40)
output = m_2(layer_0)
print(output.size()) # is there a bias? and what about the weight?
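A short sketch (not in the commit) answering the comment above: nn.Linear stores weight as (out_features, in_features) and computes x @ weight.T + bias, and the bias exists by default.

import torch
import torch.nn as nn

m = nn.Linear(20, 30)
print(m.weight.shape)  # torch.Size([30, 20]) -- stored as (out, in), hence the transpose
print(m.bias.shape)    # torch.Size([30])     -- bias=True is the default
x = torch.randn(512, 20)
manual = x @ m.weight.T + m.bias
print(torch.allclose(m(x), manual, atol=1e-6))  # True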

def maxpool_demo():
m = nn.MaxPool2d(3, stride=2, padding=1)
m = nn.AvgPool2d(3, stride=2, padding=1)
# pool of non-square window
# m = nn.MaxPool2d((3, 2), stride=(2, 1))
input = torch.randn(20, 16, 50, 32)
output = m(input)
print("output shape: ", output.shape)

def global_average_pool():
m = nn.AdaptiveAvgPool2d((2, 3))
m = nn.AdaptiveAvgPool2d((2, 2))
input = torch.randn(1, 2048, 7, 7)
output = m(input)
torch.onnx.export(m, input, "adaptiv_avg.onnx")

print("output shape: ", output.shape)

torch.Tensor()
def batch_norm_demo():
m = nn.BatchNorm2d(100) # 100 is the channel dimension
# Without Learnable Parameters
# m = nn.BatchNorm2d(100, affine=False)
input = torch.randn(20, 100, 35, 45)
input = torch.randint(-100, 100, (20, 100, 35, 45)).float() # which elements get averaged (20*35*45 per channel) --> how many means are computed
output = m(input)
# input2 = torch.randint(-10, 200, (20, 100, 35, 45)).float()
# output1 = m(input2)
print("output shape: ", output.shape)

def rnncell_onnx_get():
@@ -194,15 +203,51 @@ def rnn_onnx_get():
output, hn = rnn(input, h0)
torch.onnx.export(rnn, (input, h0), "rnn.onnx")
# model = onnx.shape_inference.infer_shapes(onnx_model)

def flatten_demo():
input = torch.randn(32, 1, 5, 5)
# With default parameters
m = nn.Flatten()
output = m(input)
output.size()
# torch.Size([32, 25])
# With non-default parameters
m = nn.Flatten(0, 2)
output = m(input)
output.size()

def embedding_demo():
# weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
# embedding = nn.Embedding.from_pretrained(weight)
# # Get embeddings for index 1
# input = torch.LongTensor([1]) # e.g. a vocab of size 35200 --> valid indices [0, 1, ..., 35199]
# output = embedding(input)
# print("output: ", output)

embedding = nn.Embedding(10, 3)
# a batch of 2 samples of 4 indices each
input = torch.LongTensor([[1, 2, 4, 9], [4, 3, 2, 9]]) # int64_t
output = embedding(input)
print(output.shape)
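A sketch (not part of this commit) showing that nn.Embedding is just a learned row-lookup table: indexing the weight matrix directly gives the same values.

import torch
import torch.nn as nn

embedding = nn.Embedding(10, 3)
input = torch.LongTensor([[1, 2, 4, 9], [4, 3, 2, 9]])
output = embedding(input)  # shape (2, 4, 3)
print(torch.equal(output, embedding.weight[input]))  # True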

def gather_demo():
t = torch.tensor([[1, 2], [3, 4]])
output = torch.gather(t, 0, torch.tensor([[0, 0], [1, 0]]))
print(output.shape)
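A sketch (not in the commit) spelling out gather semantics for dim=0: out[i][j] = t[index[i][j]][j], so the output takes the shape of the index tensor.

import torch

t = torch.tensor([[1, 2], [3, 4]])
index = torch.tensor([[0, 0], [1, 0]])
out = torch.gather(t, 0, index)
print(out)  # tensor([[1, 2], [3, 2]])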


if __name__ == "__main__":
gather_demo()
# deform_conv2d_demo(*params[0])
# transposed_conv_demo()
# group_conv_demo()
# conv_demo()
linear_demo()
# linear_demo()
# maxpool_demo()
# global_average_pool()
# batch_norm_demo()
# batch_morm_demo()
#rnn_onnx_get()
# flatten_demo()
embedding_demo()
print("run op_demo.py successfully !!!")
76 changes: 46 additions & 30 deletions 1-tensor_guide/tensor_demo.py
@@ -3,34 +3,34 @@

def tensor_create():
# Method 1
# data = [[1, 2],[3, 4]] # python list
# x_data = torch.tensor(data)
# x_data2 = torch.tensor((1, 2, 3))
# # x_data3 = torch.tensor({"a": 5}) # fail
# print("x_data2: ", x_data2)
data = [[1, 2],[3, 4]] # python list
x_data = torch.tensor(data) #
x_data2 = torch.tensor((1, 2, 3))
# x_data3 = torch.tensor({"a": 5}) # fail
print("x_data2: ", x_data2)

# Method 2
# data = torch.ones(1, 2, 3)
# data1 = torch.zeros(1, 3,4)
# data2 = torch.randn(3, 4, 5)
# data3 = torch.eye(4, 5)
# data4 = torch.randint(5, (2, 10))
# print("data type: ", type(data4))
# print("data2: ", data4)
data = torch.ones(1, 2, 3)
data1 = torch.zeros(1, 3,4)
data2 = torch.randn(3, 4, 5)
data3 = torch.eye(4, 5)
data4 = torch.randint(5, (2, 10))
print("data type: ", type(data4))
print("data2: ", data4)

# Method 3
# data0 = torch.Tensor([1, 2, 3])
# data1 = torch.ones_like(data0)
# data2 = torch.empty_like(data0)
data0 = torch.Tensor([1, 2, 3])
data1 = torch.ones_like(data0)
data2 = torch.empty_like(data0)
# data3 = torch.empty_like(data0)
# print("data: ", data2)
print("data: ", data2)

# Method 4
np_array = np.array([1, 2, 3])
tensor_numpy = torch.from_numpy(np_array)
# tensor_numpy2 = torch.Tensor(np_array) # makes a deep copy of the data
np_array[0] = 100
# data_numpy = tensor_numpy.numpy()
data_numpy = tensor_numpy.numpy()
# print("data numpy: ", type(data_numpy))
print("numpy tensor: ", tensor_numpy)

@@ -47,6 +47,11 @@ def tensor_struct():
# print("stride: ", tensor.stride())
# print("device: ", tensor.device)
# .... for the rest, see /lib/python3.8/site-packages/torch/_C/__init__.pyi

# tensor / ndarray
# 1. meta_data + raw_data;
# 2. meta_data: shape/dtype/stride/dim/device ...
# 3. raw_data: data_ptr

# raw data
print("pytorch data: \n", tensor)
@@ -118,21 +123,24 @@ def numpy_with_torch_tensor():

def tensor_to_demo():
tensor = torch.ones(4, 5)
print("tensor dtype: ", tensor.dtype)
# print("tensor dtype: ", tensor.dtype)
# tensor = tensor.to(torch.float32)
# print("tensor device: ", tensor.device)

# H2D: H = host (CPU), D = device (GPU)
tensor_0 = tensor.to(torch.int32).to("cuda:0") # data migration, H2D: host -> device (GPU)
# print("tensor device: ", tensor_0.device)

# tensor_1 = tensor.cuda(0)
# tensor_1 = tensor.cuda(0).cpu()
# print("tensor1 dtype: ", tensor_1.device)

# if torch.cuda.is_available():
# device = torch.device("cuda:0")
# else:
# device = torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda:0")
else:
device = torch.device("cpu")

# tensor_2 = tensor.to(device)
# print(tensor_2.device)
# tensor_3 = tensor.to(device)

# # dtype conversion
@@ -143,8 +151,8 @@ def tensor_to_demo():

# # 1. get tensor_0's device, 2. copy the data onto that device
tensor_5 = tensor.to(tensor_0).cpu()
tensor_6 = tensor.cuda() + tensor_5
print(tensor_5.device)
# tensor_6 = tensor.cuda() + tensor_5
# print(tensor_5.device)
print(tensor_5.dtype)
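A sketch (not in the commit) condensing the device-agnostic pattern above: .to() can change device and dtype in one call, and is a no-op when nothing changes.

import torch

t = torch.ones(4, 5)
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
t_dev = t.to(device, torch.int32)  # move and cast together
print(t_dev.device, t_dev.dtype)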

def id_with_ptr():
@@ -178,7 +186,7 @@ def broadcast_demo():

def inplace_demo():
a = torch.ones(3, 5)
b = a.add_(5) # be careful with inplace operations
b = a.add_(5) # be careful with inplace operations -->
c = a.add(10)
print("tensor_a data_ptr: ", a)
print("tensor_b data_ptr: ", b)
@@ -209,10 +217,18 @@ def inplace_demo():
# # print("c stride: ", c.stride())

def reshape_vs_view():
a = torch.randn(4, 6)
b = a.T
c = b.reshape(3, 8)
d = b.contiguous().view(3, 8)
data0 = torch.randn(4, 6) # 24 elements
data1 = data0.reshape(6, 4)
data2 = data0.view(6, 4) # reshape and view behave the same here
data3 = data0.transpose(0, 1) # data is now non-contiguous

# data5 = data3.view(3, 8)
datat = data3.reshape(3, 8) # here reshape and view differ
# a = torch.randn(3, 4, 2, 5)
# data4 = data3.contiguous() # performs a copy of the data


print("reshape")

def tensor_api_demo():
a = torch.Tensor()
25 changes: 25 additions & 0 deletions 2-autograd_guide/autograd_demo.py
@@ -177,7 +177,32 @@ def inner(*args, **kargs):
inner.co = 0
return inner

def matmul_demo():
input = torch.randn(10, 3, 4)
mat2 = torch.randn(5, 1, 4, 5)
# res = torch.bmm(input, mat2)
# res = torch.mm(input, mat2)
res = torch.matmul(input, mat2)
res.is_leaf
print(res)
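A sketch (not part of this commit) of why matmul works here while the commented-out bmm/mm calls do not: matmul broadcasts the batch dims (10,) and (5, 1) to (5, 10), then multiplies (3, 4) @ (4, 5).

import torch

input = torch.randn(10, 3, 4)
mat2 = torch.randn(5, 1, 4, 5)
res = torch.matmul(input, mat2)
print(res.shape)  # torch.Size([5, 10, 3, 5])
# torch.bmm needs two 3-D tensors with equal batch; torch.mm needs 2-D tensors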

def autograd_demo():
data0 = torch.randn(2, 2, 4)
w_0 = torch.randn(2, 4, 3, requires_grad=True)
data2 = torch.bmm(data0, w_0)
data3 = torch.sigmoid(data2)
w_1 = torch.randn(2, 3, 5, requires_grad = True)
output = torch.matmul(data3, w_1)
# output.backward()
output.backward(torch.ones_like(output))

w_0 = w_0 - 0.001* w_0.grad

print("run autograd_demo finished !!!")

if __name__ == "__main__":
autograd_demo()
# matmul_demo()
# reqiregrad_set()
# autograd_demo_v2()
# internal_grad_demo()
4 changes: 2 additions & 2 deletions 3-module_guide/minist_main.py
@@ -243,9 +243,9 @@ def container_demo():
print("output shape: ", x.shape)

if __name__ == '__main__':
# main()
main()
# parameters_demo()
# function_demo()
container_demo()
torch.Tensor()
# torch.Tensor()
print("run minist_main.py successfully !!!")
