-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathSJ.py
157 lines (154 loc) · 5.65 KB
/
SJ.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
import numpy as np
import math
import random
import h5py
A=0.01# negative-branch slope/scale shared by leaky_relu and elu (and their derivatives in `der`)
class act:#activation functions (element-wise; accept scalars or numpy arrays)
    """Namespace of activation functions.

    Fix: the original methods lacked ``@staticmethod``, so calling them on an
    instance (``act().sigmoid(x)``) passed the instance as ``x`` and crashed.
    Class-level calls (``act.sigmoid(x)``) behave exactly as before.
    """
    @staticmethod
    def sigmoid(x):
        """Logistic sigmoid 1 / (1 + e^-x)."""
        return 1.0/(1.0+np.exp(-x))
    @staticmethod
    def tanh(x):
        """Hyperbolic tangent, computed from exponentials."""
        return (np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))
    @staticmethod
    def none(x):
        """Identity (linear) activation."""
        return x
    @staticmethod
    def relu(x):
        """Rectified linear unit: x where x > 0, else 0."""
        return 1 * (x > 0) * x
    @staticmethod
    def leaky_relu(x):
        """Leaky ReLU: slope A on the non-positive side (A is module-level)."""
        return np.where(x <=0, x*A, x)
    @staticmethod
    def elu(x):
        """Exponential linear unit: A*(e^x - 1) on the non-positive side."""
        return np.where(x <=0, A*(np.exp(x)-1), x)
class der:#derivatives of the activations in `act`
    """Namespace of activation-function derivatives.

    Fixes: added ``@staticmethod`` (instance calls previously crashed), and
    ``tanh`` now uses ``** 2`` instead of ``math.pow``, which raised
    ``TypeError`` on numpy arrays while every sibling supports arrays.
    """
    @staticmethod
    def sigmoid(x):
        """d/dx sigmoid = sigmoid(x) * (1 - sigmoid(x))."""
        return act.sigmoid(x)*(1-act.sigmoid(x))
    @staticmethod
    def tanh(x):
        """d/dx tanh = 1 - tanh(x)^2; `**` works on scalars and arrays alike."""
        return 1-act.tanh(x)**2
    @staticmethod
    def none(x):
        """Derivative of the identity activation."""
        return 1
    @staticmethod
    def relu(x):
        """1 where x > 0, else 0."""
        return 1 * (x > 0) * 1
    @staticmethod
    def leaky_relu(x):
        """1 on the positive side, A elsewhere (A is module-level)."""
        return np.where(x >0, 1, A)
    @staticmethod
    def elu(x):
        """A*e^x on the non-positive side, 1 elsewhere."""
        return np.where(x <=0, A*np.exp(x), 1)
class loss:#loss functions comparing target `tru` against prediction `fed`
    def ms(tru,fed):
        """Half sum of squared errors: 0.5 * sum((fed - tru)^2)."""
        residual = fed - tru
        return 0.5 * np.sum(residual ** 2)
    def square(tru,fed):
        """Plain sum of squared errors: sum((fed - tru)^2)."""
        residual = fed - tru
        return np.sum(residual ** 2)
class tensor:
    """Thin wrapper around a numpy array with index get/set accessors.

    Fix: the original annotated the constructor argument as ``x: np`` —
    ``np`` is the numpy *module*, not a type; corrected to ``np.ndarray``.
    """
    def __init__(self,x:np.ndarray):
        # Wrapped array; stored by reference, not copied.
        self.a=x
    def lens(self) -> int:
        """Length of the wrapped array's first axis."""
        return len(self.a)
    def set(self,x,y):
        """Assign value `y` at index `x`."""
        self.a[x]=y
    def get(self,x):
        """Return the element at index `x`."""
        return self.a[x]
    def gets(self) -> np.ndarray:
        """Return the underlying array itself (no copy)."""
        return self.a
class bp:
    """Fully-connected feed-forward network trained by backpropagation.

    NOTE(review): weights/biases are 5-D arrays indexed as
    w[layer_from][unit_from][layer_to][unit_to][...]; only the
    [i][j][i+1][k] slices are ever read, so most entries are unused.
    """
    def __init__(self,sizes,ders,acts,insizes=1,los=loss.ms):#define the network
        # sizes:   units per layer, e.g. [2, 3, 1]
        # ders:    per-layer derivative functions (see class `der`)
        # acts:    per-layer activation functions (see class `act`)
        # insizes: trailing dimension of each weight/bias entry
        # los:     loss function (defaults to loss.ms)
        self.size=sizes#network layout
        self.derx=ders#derivative functions
        self.actx=acts#activation functions
        self.losx=los#loss function
        self.w=np.random.rand(len(sizes),max(sizes),len(sizes),max(sizes),insizes)#weights, uniform [0, 1)
        self.b=np.random.rand(len(sizes),max(sizes),len(sizes),max(sizes),insizes)#biases, uniform [0, 1)
        self.insize=insizes#size of each weight/bias entry
    def getw(self,x,y,xx,yy):#get one weight entry
        return self.w[x,y,xx,yy]
    def getb(self,x,y,xx,yy):#get one bias entry
        return self.b[x,y,xx,yy]
    def getws(self):#get the whole weight array
        return self.w
    def getbs(self):#get the whole bias array
        return self.b
    def los(self,input,out):#loss of a forward pass on `input` against target tensors `out`
        a=[]#intermediate buffer of raw target arrays
        for i in out:
            a.append(i.gets())
        a=np.array(a)
        return self.losx(a,self.feedforward(input))
    def feedforward(self,input):#forward pass; caches activations and derivatives on self
        ass=input#alias of input (iterable of `tensor`s)
        a=[[]]#per-layer activation outputs
        ab=[[]]#per-layer derivative outputs
        a.append([])
        ab.append([])
        for i in ass:
            a[0].append(i.gets())
            ab[0].append(i.gets())
        for i in range(len(self.size)-1):
            a.append([])
            ab.append([])
            for j in range(self.size[i+1]):
                jj=np.array(0)#activation accumulator
                js=np.array(0)#derivative accumulator
                # NOTE(review): `js` is immediately rebound as the loop index
                # below, so the derivative accumulation on the second line is
                # clobbered at the start of every iteration — looks like a bug;
                # confirm intended behavior before relying on self.feeds.
                for js in range(self.size[i]):
                    jj=jj+self.actx[i]((a[i][js]*self.w[i][js][i+1][j])+self.b[i][js][i+1][j])
                    js=js+self.derx[i]((a[i][js]*self.w[i][js][i+1][j])+self.b[i][js][i+1][j])
                a[i+1].append(jj)
                ab[i+1].append(js)
        self.feed=np.array(a)#cached activations (read by back/updatew)
        self.feeds=np.array(ab)#cached derivatives (read by back)
        # NOTE(review): returns the SECOND-to-last entry of `a` (an extra empty
        # list was appended up front), not the final element — verify this is
        # really the output layer.
        return np.array(a[len(a)-2])
    def feedforwards(self,input):#forward pass, multi-output variant returning `tensor`s
        ass=input
        a=[[]]
        ab=[[]]
        a.append([])
        ab.append([])
        for i in ass:
            a[0].append(i.gets())
            ab[0].append(i.gets())
        for i in range(len(self.size)-1):
            a.append([])
            ab.append([])
            for j in range(self.size[i+1]):
                jj=np.array(0)
                js=np.array(0)
                # NOTE(review): here self.actx/self.derx are called WITHOUT the
                # per-layer index [i], unlike feedforward above — if actx/derx
                # are lists (as feedforward assumes) this raises TypeError;
                # confirm which convention is intended.
                for js in range(self.size[i]):
                    jj=jj+self.actx((a[i][js]*self.w[i][js][i+1][j])+self.b[i][js][i+1][j])
                    js=js+self.derx((a[i][js]*self.w[i][js][i+1][j])+self.b[i][js][i+1][j])
                a[i+1].append(jj)
                ab[i+1].append(js)
        self.feed=np.array(a)
        self.feeds=np.array(ab)
        ret=[]
        for i in a[len(a)-2]:
            ret.append(tensor(np.array(i)))
        return ret
    def op(self,input,out,eta):#one optimization sweep over the training pairs
        # eta: learning rate passed through to updatew
        for i in range(len(input)):
            self.feedforward(input[i])
            # NOTE(review): the whole `input` list is passed here, but updatew
            # never reads its `input` parameter — presumably dead; verify.
            self.updatew(input,self.back(out[i]),eta)
    def back(self,out):#backpropagation: build per-layer error terms from cached feed/feeds
        b=np.zeros((len(self.size),max(self.size),self.insize))#error terms per layer/unit
        out=np.array(out)
        ij=len(self.size)-1#index of the last layer
        ib=0#NOTE(review): incremented below but never read — dead counter?
        for i in out:
            a1=np.array(self.feed[len(self.feed)-2])#activations of the output layer (second-to-last entry)
            a2=np.array(i.gets())#target values
            a3=np.array(self.feeds[len(self.feed)-2])#cached derivatives of the output layer
            a5=-(a1-a2)*a3#output-layer delta: -(prediction - target) * derivative
            for ix in range(len(a5)):
                b[0][ix]=a5[ix][0]
            ib=ib+1
        for i in range(self.size[len(self.size)-1]):
            for j in range(self.size[len(self.size)-2]):
                # NOTE(review): index `i+j-1` goes to -1 when i == j == 0
                # (wraps to the last unit) — looks like an off-by-one; and
                # adding the bias inside an error-propagation product is
                # unusual — confirm both against the intended math.
                b[1][i+j-1]=b[0][i]*self.w[len(self.w)-1][j][len(self.w)-2][i]+self.b[len(self.w)-1][j][len(self.w)-2][i]
        for i in range(len(self.size)-1):
            for j in range(self.size[ij-i]*self.size[ij-i-1]):
                b1=b[i+1][int(j/self.size[ij-i-1])]#current error term for this target unit
                b3=self.feeds[ij-i][int(j/self.size[ij-i-1])]#cached derivative for the same unit
                b[i+1][int(j/self.size[ij-i-1])]=np.array(b1*b3)
                for js in range(self.size[ij-i-1]):
                    b[i+1][js]=(b[i+1][int(j/self.size[ij-i-1])]*self.w[ij-i][js][ij-i-1][int(j/self.size[ij-i-1])-1]+self.b[ij-i][js][ij-i-1][int(j/self.size[ij-i-1])-1])
        return b
    def updatew(self,input,bs,eta):#apply the error terms `bs` to weights and biases
        # input: unused here (see NOTE in op); bs: output of back(); eta: learning rate
        for i in range(len(self.size)-1):
            for j in range(self.size[i]):
                for js in range(self.size[i+1]):
                    # NOTE(review): weights move by +eta*... while biases move
                    # by -eta*... — the asymmetric signs look suspicious for a
                    # gradient step; confirm the intended update rule.
                    self.w[i][j][i+1][js]=np.array(self.w[i][j][i+1][js])+np.array(self.feed[i][j]*bs[i+1][js]*eta)
                    self.b[i][j][i+1][js]=np.array(self.b[i][j][i+1][js])-np.array(eta*bs[i+1][js])
#