Commit

cmt
unknown committed Apr 5, 2019
1 parent e633111 commit 4f85ecc
Showing 8,991 changed files with 4,363,667 additions and 3 deletions.
6 changes: 3 additions & 3 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -3,6 +3,6 @@ Thumbs.db
__MACOSX
._*
.idea
**/bbs
**/ebook
**/bbs
**/ebook
**/bbs
.gitignore
7 changes: 7 additions & 0 deletions 大三上/VLSI设计导论/readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# VLSI设计导论 (Introduction to VLSI Design)
This feels like an underrated good (read: easy, "shui") course: there is no regular homework, attendance is not taken in lectures (Fall 2018), and everyone who has taken it says the grading is generous (sample size: 2). The only requirement is to pick one topic from a given list for the course project; the topics are essentially algorithmic problems in circuit design (floorplanning, placement, routing, clock synthesis, power grid analysis). Over the semester you submit a topic-selection report and a final report, and give one in-class presentation.

## Course Project
* Static power grid analysis (by Zhengxiao Du): https://github.com/duzx16/PGsolver
More entries are welcome.

1 change: 1 addition & 0 deletions 大三上/readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
The first semester of junior year (大三上) also has many restricted electives; contributions are welcome!
Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
import numpy as np
from scipy.signal import convolve


def conv2d_forward(input, W, b, kernel_size, pad):
    '''
    Args:
        input: shape = n (#sample) x c_in (#input channel) x h_in (#height) x w_in (#width)
        W: weight, shape = c_out (#output channel) x c_in (#input channel) x k (#kernel_size) x k (#kernel_size)
        b: bias, shape = c_out
        kernel_size: size of the convolving kernel (or filter)
        pad: number of zeros added to each side of the input
    Returns:
        output: shape = n (#sample) x c_out (#output channel) x h_out x w_out,
            where h_out and w_out are the height and width of the output after convolution
    '''
    n, c_in, h_in, w_in = input.shape
    h_pad, w_pad = h_in + 2 * pad, w_in + 2 * pad
    padded_input = np.zeros((n, c_in, h_pad, w_pad))
    padded_input[:, :, pad:h_in + pad, pad:w_in + pad] = input
    h_out, w_out = h_pad - kernel_size + 1, w_pad - kernel_size + 1
    c_out = W.shape[0]

    output = np.zeros((n, c_out, h_out, w_out))

    for i in range(c_out):
        for j in range(c_in):
            # scipy's convolve flips the kernel, so rotate it 180 degrees to get a cross-correlation
            ker = np.rot90(W[i, j], 2)[np.newaxis, :, :]
            output[:, i] += convolve(padded_input[:, j], ker, 'valid')

    output += b[np.newaxis, :, np.newaxis, np.newaxis]

    return output


def conv2d_backward(input, grad_output, W, b, kernel_size, pad):
    '''
    Args:
        input: shape = n (#sample) x c_in (#input channel) x h_in (#height) x w_in (#width)
        grad_output: shape = n (#sample) x c_out (#output channel) x h_out x w_out
        W: weight, shape = c_out (#output channel) x c_in (#input channel) x k (#kernel_size) x k (#kernel_size)
        b: bias, shape = c_out
        kernel_size: size of the convolving kernel (or filter)
        pad: number of zeros added to each side of the input
    Returns:
        grad_input: gradient of input, shape = n (#sample) x c_in (#input channel) x h_in (#height) x w_in (#width)
        grad_W: gradient of W, shape = c_out (#output channel) x c_in (#input channel) x k (#kernel_size) x k (#kernel_size)
        grad_b: gradient of b, shape = c_out
    '''
    n, c_in, h_in, w_in = input.shape
    _, _, h_out, w_out = grad_output.shape
    assert h_out == h_in + 2 * pad - kernel_size + 1 and w_out == w_in + 2 * pad - kernel_size + 1, \
        "grad_output shape not consistent with output"

    h_pad, w_pad = h_in + 2 * pad, w_in + 2 * pad
    c_out = W.shape[0]

    # grad_input: full convolution of grad_output with the (unflipped) kernels
    padded_grad_input = np.zeros((n, c_in, h_pad, w_pad))
    for i in range(c_in):
        for j in range(c_out):
            padded_grad_input[:, i] += convolve(grad_output[:, j], W[j, i][np.newaxis, :, :], 'full')
    grad_input = padded_grad_input[:, :, pad:h_in + pad, pad:w_in + pad]

    # grad_W: correlate the padded input with grad_output, summing over the batch axis
    padded_input = np.zeros((n, c_in, h_pad, w_pad))
    padded_input[:, :, pad:h_in + pad, pad:w_in + pad] = input

    grad_W = np.zeros((c_out, c_in, kernel_size, kernel_size))
    for i in range(c_in):
        for j in range(c_out):
            delta = np.flip(np.rot90(grad_output[:, j], 2, (1, 2)), 0)
            grad_W[j, i] += convolve(padded_input[:, i], delta, 'valid').squeeze()

    # grad_b: sum grad_output over every axis except the channel axis
    grad_b = np.sum(grad_output, (0, 2, 3))
    return grad_input, grad_W, grad_b


def avgpool2d_forward(input, kernel_size, pad):
    '''
    Args:
        input: shape = n (#sample) x c_in (#input channel) x h_in (#height) x w_in (#width)
        kernel_size: size of the window to take the average over
        pad: number of zeros added to each side of the input
    Returns:
        output: shape = n (#sample) x c_in (#input channel) x h_out x w_out,
            where h_out and w_out are the height and width of the output after average pooling
    '''
    n, c_in, h_in, w_in = input.shape
    h_pad, w_pad = h_in + 2 * pad, w_in + 2 * pad
    padded_input = np.zeros((n, c_in, h_pad, w_pad))
    padded_input[:, :, pad:h_in + pad, pad:w_in + pad] = input

    h_out, w_out = int(h_pad / kernel_size), int(w_pad / kernel_size)

    # sum every kernel_size-th row, then every kernel_size-th column, then divide by the window area
    output_1 = np.zeros((n, c_in, h_out, w_pad))
    for i in range(kernel_size):
        output_1 += padded_input[:, :, i:h_pad:kernel_size, :]

    output_2 = np.zeros((n, c_in, h_out, w_out))
    for i in range(kernel_size):
        output_2 += output_1[:, :, :, i:w_pad:kernel_size]

    output = output_2 / (kernel_size * kernel_size)

    return output


def avgpool2d_backward(input, grad_output, kernel_size, pad):
    '''
    Args:
        input: shape = n (#sample) x c_in (#input channel) x h_in (#height) x w_in (#width)
        grad_output: shape = n (#sample) x c_in (#input channel) x h_out x w_out
        kernel_size: size of the window to take the average over
        pad: number of zeros added to each side of the input
    Returns:
        grad_input: gradient of input, shape = n (#sample) x c_in (#input channel) x h_in (#height) x w_in (#width)
    '''
    n, c_in, h_in, w_in = input.shape
    _, _, h_out, w_out = grad_output.shape
    assert h_out == (h_in + 2 * pad) / kernel_size and w_out == (w_in + 2 * pad) / kernel_size, \
        "grad_output shape not consistent with output"
    h_pad, w_pad = h_in + 2 * pad, w_in + 2 * pad

    # spread each output gradient back over its kernel_size x kernel_size window
    padded_grad_input_1 = np.zeros((n, c_in, h_pad, w_out))
    for i in range(kernel_size):
        padded_grad_input_1[:, :, i:h_pad:kernel_size, :] = grad_output

    padded_grad_input_2 = np.zeros((n, c_in, h_pad, w_pad))
    for i in range(kernel_size):
        padded_grad_input_2[:, :, :, i:w_pad:kernel_size] = padded_grad_input_1

    grad_input = padded_grad_input_2[:, :, pad:h_in + pad, pad:w_in + pad] / (kernel_size * kernel_size)

    return grad_input
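
A quick sanity check for the conv2d_forward/conv2d_backward pair above is a finite-difference comparison on a tiny input. The sketch below is illustrative only: it assumes these functions live in a functions.py module on the path, and it uses the sum of all outputs as the loss, so grad_output is simply an array of ones.

import numpy as np
from functions import conv2d_forward, conv2d_backward  # module name is an assumption

np.random.seed(0)
n, c_in, c_out, k, pad = 2, 3, 4, 3, 1
x = np.random.randn(n, c_in, 5, 5)
W = np.random.randn(c_out, c_in, k, k)
b = np.random.randn(c_out)

out = conv2d_forward(x, W, b, k, pad)
grad_input, grad_W, grad_b = conv2d_backward(x, np.ones_like(out), W, b, k, pad)

# perturb one input element and compare the analytic gradient with the finite difference
eps = 1e-5
x_pert = x.copy()
x_pert[0, 0, 2, 2] += eps
numeric = (conv2d_forward(x_pert, W, b, k, pad).sum() - out.sum()) / eps
print(numeric, grad_input[0, 0, 2, 2])  # the two values should agree closely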
149 changes: 149 additions & 0 deletions 大三上/人工神经网络/hw/2018/HW2_136408023/HW2/code/layers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
import numpy as np
from functions import conv2d_forward, conv2d_backward, avgpool2d_forward, avgpool2d_backward


class Layer(object):
    def __init__(self, name, trainable=False):
        self.name = name
        self.trainable = trainable
        self._saved_tensor = None

    def forward(self, input):
        pass

    def backward(self, grad_output):
        pass

    def update(self, config):
        pass

    def _saved_for_backward(self, tensor):
        self._saved_tensor = tensor


class Relu(Layer):
    def __init__(self, name):
        super(Relu, self).__init__(name)

    def forward(self, input):
        self._saved_for_backward(input)
        return np.maximum(0, input)

    def backward(self, grad_output):
        input = self._saved_tensor
        return grad_output * (input > 0)


class Sigmoid(Layer):
    def __init__(self, name):
        super(Sigmoid, self).__init__(name)

    def forward(self, input):
        output = 1 / (1 + np.exp(-input))
        self._saved_for_backward(output)
        return output

    def backward(self, grad_output):
        output = self._saved_tensor
        return grad_output * output * (1 - output)


class Linear(Layer):
    def __init__(self, name, in_num, out_num, init_std):
        super(Linear, self).__init__(name, trainable=True)
        self.in_num = in_num
        self.out_num = out_num
        self.W = np.random.randn(in_num, out_num) * init_std
        self.b = np.zeros(out_num)

        self.grad_W = np.zeros((in_num, out_num))
        self.grad_b = np.zeros(out_num)

        self.diff_W = np.zeros((in_num, out_num))
        self.diff_b = np.zeros(out_num)

    def forward(self, input):
        self._saved_for_backward(input)
        output = np.dot(input, self.W) + self.b
        return output

    def backward(self, grad_output):
        input = self._saved_tensor
        self.grad_W = np.dot(input.T, grad_output)
        self.grad_b = np.sum(grad_output, axis=0)
        return np.dot(grad_output, self.W.T)

    def update(self, config):
        mm = config['momentum']
        lr = config['learning_rate']
        wd = config['weight_decay']

        self.diff_W = mm * self.diff_W + (self.grad_W + wd * self.W)
        self.W = self.W - lr * self.diff_W

        self.diff_b = mm * self.diff_b + (self.grad_b + wd * self.b)
        self.b = self.b - lr * self.diff_b


class Reshape(Layer):
    def __init__(self, name, new_shape):
        super(Reshape, self).__init__(name)
        self.new_shape = new_shape

    def forward(self, input):
        self._saved_for_backward(input)
        return input.reshape(*self.new_shape)

    def backward(self, grad_output):
        input = self._saved_tensor
        return grad_output.reshape(*input.shape)


class Conv2D(Layer):
    def __init__(self, name, in_channel, out_channel, kernel_size, pad, init_std):
        super(Conv2D, self).__init__(name, trainable=True)
        self.kernel_size = kernel_size
        self.pad = pad
        # scale the random initialization by init_std, as the Linear layer does
        self.W = np.random.randn(out_channel, in_channel, kernel_size, kernel_size) * init_std
        self.b = np.zeros(out_channel)

        self.diff_W = np.zeros(self.W.shape)
        self.diff_b = np.zeros(out_channel)

    def forward(self, input):
        self._saved_for_backward(input)
        output = conv2d_forward(input, self.W, self.b, self.kernel_size, self.pad)
        return output

    def backward(self, grad_output):
        input = self._saved_tensor
        grad_input, self.grad_W, self.grad_b = conv2d_backward(input, grad_output, self.W, self.b, self.kernel_size, self.pad)
        return grad_input

    def update(self, config):
        mm = config['momentum']
        lr = config['learning_rate']
        wd = config['weight_decay']

        self.diff_W = mm * self.diff_W + (self.grad_W + wd * self.W)
        self.W = self.W - lr * self.diff_W

        self.diff_b = mm * self.diff_b + (self.grad_b + wd * self.b)
        self.b = self.b - lr * self.diff_b


class AvgPool2D(Layer):
    def __init__(self, name, kernel_size, pad):
        super(AvgPool2D, self).__init__(name)
        self.kernel_size = kernel_size
        self.pad = pad

    def forward(self, input):
        self._saved_for_backward(input)
        output = avgpool2d_forward(input, self.kernel_size, self.pad)
        return output

    def backward(self, grad_output):
        input = self._saved_tensor
        grad_input = avgpool2d_backward(input, grad_output, self.kernel_size, self.pad)
        return grad_input
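
For orientation, the layers above can be stacked into a small LeNet-style network roughly as follows. This is a sketch under assumptions: the channel counts, kernel sizes, and init_std values are illustrative, and the Network container is assumed to be the class defined in the last file of this commit (taken here to be network.py).

from network import Network
from layers import Relu, Linear, Reshape, Conv2D, AvgPool2D


def build_model():
    model = Network()
    model.add(Conv2D('conv1', 1, 4, kernel_size=3, pad=1, init_std=0.01))   # 1x28x28 -> 4x28x28
    model.add(Relu('relu1'))
    model.add(AvgPool2D('pool1', kernel_size=2, pad=0))                     # 4x28x28 -> 4x14x14
    model.add(Conv2D('conv2', 4, 8, kernel_size=3, pad=1, init_std=0.01))   # 4x14x14 -> 8x14x14
    model.add(Relu('relu2'))
    model.add(AvgPool2D('pool2', kernel_size=2, pad=0))                     # 8x14x14 -> 8x7x7
    model.add(Reshape('flatten', (-1, 8 * 7 * 7)))
    model.add(Linear('fc', 8 * 7 * 7, 10, init_std=0.01))                   # 10 logits per sample
    return model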
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import numpy as np
import os


def load_mnist_4d(data_dir):
    # open the raw MNIST idx files in binary mode and skip their headers
    fd = open(os.path.join(data_dir, 'train-images-idx3-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trX = loaded[16:].reshape((60000, 1, 28, 28)).astype(float)

    fd = open(os.path.join(data_dir, 'train-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    trY = loaded[8:].reshape(60000)

    fd = open(os.path.join(data_dir, 't10k-images-idx3-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teX = loaded[16:].reshape((10000, 1, 28, 28)).astype(float)

    fd = open(os.path.join(data_dir, 't10k-labels-idx1-ubyte'), 'rb')
    loaded = np.fromfile(file=fd, dtype=np.uint8)
    teY = loaded[8:].reshape(10000)

    # roughly center the pixel values and scale them to about [-0.5, 0.5]
    trX = (trX - 128.0) / 255.0
    teX = (teX - 128.0) / 255.0

    return trX, teX, trY, teY
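
The loader above returns integer class labels, while the loss classes in the next file expect one-hot targets. A minimal preprocessing sketch (the data directory path and the load_data module name are assumptions) could look like this:

import numpy as np
from load_data import load_mnist_4d  # module name is an assumption


def onehot(labels, num_classes=10):
    # turn integer labels into one-hot rows, e.g. 3 -> [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
    out = np.zeros((len(labels), num_classes))
    out[np.arange(len(labels)), labels] = 1.0
    return out


trX, teX, trY, teY = load_mnist_4d('data/mnist')  # path is an assumption
trY_onehot, teY_onehot = onehot(trY), onehot(teY)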
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
from __future__ import division
import numpy as np


class EuclideanLoss(object):
    def __init__(self, name):
        self.name = name

    def forward(self, input, target):
        return 0.5 * np.mean(np.sum(np.square(input - target), axis=1))

    def backward(self, input, target):
        return (input - target) / len(input)


class SoftmaxCrossEntropyLoss(object):
    def __init__(self, name):
        self.name = name

    def forward(self, input, target):
        # subtract the per-row max for numerical stability (this does not change the softmax)
        input = input - np.max(input, axis=1, keepdims=True)
        exp_input = np.exp(input)
        prob = exp_input / (np.sum(exp_input, axis=1, keepdims=True) + 1e-20)
        return np.mean(np.sum(-target * np.log(prob + 1e-20), axis=1))

    def backward(self, input, target):
        input = input - np.max(input, axis=1, keepdims=True)
        exp_input = np.exp(input)
        prob = exp_input / (np.sum(exp_input, axis=1, keepdims=True) + 1e-20)
        return (prob - target) / len(input)
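
A small worked example of the SoftmaxCrossEntropyLoss above: for one sample with logits [2, 0, 0] and true class 0, softmax gives p ≈ [0.787, 0.107, 0.107], so the loss is -ln(0.787) ≈ 0.24 and the gradient is (p - target) ≈ [-0.213, 0.107, 0.107]. The snippet below reproduces this; the loss module name is an assumption.

import numpy as np
from loss import SoftmaxCrossEntropyLoss  # module name is an assumption

logits = np.array([[2.0, 0.0, 0.0]])
target = np.array([[1.0, 0.0, 0.0]])  # one-hot label for class 0

loss = SoftmaxCrossEntropyLoss('ce')
print(loss.forward(logits, target))   # ~0.2395
print(loss.backward(logits, target))  # ~[[-0.213, 0.107, 0.107]]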
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
class Network(object):
    def __init__(self):
        self.layer_list = []
        self.params = []
        self.num_layers = 0

    def add(self, layer):
        self.layer_list.append(layer)
        self.num_layers += 1

    def forward(self, input):
        output = input
        for i in range(self.num_layers):
            output = self.layer_list[i].forward(output)

        return output

    def backward(self, grad_output):
        grad_input = grad_output
        for i in range(self.num_layers - 1, -1, -1):
            grad_input = self.layer_list[i].backward(grad_input)

    def update(self, config):
        for i in range(self.num_layers):
            if self.layer_list[i].trainable:
                self.layer_list[i].update(config)
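
Putting the pieces together, one pass of momentum SGD over the training set could look roughly like the sketch below. It reuses build_model and the one-hot labels from the earlier sketches; the batch size and optimizer settings are illustrative assumptions, not values taken from the assignment.

config = {'learning_rate': 0.01, 'momentum': 0.9, 'weight_decay': 0.0}
batch_size = 100

model = build_model()                  # the Conv/Pool/Linear stack sketched earlier
loss_fn = SoftmaxCrossEntropyLoss('ce')

for start in range(0, len(trX), batch_size):
    x = trX[start:start + batch_size]
    y = trY_onehot[start:start + batch_size]

    logits = model.forward(x)
    batch_loss = loss_fn.forward(logits, y)
    model.backward(loss_fn.backward(logits, y))  # propagate gradients through every layer
    model.update(config)                         # momentum SGD step on each trainable layer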