Simple DNN, RNN, and CNN example code
1. DNN - Deep Neural Network
import numpy as np

class myDNN:
    # layer sizes, e.g. 3 * 5 * 3 (input * hidden * output)
    def __init__(self, input, hidden, output):
        # random hidden-layer weights
        # Note: hidden_weight could instead have shape (input, hidden); correspondingly,
        # `self.hidden_out` would then be `np.dot(input_data, self.hidden_weight)` to match that shape.
        self.hidden_weight = np.random.rand(hidden, input)
        self.hidden_bias = np.random.rand(hidden)
        # random output-layer weights
        self.output_weight = np.random.rand(output, hidden)
        self.output_bias = np.random.rand(output)

    def forward(self, input_data):
        self.hidden_out = np.dot(input_data, self.hidden_weight.T) + self.hidden_bias
        self.output_out = np.dot(self.hidden_out, self.output_weight.T) + self.output_bias
# Usage:
dnn = myDNN(3, 5, 3)
print("hidden_weight", dnn.hidden_weight)
print("hidden_bias:", dnn.hidden_bias)
print("output_weight", dnn.output_weight)
print("output_bias", dnn.output_bias)
x = np.array([1, 2, 3])  # input
dnn.forward(x)
print("output_out", dnn.output_out)
# output:
# hidden_weight [[0.99663996 0.39342568 0.5312192 ]
# [0.0798744 0.50312289 0.86241405]
# [0.17138496 0.6761287 0.70645906]
# [0.61662379 0.69389404 0.16623206]
# [0.71213402 0.30800932 0.64149244]]
# hidden_bias: [0.81517457 0.56115705 0.3089624 0.84450962 0.93530796]
# output_weight [[0.34466034 0.31119367 0.12883636 0.34135026 0.43802589]
# [0.31553914 0.16063241 0.8179255 0.52314575 0.79439618]
# [0.86730239 0.25280671 0.20375421 0.78095429 0.67368635]]
# output_bias [0.5588883 0.98722366 0.21507382]
# output_out [ 6.8078659 11.30086755 11.16252686]
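Note that this forward pass is purely linear, so stacking the two layers is equivalent to a single linear map. A minimal sketch of inserting a tanh nonlinearity after the hidden layer (an illustrative variation, not part of the original class):

class myDNNWithActivation(myDNN):
    def forward(self, input_data):
        # tanh squashes the hidden activations into (-1, 1) before the output layer
        self.hidden_out = np.tanh(np.dot(input_data, self.hidden_weight.T) + self.hidden_bias)
        self.output_out = np.dot(self.hidden_out, self.output_weight.T) + self.output_bias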
1.1 Use DNN in PyTorch
import numpy as np
import torch.nn as nn

class TorchDNN(nn.Module):
    def __init__(self, input, hidden, output):
        super(TorchDNN, self).__init__()
        self.layer_hidden = nn.Linear(input, hidden, bias=True)
        self.layer_output = nn.Linear(hidden, output, bias=True)

    def forward(self, input_data):
        self.hidden_out = self.layer_hidden(input_data)
        self.output_out = self.layer_output(self.hidden_out)
        return self.output_out
x = np.array([1, 2, 3])
torch_model = TorchDNN(len(x), 5, 3)
print(torch_model.state_dict())
# OrderedDict([('layer_hidden.weight',
# tensor([[-0.5216, -0.5690, 0.4181],
# [-0.3142, 0.1489, 0.5071],
# [ 0.0295, 0.3381, 0.4401],
# [-0.4697, 0.0732, -0.0328],
# [ 0.5250, 0.1540, 0.2086]])),
# ('layer_hidden.bias', tensor([-0.5134, 0.2645, -0.3366, -0.0597, 0.0159])),
# ('layer_output.weight',
# tensor([[ 0.2770, -0.3408, -0.3145, -0.3686, 0.1060],
# [ 0.1268, 0.0729, -0.3838, 0.2850, 0.1438],
# [ 0.1645, -0.0497, 0.1029, 0.1088, -0.0536]])),
# ('layer_output.bias', tensor([ 0.0908, -0.1240, 0.2800]))])
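To check that the two implementations agree, one can copy the numpy weights into the PyTorch model and compare outputs. A sketch, assuming the `dnn` and `torch_model` objects from above (nn.Linear stores weights as (out_features, in_features), which matches myDNN's layout):

import torch
state = {
    'layer_hidden.weight': torch.tensor(dnn.hidden_weight, dtype=torch.float32),
    'layer_hidden.bias':   torch.tensor(dnn.hidden_bias, dtype=torch.float32),
    'layer_output.weight': torch.tensor(dnn.output_weight, dtype=torch.float32),
    'layer_output.bias':   torch.tensor(dnn.output_bias, dtype=torch.float32),
}
torch_model.load_state_dict(state)
torch_out = torch_model(torch.FloatTensor(x))
print(np.allclose(torch_out.detach().numpy(), dnn.output_out, atol=1e-4))  # expect True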
2. RNN - Recurrent Neural Network
import numpy as np

class myRNN:
    def __init__(self, input, hidden):
        # random weights; W_hh must be (hidden, hidden) so the previous
        # hidden state is mixed across all hidden units
        self.input_hidden_weight = np.random.randint(-10000, 10000, (hidden, input)) / 10000
        self.hidden_hidden_weight = np.random.randint(-10000, 10000, (hidden, hidden)) / 10000
        # random biases
        self.input_hidden_bias = np.random.randint(-10000, 10000, (hidden)) / 10000
        self.hidden_hidden_bias = np.random.randint(-10000, 10000, (hidden)) / 10000
        # self.input_hidden_bias = np.zeros(hidden)
        # self.hidden_hidden_bias = np.zeros(hidden)
        self.hidden_size = hidden

    def forward(self, input_data):
        self.last_hidden_output = np.zeros([self.hidden_size])
        output = []
        for item in input_data:
            # h_t = tanh(W_ih * x_t + b_ih + W_hh * h_(t-1) + b_hh)
            hidden_cur = np.dot(item, self.input_hidden_weight.T) + self.input_hidden_bias
            hidden_pre = np.dot(self.last_hidden_output, self.hidden_hidden_weight.T) + self.hidden_hidden_bias
            hidden_output = np.tanh(hidden_cur + hidden_pre)
            output.append(hidden_output)
            self.last_hidden_output = hidden_output
        return np.array(output), hidden_output
x = np.array([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
input_size = 3
hidden_size = 4
diy_model = myRNN(input_size, hidden_size)
output, hidden_output = diy_model.forward(x)
print("myRNN process output: ", output)
print("myRNN hidden_output:", hidden_output)
# output:
# myRNN process output: [[-0.62745049 -0.99314575 -0.96754221 -0.9965258 ]
# [-0.99542912 -0.99962783 -0.99965698 -0.99998354]
# [-0.99983032 -0.99992543 -0.9999868 -0.99999971]]
# myRNN hidden_output: [-0.99983032 -0.99992543 -0.9999868 -0.99999971]
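All of the printed values sit close to -1 because the raw inputs (values 1 through 7) push the pre-activations deep into the saturated region of tanh. Scaling the input down gives less saturated hidden states (an illustrative experiment, not in the original):

output_scaled, _ = diy_model.forward(x * 0.1)
print("scaled-input output:", output_scaled)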
2.1 Use RNN in PyTorch
import numpy as np
import torch
import torch.nn as nn

class TorchRNN(nn.Module):
    def __init__(self, input_size, hidden):
        super(TorchRNN, self).__init__()
        self.layer = nn.RNN(input_size, hidden, batch_first=True)

    def forward(self, x):
        return self.layer(x)
torch_model = TorchRNN(3, 4)
print(torch_model.state_dict())
x = np.array([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
torch_x = torch.FloatTensor([x])  # add a batch dimension: (1, seq_len, input_size)
output, h = torch_model(torch_x)
print("output:", output.detach().numpy())
print("h:",h.detach().numpy())
# output:
# OrderedDict([('layer.weight_ih_l0', tensor([[ 0.0922, 0.2786, -0.4514],
# [ 0.3809, 0.2628, -0.4460],
# [-0.4951, -0.3599, -0.4961],
# [ 0.3794, 0.3397, 0.3185]])), ('layer.weight_hh_l0', tensor([[-0.1330, -0.1843, -0.2618, 0.4246],
# [ 0.4154, -0.3578, -0.4181, -0.4291],
# [ 0.3608, -0.2349, 0.4631, 0.4873],
# [ 0.4886, 0.0285, -0.0490, 0.2928]])), ('layer.bias_ih_l0', tensor([-0.1421, -0.3572, -0.2087, -0.0319])), ('layer.bias_hh_l0', tensor([-0.3799, 0.1126, -0.1766, 0.2630]))])
# output: [[[-0.8416229 -0.589054 -0.99585485 0.9778193 ]
# [-0.45533973 -0.3993926 -0.9999862 0.99957436]
# [-0.6221984 0.05736368 -0.99999994 0.9999955 ]]]
# h: [[[-0.6221984 0.05736368 -0.99999994 0.9999955 ]]]
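As with the DNN, the two RNNs can be cross-checked by copying the PyTorch weights into myRNN, since nn.RNN computes the same recurrence h_t = tanh(x_t W_ih^T + b_ih + h_(t-1) W_hh^T + b_hh). A sketch, assuming the `diy_model`, `torch_model`, `x`, and `output` objects from above:

state = torch_model.state_dict()
diy_model.input_hidden_weight = state['layer.weight_ih_l0'].numpy()
diy_model.hidden_hidden_weight = state['layer.weight_hh_l0'].numpy()
diy_model.input_hidden_bias = state['layer.bias_ih_l0'].numpy()
diy_model.hidden_hidden_bias = state['layer.bias_hh_l0'].numpy()
diy_output, _ = diy_model.forward(x)
print(np.allclose(diy_output, output.detach().numpy()[0], atol=1e-5))  # expect True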
3. CNN - Convolutional Neural Network
import numpy as np

class MyCNN:
    def __init__(self, in_channel, out_channel, kernel_size):
        # kernel weights, shape (out_channel, in_channel, kernel_size, kernel_size)
        # random init would be:
        # self.kernel_weight = np.random.randint(-10000, 10000, (out_channel, in_channel, kernel_size, kernel_size)) / 10000
        # hardcoded here to the same values as the PyTorch example below, so the outputs can be compared
        self.kernel_weight = np.array([[[[ 0.0106, -0.1561,  0.0984],
                                         [ 0.1468,  0.1580, -0.1404],
                                         [ 0.0856,  0.0780,  0.0636]],
                                        [[-0.1620,  0.2318,  0.0486],
                                         [-0.2214, -0.2046,  0.1070],
                                         [ 0.1609,  0.0160, -0.0374]]],
                                       [[[ 0.1876, -0.2056,  0.1858],
                                         [-0.1288,  0.0065, -0.0145],
                                         [-0.1080,  0.1519,  0.0581]],
                                        [[-0.0749,  0.2289, -0.0890],
                                         [ 0.0611,  0.0398, -0.1293],
                                         [ 0.0911, -0.0264, -0.2104]]]])
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.kernel_size = kernel_size

    # input_data has shape c * h * w
    def forward(self, input_data):
        output = []
        input_shape = input_data.shape
        idx_start = int(np.floor(self.kernel_size / 2))  # np.int is removed in NumPy >= 1.24
        in_channel = input_shape[0]
        height = input_shape[1]
        width = input_shape[2]
        for o_c in range(self.out_channel):
            piece_of_out_channel = np.zeros((height - idx_start * 2, width - idx_start * 2))
            # slide the kernel over the height dimension
            for idx_height in range(idx_start, height - idx_start):
                # and over the width dimension
                for idx_width in range(idx_start, width - idx_start):
                    # kernel-sized patch across all input channels
                    kernel_shape_input = input_data[:, idx_height - idx_start: idx_height + idx_start + 1,
                                                       idx_width - idx_start: idx_width + idx_start + 1]
                    # element-wise multiply and sum over channels and the patch
                    out = np.sum(self.kernel_weight[o_c] * kernel_shape_input)
                    # assign the value to the corresponding output position
                    piece_of_out_channel[idx_height - idx_start][idx_width - idx_start] = out
            output.append(piece_of_out_channel)
        return output
# to use a random input instead of the fixed one below:
# x = np.random.randint(0, 10000, (2, 6, 6)) / 100
x = np.array([[[61, 93, 18, 31,  2, 49],
               [12, 62, 32, 60, 58, 30],
               [49, 64, 38, 74, 59, 29],
               [71, 34, 29, 88, 59, 41],
               [91, 72, 36, 94, 79, 29],
               [17, 15, 86, 29, 84, 53]],
              [[31, 25, 15, 16, 35, 20],
               [76, 45, 82, 88, 49, 99],
               [56, 46, 82, 72, 26, 55],
               [ 7, 86, 32, 29, 82, 91],
               [76, 68, 17, 50, 19, 53],
               [87, 21, 58, 35, 81, 46]]])
# print(x)
print("x.shape:", x.shape)
myCNN = MyCNN(x.shape[0], 2, 3)
print(myCNN.kernel_weight)
output = myCNN.forward(x)
print("myCNN output:", output)
# output:
# x.shape: (2, 6, 6)
# myCNN output: [array([[-2.5093, 9.0098, -0.2033, 28.9 ],
# [ 6.5155, 27.3464, 0.7038, 14.5031],
# [24.2218, 16.1092, 23.2223, 16.9067],
# [29.6749, 2.0986, 16.8128, 45.025 ]]),
# array([[-14.77 , -4.3335, 5.0665, 3.2378],
# [-28.2207, 18.1968, 11.889 , -27.3557],
# [ -2.748 , 22.5508, 10.6013, -19.0372],
# [ 22.0148, 9.1788, -22.0313, 9.5176]])]
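The 4x4 output size follows from the usual convolution arithmetic: with no padding and stride 1, each spatial dimension shrinks to H - K + 1 = 6 - 3 + 1 = 4. A quick check (illustrative):

out_h = x.shape[1] - myCNN.kernel_size + 1  # 6 - 3 + 1 = 4
out_w = x.shape[2] - myCNN.kernel_size + 1
print(out_h, out_w, output[0].shape)  # 4 4 (4, 4)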
3.1 Use CNN in PyTorch
import torch
import torch.nn as nn

class TorchCNN(nn.Module):
    def __init__(self, in_channel, out_channel, kernel_size):
        super().__init__()
        self.conv2d = nn.Conv2d(in_channel, out_channel, kernel_size, bias=False)

    def forward(self, input_data):
        return self.conv2d(input_data)
in_channel = x.shape[0]
torchcnn = TorchCNN(in_channel, 2, 3)
print("torchcnn weight:", torchcnn.state_dict())
print("torchcnn weight shape:", torchcnn.state_dict()['conv2d.weight'].numpy().shape)
# torchcnn weight shape: (2, 2, 3, 3) => (out_channel, in_channel, kernel_size, kernel_size)
torch_x = torch.FloatTensor([x])  # add a batch dimension: (1, 2, 6, 6)
out = torchcnn(torch_x)
print("TorchCNN out: ", out)
# output
# torchcnn weight: OrderedDict([('conv2d.weight', tensor(
# [[[[ 0.0106, -0.1561, 0.0984],
# [ 0.1468, 0.1580, -0.1404],
# [ 0.0856, 0.0780, 0.0636]],
# [[-0.1620, 0.2318, 0.0486],
# [-0.2214, -0.2046, 0.1070],
# [ 0.1609, 0.0160, -0.0374]]],
# [[[ 0.1876, -0.2056, 0.1858],
# [-0.1288, 0.0065, -0.0145],
# [-0.1080, 0.1519, 0.0581]],
# [[-0.0749, 0.2289, -0.0890],
# [ 0.0611, 0.0398, -0.1293],
# [ 0.0911, -0.0264, -0.2104]]]]))])
# TorchCNN out: tensor([[[[ -2.5066, 9.0144, -0.1983, 28.9003],
# [ 6.5160, 27.3456, 0.7094, 14.5056],
# [ 24.2262, 16.1086, 23.2279, 16.9102],
# [ 29.6763, 2.1047, 16.8210, 45.0250]],
# [[-14.7564, -4.3273, 5.0752, 3.2491],
# [-28.2115, 18.1981, 11.8975, -27.3447],
# [ -2.7442, 22.5540, 10.6096, -19.0247],
# [ 22.0166, 9.1837, -22.0241, 9.5211]]]],
# grad_fn=<MkldnnConvolutionBackward>)
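The small differences between the two printed outputs (e.g. -2.5093 vs -2.5066) arise because MyCNN hardcodes the kernel values truncated to four decimal places, while the Conv2d layer uses full float32 precision. To compare the two on a fresh run, load MyCNN's hardcoded kernel into the Conv2d layer first; a sketch, assuming the `myCNN`, `torchcnn`, `torch_x`, and `output` objects from above:

torchcnn.load_state_dict({'conv2d.weight': torch.tensor(myCNN.kernel_weight, dtype=torch.float32)})
out = torchcnn(torch_x)
print(np.allclose(np.array(output), out.detach().numpy()[0], atol=1e-3))  # expect True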