Computing Numerical Gradients in a Neural Network (Python Code)

Based on Deep Learning from Scratch (Python).

import numpy as np

def softmax(a):
    a = a - np.max(a)          # shift by the max for numerical stability; the result is unchanged
    exp_a = np.exp(a)
    exp_a_sum = np.sum(exp_a)
    return exp_a / exp_a_sum
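
Subtracting np.max(a) before exponentiating prevents overflow without changing the output, because softmax is invariant to adding a constant to every input. A minimal sketch of that (the input values are mine, chosen so that a naive np.exp would overflow):

a = np.array([1010.0, 1000.0, 990.0])   # np.exp(1010.0) alone would overflow
print(softmax(a))                       # ≈ [1.0, 4.54e-05, 2.06e-09], sums to 1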


def cross_entropy_error(y, t):   # y: predicted probabilities, t: one-hot label
    delta = 1e-7                 # keeps np.log from receiving 0
    return -np.sum(t * np.log(y + delta))
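
(The original defined the parameters as (t, y) but called the function as cross_entropy_error(y, t) below; the signature is fixed here to match the call.) For a one-hot label t, the sum collapses to minus the log of the probability assigned to the correct class. A quick illustrative example, with values of my own:

t = np.array([0.0, 0.0, 1.0])    # one-hot: the correct class is index 2
y = np.array([0.1, 0.2, 0.7])    # predicted probabilities
print(cross_entropy_error(y, t)) # ≈ -log(0.7) ≈ 0.357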


class simpleNet:
    def __init__(self):
        self.W = np.random.randn(2, 3)   # 2x3 weight matrix, Gaussian init

    def predict(self, x):
        return np.dot(x, self.W)         # scores z = xW

    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)                   # y is the predicted distribution
        return cross_entropy_error(y, t)
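
simpleNet is a single fully connected layer: W has shape (2, 3), so a 2-dimensional input yields 3 class scores. Since W is drawn from np.random.randn, the loss varies from run to run. A minimal usage sketch (values are illustrative):

net = simpleNet()
x = np.array([0.6, 0.9])
t = np.array([0.0, 0.0, 1.0])     # one-hot label: class 2
y = softmax(net.predict(x))
print(net.loss(x, t))             # equals -np.log(y[2] + 1e-7) for this one-hot t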



def numerical_gradient_(f, x):   # handles 2-D arrays, and in fact any dimensionality
    h = 1e-4                     # 0.0001
    grad = np.zeros_like(x)

    # iterate over every element of x, whatever its shape
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)              # f(x + h)
        x[idx] = tmp_val - h
        fxh2 = f(x)              # f(x - h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val         # restore the original value
        it.iternext()

    return grad
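
numerical_gradient_ uses the central difference (f(x+h) - f(x-h)) / (2h), whose error is O(h^2), rather than the one-sided (f(x+h) - f(x)) / h with error O(h). np.nditer with multi_index visits every element regardless of shape, which is why one function covers both vectors and matrices. A quick sanity check on a toy function of my own with a known gradient:

def sum_of_squares(x):           # f(x) = x0^2 + x1^2, so the gradient is 2x
    return np.sum(x ** 2)

print(numerical_gradient_(sum_of_squares, np.array([3.0, 4.0])))  # ≈ [6. 8.]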



net = simpleNet()
x = np.array([0.6, 0.9])
t = np.array([0.0, 0.0, 1.0])    # one-hot label: the correct class is index 2

def f(W):
    # the W argument is unused: numerical_gradient_ perturbs net.W in place,
    # so net.loss(x, t) already sees the modified weights
    return net.loss(x, t)

grads = numerical_gradient_(f, net.W)
print(grads)
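
For softmax followed by cross-entropy on a single sample, the gradient is also known in closed form: dL/dW = x^T (y - t), where y = softmax(xW). A hedged sanity check against the numerical result, using the definitions above (the tolerance is my choice, loose enough to absorb the delta = 1e-7 term):

y = softmax(net.predict(x))
analytic = np.outer(x, y - t)                   # dL/dW = x^T (y - t)
print(np.allclose(grads, analytic, atol=1e-4))  # should print True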
