© Dr. Yves J. Hilpisch
The Python Quants GmbH
import numpy as np
from pylab import plt
plt.style.use('seaborn')
%matplotlib inline
x = np.linspace(0, 10, 5)
y = 3 * x + 2.5 + np.random.standard_normal(len(x)) * 2.5
plt.plot(x, y, 'ro')
reg = np.polyfit(x, y, deg=1)
reg
yr = np.polyval(reg, x)
plt.plot(x, y, 'ro')
plt.plot(x, yr, 'b')
((y - yr) ** 2).mean()
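# Added cross-check (not in the original): the same least-squares coefficients
# can also be obtained via np.linalg.lstsq on the design matrix [x, 1]; the
# result should match np.polyfit(x, y, deg=1) up to floating-point noise.
A = np.vstack((x, np.ones_like(x))).T  # design matrix with a constant column
coef, *_ = np.linalg.lstsq(A, y, rcond=None)
coef  # ~ [slope, intercept], same ordering as reg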
l0 = np.array((x, len(x) * [1])).T
l0
weights = np.array((2., 2.))
l1 = np.dot(l0, weights)
l1
y
d = y - l1
d
(d ** 2).mean() # MSE
alpha = 0.01 # learning rate
update = alpha * np.dot(d, l0)
update
weights += update # updating weights
weights
l1 = np.dot(l0, weights)
d = y - l1
(d ** 2).mean() # new MSE
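# Added for reference (not in the original): the weight update above is a plain
# gradient-descent step on the MSE. The analytic gradient is
# d(MSE)/d(weights) = -2 / n * l0.T @ (y - l0 @ weights) = -2 / n * l0.T @ d,
# so alpha * np.dot(d, l0) moves against the gradient, with the factor 2 / n
# effectively absorbed into the learning rate.
grad = -2 / len(y) * np.dot(l0.T, d)  # MSE gradient at the current weights
grad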
weights = np.array((1., 100.))
for _ in range(51):
    # layer 1
    l1 = np.dot(l0, weights)
    # deltas of layer 1
    d = y - l1
    # print MSE
    if _ % 5 == 0:
        print('MSE after %4d iterations: %6.2f' % (_, (d ** 2).mean()))
    # update weights based on deltas
    weights += alpha * np.dot(d, l0)
yr = np.polyval(reg, x)
plt.plot(x, y, 'ro')
plt.plot(x, yr, 'b')
plt.plot(x, l1, 'm--')
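# Added comparison (not in the original): the weights reached by gradient
# descent, ordered [slope, intercept] like reg, next to the OLS coefficients
# from np.polyfit; with this learning rate and only 51 iterations the two
# need not coincide closely yet.
print('gradient descent:', weights)
print('np.polyfit:      ', reg)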
# sigmoid activation function; with deriv=True it returns the first derivative
def sigmoid(x, deriv=False):
    if deriv:
        return sigmoid(x) * (1 - sigmoid(x))
    return 1 / (1 + np.exp(-x))
x = np.linspace(-10, 10, 250)
y = sigmoid(x)
d = sigmoid(x, deriv=True)
s = np.where(x > 0, 1, 0)
fig, ax = plt.subplots(2, sharex=True, figsize=(10, 8))
ax[0].plot(x, y, 'b')
ax[0].plot(x, s, 'm--')
ax[1].plot(x, d, 'g');
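# Optional numerical check (an addition, not in the original): the analytic
# derivative should agree with a finite-difference approximation of the
# sigmoid on the same grid, up to a small discretization error.
np.allclose(np.gradient(y, x), d, atol=1e-3)  # expected to evaluate to True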
# input dataset (features)
# layer 0
l0 = np.array([[0, 0, 1],
               [0, 1, 1],
               [1, 0, 1],
               [1, 1, 1]])
# output dataset (labels)
y = np.array([[0, 0, 1, 1]]).T
# initialize weights randomly with mean 0
np.random.seed(1)
weights = 2 * np.random.random((3, 1)) - 1
weights
np.dot(l0, weights)
l1 = sigmoid(np.dot(l0, weights))
l1
e = y - l1
e
(e ** 2).mean()
sigmoid(l1, True)
d = e * sigmoid(l1, True)
d
u = np.dot(l0.T, d)
u
weights += u
weights
l1 = sigmoid(np.dot(l0, weights))
e = y - l1
(e ** 2).mean()
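# Added illustration (not in the original): labels and the layer-1 output
# after this single update, column-stacked for a quick visual comparison.
np.hstack((y, l1))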
# initialize weights randomly with mean 0
np.random.seed(1)
weights = 2 * np.random.random((3, 1)) - 1
weights
for _ in range(1001):
    # forward propagation
    # layer 1
    l1 = sigmoid(np.dot(l0, weights))
    # errors of layer 1
    e = y - l1
    if _ % 200 == 0:
        print('\nafter %d iterations' % _)
        print('layer 1:', l1.T)
        print('errors: ', e.T)
        print('MSE: ', (e ** 2).mean())
    # multiply errors by the slope of the
    # sigmoid at the values in l1
    d = e * sigmoid(l1, True)
    # update weights
    weights += np.dot(l0.T, d)
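# Added illustration (not in the original): outputs of the trained network on
# the four training patterns, next to the labels and rounded to hard 0/1
# predictions; the 0.5 rounding threshold is an assumption, not part of the
# original code.
l1 = sigmoid(np.dot(l0, weights))
np.hstack((y, l1, l1.round()))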