This article presents Python code implementing the gradient descent algorithm, shared for your reference. The details are as follows.
Introduction
This article uses python to implement the gradient descent algorithm and supports linear regression of y = Wx+b
Currently supports batch gradient algorithm and stochastic gradient descent algorithm (bs=1)
It also supports visualization of the data when the input feature vector x has fewer than 3 dimensions
The code requires Python 3.4 or later
Code
'''
Gradient descent algorithm
Batch Gradient Descent
Stochastic Gradient Descent SGD
'''
__ author__ ='epleone'import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
# Use random number seed to make the random number generation the same every time, which is convenient for debugging
# np.random.seed(111111111)classGradientDescent(object):
eps =1.0e-8
max_iter =1000000 #not needed for now
dim =1
func_args =[2.1,2.7] # [w_0,.., w_dim, b]
def __init__(self, func_arg=None, N=1000):
self.data_num = N
if func_arg is not None:
self.FuncArgs = func_arg
self._getData()
def _getData(self):
x =20*(np.random.rand(self.data_num, self.dim)-0.5)
b_1 = np.ones((self.data_num,1), dtype=np.float)
# x = np.concatenate((x, b_1), axis=1)
self.x = np.concatenate((x, b_1), axis=1)
def func(self, x):
# If the noise is too large, the gradient descent method will not work
noise =0.01* np.random.randn(self.data_num)+0
w = np.array(self.func_args)
# y1 = w * self.x[0,] #Direct multiplication
y = np.dot(self.x, w) #Matrix multiplication
y += noise
return y
@ property
def FuncArgs(self):return self.func_args
@ FuncArgs.setter
def FuncArgs(self, args):if not isinstance(args, list):
raise Exception('args is not list, it should be like [w_0, ..., w_dim, b]')iflen(args)==0:
raise Exception('args is empty list!!')iflen(args)==1:
args.append(0.0)
self.func_args = args
self.dim =len(args)-1
self._getData()
@ property
def EPS(self):return self.eps
@ EPS.setter
def EPS(self, value):if not isinstance(value, float) and not isinstance(value, int):
raise Exception("The type of eps should be an float number")
self.eps = value
def plotFunc(self):
# One-dimensional drawing
if self.dim ==1:
# x = np.sort(self.x, axis=0)
x = self.x
y = self.func(x)
fig, ax = plt.subplots()
ax.plot(x, y,'o')
ax.set(xlabel='x ', ylabel='y', title='Loss Curve')
ax.grid()
plt.show()
# Two-dimensional drawing
if self.dim ==2:
# x = np.sort(self.x, axis=0)
x = self.x
y = self.func(x)
xs = x[:,0]
ys = x[:,1]
zs = y
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xs, ys, zs, c='r', marker='o')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()else:
# plt.axis('off')
plt.text(0.5,0.5,"The dimension(x.dim 2) \n is too high to draw",
size=17,
rotation=0.,
ha="center",
va="center",
bbox=dict(
boxstyle="round",
ec=(1.,0.5,0.5),
fc=(1.,0.8,0.8),))
plt.draw()
plt.show()
# print('The dimension(x.dim 2) is too high to draw')
# The gradient descent method can only solve convex functions
def _gradient_descent(self, bs, lr, epoch):
x = self.x
# The shuffle data set is not necessary
# np.random.shuffle(x)
y = self.func(x)
w = np.ones((self.dim +1,1), dtype=float)for e inrange(epoch):print('epoch:'+str(e), end=',')
# Batch gradient descent, when bs is 1, the equivalent single-sample gradient descent
for i inrange(0, self.data_num, bs):
y_ = np.dot(x[i:i + bs], w)
loss = y_ - y[i:i + bs].reshape(-1,1)
d = loss * x[i:i + bs]
d = d.sum(axis=0)/ bs
d = lr * d
d.shape =(-1,1)
w = w - d
y_ = np.dot(self.x, w)
loss_ =abs((y_ - y).sum())print('\tLoss = '+str(loss_))print('The result of the fitting is:', end=',')print(sum(w.tolist(),[]))print()if loss_ < self.eps:print('The Gradient Descent algorithm has converged!!\n')break
pass
def __call__(self, bs=1, lr=0.1, epoch=10):if sys.version_info <(3,4):
raise RuntimeError('At least Python 3.4 is required')if not isinstance(bs, int) or not isinstance(epoch, int):
raise Exception("The type of BatchSize/Epoch should be an integer number")
self._gradient_descent(bs, lr, epoch)
pass
pass
if __name__ =="__main__":if sys.version_info <(3,4):
raise RuntimeError('At least Python 3.4 is required')
gd =GradientDescent([1.2,1.4,2.1,4.5,2.1])
# gd =GradientDescent([1.2,1.4,2.1])print("The parameter result to be fitted is: ")print(gd.FuncArgs)print("===================\n\n")
# gd.EPS =0.0
gd.plotFunc()gd(10,0.01)print("Finished!")
The above is the whole content of this article; I hope it is helpful for your studies.
Recommended Posts