Reference link: Logistic regression in Python
Implementing the underlying logistic regression algorithm in Python: Regression is an important concept in statistics; its original purpose is to predict an accurate output value from previous data. Logistic regression, the third algorithm covered in the "Machine Learning" course, is currently the most widely used learning algorithm for solving classification problems. Like linear regression, it is a supervised learning algorithm. Tasks such as news classification, gene sequence analysis, and market segmentation, which divide samples according to their features, all use logistic regression. The final prediction output is either the positive class (1) or the negative class (0).
The logistic regression model is based on an "S"-shaped (sigmoid) function:
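Written out (this is exactly what the g() and model() functions below compute), the hypothesis is

h_θ(x) = g(θᵀx),  where g(z) = 1 / (1 + e^(−z)),

and its output can be read as the estimated probability that the sample belongs to the positive class (1).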
Cost function: if the sum-of-squared-errors cost from linear regression were plugged into this model, the resulting cost function would be non-convex and gradient descent could get stuck in a local minimum point. Logistic regression therefore uses a different cost function and minimizes it with gradient descent.
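As implemented in costFunc() and gradDesc() below, the cost that is actually minimized is the cross-entropy (log) cost

J(θ) = −(1/m) Σᵢ [ yᵢ·log(h_θ(xᵢ)) + (1 − yᵢ)·log(1 − h_θ(xᵢ)) ],

which is convex, and each gradient-descent step updates the parameters as

θ := θ − α·(1/m)·Xᵀ(h − y),

where α is the learning rate.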
import numpy as np
import matplotlib.pyplot as plt

# Let matplotlib render Chinese labels and minus signs correctly
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Load the training and test sets; the last column of each file is the class label
train_data = np.loadtxt(r'LoR_linear.txt', delimiter=',')
test_data = np.loadtxt(r'LoR_nonlinear.txt', delimiter=',')
train_X, train_y = train_data[:, :-1], train_data[:, -1]
test_X, test_y = test_data[:, :-1], test_data[:, -1]
def preProcess(X, y):
    # Feature scaling: standardize each feature to zero mean and unit variance
    X = X - np.mean(X, axis=0)
    X = X / np.std(X, axis=0, ddof=1)
    # Prepend a column of ones for the intercept term and make y a column vector
    X = np.c_[np.ones(len(X)), X]
    y = np.c_[y]
    return X, y

# Note: the test set is standardized with its own mean and standard deviation here;
# reusing the training-set statistics would be the stricter choice.
train_X, train_y = preProcess(train_X, train_y)
test_X, test_y = preProcess(test_X, test_y)
def g(x):
    # Sigmoid (logistic) function
    return 1 / (1 + np.exp(-x))

# Plot the S-shaped sigmoid curve
x = np.linspace(-10, 10, 500)
y = g(x)
plt.plot(x, y)
plt.show()
def model(X, theta):
    # Hypothesis: h = g(X·theta), the predicted probability of the positive class
    z = np.dot(X, theta)
    h = g(z)
    return h

def costFunc(h, y):
    # Cross-entropy (log) cost; note the element-wise multiplications
    m = len(y)
    J = -(1.0 / m) * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))
    return J
def gradDesc(X, y, max_iter=15000, alpha=0.1):
    # Batch gradient descent: alpha is the learning rate
    m, n = X.shape
    theta = np.zeros((n, 1))
    J_history = np.zeros(max_iter)
    for i in range(max_iter):
        h = model(X, theta)
        J_history[i] = costFunc(h, y)
        deltaTheta = (1.0 / m) * np.dot(X.T, h - y)  # gradient of the cost
        theta -= alpha * deltaTheta
    return J_history, theta
def score(h, y):
    # Accuracy: fraction of thresholded predictions that match the labels
    m = len(y)
    count = 0
    for i in range(m):
        h[i] = np.where(h[i] >= 0.5, 1, 0)
        if h[i] == y[i]:
            count += 1
    return count / m

def predict(h):
    # Turn predicted probabilities into class labels using a 0.5 threshold
    y_pre = [1 if i >= 0.5 else 0 for i in h]
    return y_pre
print(train_X.shape, train_y.shape)
J_history, theta = gradDesc(train_X, train_y)
print(theta)

# The cost should fall steadily as gradient descent converges
plt.title("cost function")
plt.plot(J_history)
plt.show()

# Predicted probabilities for the training and test sets
train_h = model(train_X, theta)
test_h = model(test_X, theta)
print(train_h, test_h)
def showDivide(X, theta, y, title):
    # Scatter the two classes and draw the decision boundary theta0 + theta1*x1 + theta2*x2 = 0
    plt.title(title)
    plt.scatter(X[y[:, 0] == 0, 1], X[y[:, 0] == 0, 2], label="negative sample")
    plt.scatter(X[y[:, 0] == 1, 1], X[y[:, 0] == 1, 2], label="positive sample")
    min_x1, max_x1 = np.min(X), np.max(X)
    min_x2 = -(theta[0] + theta[1] * min_x1) / theta[2]
    max_x2 = -(theta[0] + theta[1] * max_x1) / theta[2]
    plt.plot([min_x1, max_x1], [min_x2, max_x2])
    plt.legend()
    plt.show()

showDivide(train_X, theta, train_y, 'training set')
showDivide(test_X, theta, test_y, 'test set')
train_y1=predict(train_h)
print('The predicted result is:',train_y1)
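The score() helper defined above is never called in the listing; a minimal sketch of how it could be used to report accuracy on both sets is shown below (copies are passed because score() thresholds its input in place).

# Possible use of the score() helper defined above (not part of the original run):
# report classification accuracy on the training and test sets.
print('Training accuracy:', score(train_h.copy(), train_y))
print('Test accuracy:', score(test_h.copy(), test_y))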