Logistic Regression: Building the Model and the Training Workflow
There are many ready-made sample datasets for learning logistic regression. Here we instead use the dataset generation function provided by scikit-learn to create our own.
For the full list of parameters, see the official documentation.
Scikit-learn is an open-source machine learning library written in Python, widely used for data mining and data analysis.
- Features: easy to use and efficient, with a rich toolset covering classification, regression, clustering, and many other machine learning algorithms.
- Functionality: provides data preprocessing, model selection, model evaluation, and more, making it easy to build a complete machine learning workflow.
- Advantages: detailed documentation and examples plus an active community, which lowers development cost and raises productivity.
#Generate classification data
from sklearn.datasets import make_classification  #returns randomly generated numpy.ndarray arrays
from sklearn.model_selection import train_test_split  #splits a dataset into train/test subsets
import numpy as np

demo_X, demo_y = make_classification()  #defaults: 100 samples, 20 features, 2 classes
print(demo_X)  #features
print(demo_y)  #labels
print(demo_X.shape)
print(demo_y.shape)
[[ 0.96837399  0.69991556 -0.80719258 ...  1.07349589  0.60093101  1.25834368]
 [ 1.54064333 -0.72874526  0.05101656 ...  1.69469224  0.68078434 -0.22108232]
 [ 1.3130273   0.13845124 -0.17878829 ... -2.51988675  0.73565307 -0.61197128]
 ...
 [-0.96507974  0.62850721  0.25545924 ... -1.03533221 -0.0234112   1.86283345]
 [-1.09606837 -0.92451774 -0.59875319 ... -0.19421878  0.62418285 -0.26886614]
 [-0.18375534  0.12046227  0.52649374 ...  0.93921941  0.89650711  1.14815417]]
[0 0 1 1 1 0 1 1 0 1 1 1 1 1 0 0 0 1 1 1 1 1 0 0 1 1 1 1 1 0 0 0 1 0 0 1 0
 0 0 1 0 1 0 1 1 0 0 1 0 1 1 0 0 0 1 0 0 1 1 0 1 0 1 0 0 1 0 1 1 1 0 0 0 0
 0 0 0 1 1 1 1 0 0 1 1 0 1 0 0 1 1 1 0 0 1 0 0 1 1 0]
(100, 20)
(100,)
Inspecting the output shows that make_classification returns a tuple of two arrays; by default it generates 100 samples with 20 features and 2 classes.
demo_X[1]
array([ 1.54064333, -0.72874526,  0.05101656,  2.66218782,  1.94089634,
       -0.10555552,  0.12877297, -0.47275342, -0.23722334, -0.24897953,
        0.29021104, -1.03756101, -0.6875083 , -1.57963226,  1.81221622,
       -0.04901801, -0.91022508,  1.69469224,  0.68078434, -0.22108232])
demo_y[1]
0
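The defaults can be overridden. For reproducible experiments the generator's parameters can be pinned explicitly; the values below are illustrative choices only, not ones used elsewhere in these notes.

#Explicitly parameterized generation (values are illustrative)
X_demo2, y_demo2 = make_classification(
    n_samples=200,     #number of samples
    n_features=10,     #total feature count
    n_informative=5,   #features carrying real class signal
    n_redundant=2,     #linear combinations of informative features
    n_classes=2,       #number of classes
    random_state=42,   #fix the seed for reproducibility
)
print(X_demo2.shape, y_demo2.shape)  #(200, 10) (200,)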
Steps (the corresponding formulas are summarized just below):
- Data preparation and parameter initialization
- Model computation (forward) function
- Loss function
- Gradient computation
- Model training
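For reference, the quantities these steps compute are the standard logistic-regression formulation (with $m$ training samples):

$$z = \theta X^\top + b, \qquad \hat{y} = \sigma(z) = \frac{1}{1 + e^{-z}}$$

$$L(y, \hat{y}) = -\left[\, y \log \hat{y} + (1 - y)\log(1 - \hat{y}) \,\right]$$

$$\frac{\partial L}{\partial \theta} = \frac{1}{m}(\hat{y} - y)\,X, \qquad \frac{\partial L}{\partial b} = \frac{1}{m}\sum_{i=1}^{m}(\hat{y}_i - y_i)$$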
The data for model training usually also needs to be split into a training set and a test set. The purpose is to prevent data leakage.
**Data leakage** refers to the training data containing information it should not have. That information makes the model look better during evaluation than it would in a real application, so its performance is overestimated and its generalization ability is misjudged.
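As a concrete illustration (a minimal sketch, not from the original notes; StandardScaler is just one common preprocessing step): fitting a scaler on the full dataset before splitting lets test-set statistics leak into training, while fitting on the training split alone does not.

from sklearn.preprocessing import StandardScaler

#Hypothetical data purely for illustration
X_all, y_all = make_classification(n_samples=100, n_features=5, random_state=0)

#Leaky: the scaler's mean/std are computed on ALL rows, including future test rows
scaler_leaky = StandardScaler().fit(X_all)

#Correct: split first, then fit the scaler on the training rows only
X_tr, X_te, y_tr, y_te = train_test_split(X_all, y_all, test_size=0.3, random_state=0)
scaler = StandardScaler().fit(X_tr)   #statistics come from training data only
X_tr_s = scaler.transform(X_tr)
X_te_s = scaler.transform(X_te)       #test set is transformed but never fitted on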
#1. Data preparation and parameter initialization
#Generate training data: 150 samples, 10 features, 2 classes
X, y = make_classification(n_samples=150, n_features=10)  #X has shape (150, 10)

#Split the data: a model trained on only a local subset of samples overfits and predicts poorly on unseen data (poor generalization);
#a model trained on representative samples predicts well on the test set (good generalization);
#poor performance on new samples is the signature of poor generalization
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)  #hold out 30% as the test set
#X_train is used for fitting, X_test for evaluation, so overfitting is not rewarded
print(X_train.shape)
print(X_test.shape)
(105, 10)
(45, 10)
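As a side note, train_test_split also accepts stratify and random_state; the former keeps the class ratio the same in both splits and the latter makes the split reproducible (commented out here so it does not alter this run; the seed value is an arbitrary choice):

#Alternative: stratified, reproducible split
#X_train, X_test, y_train, y_test = train_test_split(
#    X, y, test_size=0.3, stratify=y, random_state=42)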
#Weight parameters
theta = np.random.randn(1, 10)  #one weight per feature, shape (1, 10), randomly initialized
#Bias parameter
bias = 0
#Hyperparameters
lr = 0.01  #learning rate
epochs = 3000  #number of training iterations
#Suppose X has shape (3, 3):
#[[x1, x2, x3],
# [x4, x5, x6],
# [x7, x8, x9]]
#X.T also has shape (3, 3):
#[[x1, x4, x7],
# [x2, x5, x8],
# [x3, x6, x9]]
#
#Suppose the model parameters theta have shape (1, 3):
#[[w1, w2, w3]]
#np.dot(theta, X.T): shape (1, 3) x shape (3, 3) -> shape (1, 3)
#y1 = w1*x1 + w2*x2 + w3*x3
#y2 = w1*x4 + w2*x5 + w3*x6
#y3 = w1*x7 + w2*x8 + w3*x9
#y = [[y1, y2, y3]]
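This shape bookkeeping can be checked directly with a throwaway example (the names below are illustrative only):

X_tiny = np.arange(9).reshape(3, 3)        #stands in for the x1..x9 matrix above
theta_tiny = np.array([[0.1, 0.2, 0.3]])   #[[w1, w2, w3]], shape (1, 3)
print(np.dot(theta_tiny, X_tiny.T).shape)  #(1, 3): one score per sample row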
#2. Model computation function
def forward(X, theta, bias):
    #linear computation
    z = np.dot(theta, X.T) + bias  #shape (1, 105)
    #sigmoid maps the linear output into the interval (0, 1)
    y_hat = 1/(1 + np.exp(-z))  #shape (1, 105)
    return y_hat

#3. Loss function (binary cross-entropy)
def loss(y, y_hat):
    e = 1e-8  #small epsilon to avoid log(0)
    return -y*np.log(y_hat + e) - (1 - y)*np.log(1 - y_hat + e)

#4. Gradient computation
def calc_gradient(x, y, y_hat):
    m = x.shape[0]  #number of samples (note: x.shape[-1] would be the feature count)
    delta_theta = np.dot(y_hat - y, x)/m  #shape (1, 10), gradient of the weights
    delta_bias = np.mean(y_hat - y)  #gradient of the bias
    return delta_theta, delta_bias

#5. Model training
for i in range(epochs):  #epochs is the number of training passes
    #forward pass
    y_hat = forward(X_train, theta, bias)
    #compute the loss
    loss_value = loss(y_train, y_hat)
    #compute the gradients
    delta_theta, delta_bias = calc_gradient(X_train, y_train, y_hat)
    #update the parameters
    theta = theta - lr*delta_theta
    bias = bias - lr*delta_bias
    if i % 100 == 0:
        print(f"epoch:{i},loss:{np.mean(loss_value)}")
        #training accuracy: [False,True,True,False,True] -> [0,1,1,0,1] -> 0.6
        acc = np.mean(np.round(y_hat) == y_train)
        print(f"epoch:{i},loss:{np.mean(loss_value)},acc:{acc}")
epoch:0,loss:0.9826484121456485
epoch:0,loss:0.9826484121456485,acc:0.6
epoch:100,loss:0.28410629245685803
epoch:100,loss:0.28410629245685803,acc:0.8857142857142857
epoch:200,loss:0.24510667568678654
epoch:200,loss:0.24510667568678654,acc:0.8666666666666667
epoch:300,loss:0.23505007869724906
epoch:300,loss:0.23505007869724906,acc:0.8571428571428571
epoch:400,loss:0.23103972248220034
epoch:400,loss:0.23103972248220034,acc:0.8666666666666667
epoch:500,loss:0.22908011250548505
epoch:500,loss:0.22908011250548505,acc:0.8761904761904762
epoch:600,loss:0.2280154352157034
epoch:600,loss:0.2280154352157034,acc:0.8761904761904762
epoch:700,loss:0.22739935077291418
epoch:700,loss:0.22739935077291418,acc:0.8761904761904762
epoch:800,loss:0.2270275272465418
epoch:800,loss:0.2270275272465418,acc:0.8761904761904762
epoch:900,loss:0.22679615751708468
epoch:900,loss:0.22679615751708468,acc:0.8761904761904762
epoch:1000,loss:0.2266487526019183
epoch:1000,loss:0.2266487526019183,acc:0.8761904761904762
epoch:1100,loss:0.22655303465776838
epoch:1100,loss:0.22655303465776838,acc:0.8761904761904762
epoch:1200,loss:0.22648987077080793
epoch:1200,loss:0.22648987077080793,acc:0.8761904761904762
epoch:1300,loss:0.22644759054763966
epoch:1300,loss:0.22644759054763966,acc:0.8761904761904762
epoch:1400,loss:0.2264189111347839
epoch:1400,loss:0.2264189111347839,acc:0.8666666666666667
epoch:1500,loss:0.22639920302759006
epoch:1500,loss:0.22639920302759006,acc:0.8666666666666667
epoch:1600,loss:0.22638547862134517
epoch:1600,loss:0.22638547862134517,acc:0.8666666666666667
epoch:1700,loss:0.22637578566413857
epoch:1700,loss:0.22637578566413857,acc:0.8666666666666667
epoch:1800,loss:0.22636883499686028
epoch:1800,loss:0.22636883499686028,acc:0.8666666666666667
epoch:1900,loss:0.2263637676238141
epoch:1900,loss:0.2263637676238141,acc:0.8666666666666667
epoch:2000,loss:0.2263600065967938
epoch:2000,loss:0.2263600065967938,acc:0.8666666666666667
epoch:2100,loss:0.22635716155827576
epoch:2100,loss:0.22635716155827576,acc:0.8666666666666667
epoch:2200,loss:0.2263549665310887
epoch:2200,loss:0.2263549665310887,acc:0.8666666666666667
epoch:2300,loss:0.2263532389959844
epoch:2300,loss:0.2263532389959844,acc:0.8666666666666667
epoch:2400,loss:0.2263518527619451
epoch:2400,loss:0.2263518527619451,acc:0.8666666666666667
epoch:2500,loss:0.22635071986180172
epoch:2500,loss:0.22635071986180172,acc:0.8666666666666667
epoch:2600,loss:0.22634977840264098
epoch:2600,loss:0.22634977840264098,acc:0.8666666666666667
epoch:2700,loss:0.22634898437242262
epoch:2700,loss:0.22634898437242262,acc:0.8666666666666667
epoch:2800,loss:0.2263483060903649
epoch:2800,loss:0.2263483060903649,acc:0.8666666666666667
epoch:2900,loss:0.2263477204327887
epoch:2900,loss:0.2263477204327887,acc:0.8666666666666667
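The loop above reports accuracy only on the training split. A quick check on the held-out test set, reusing the trained theta and bias, gives a better picture of generalization (a minimal sketch):

#Evaluate on the held-out test set
test_hat = forward(X_test, theta, bias)           #shape (1, 45)
test_acc = np.mean(np.round(test_hat) == y_test)  #rounded probabilities vs. labels
print(f"test acc:{test_acc}")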
#Model inference
idx = np.random.randint(len(X_test))  #pick a random index into the test set
x = X_test[idx]  #shape (10,)
y = y_test[idx]  #scalar label
#Model prediction
predict = np.round(forward(x,theta,bias))
print(f"y:{y},predict:{predict}")
y:1,predict:[1.]
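For comparison, scikit-learn ships a ready-made LogisticRegression; a sketch on the same split is below. Note it uses a different optimizer (lbfgs) and adds L2 regularization by default, so its numbers will not match the manual loop exactly.

from sklearn.linear_model import LogisticRegression

clf = LogisticRegression()        #defaults: lbfgs solver, L2 penalty
clf.fit(X_train, y_train)         #train on the same training split
print(clf.score(X_test, y_test))  #mean accuracy on the held-out test set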