
Python Training Camp Check-in: Day 37

Knowledge points recap:
  1. Judging overfitting: print the train-set and test-set metrics side by side
  2. Saving and loading models (a minimal sketch follows the homework line below)
    1. Saving the weights only
    2. Saving the weights together with the model
    3. Saving a full checkpoint that also includes the training state
  3. Early stopping

Homework: train on the credit dataset and save the weights, then load the weights, continue training for another 50 epochs, and apply an early-stopping strategy.
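
Before the full script, here is a minimal sketch of the three saving styles listed in the recap. The tiny stand-in model, placeholder training state, and file names are illustrative assumptions, not part of the assignment.

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)                              # tiny stand-in model, for illustration only
optimizer = optim.SGD(model.parameters(), lr=0.01)
epoch, best_test_loss = 0, float('inf')              # placeholder training state

# 1) Weights only: smallest file; the model class must be re-created before loading
torch.save(model.state_dict(), 'model_weights.pth')
model.load_state_dict(torch.load('model_weights.pth'))

# 2) Weights + model structure: pickles the whole nn.Module object (less portable across code changes)
torch.save(model, 'model_full.pth')
model = torch.load('model_full.pth', weights_only=False)

# 3) Full checkpoint: weights plus optimizer state and training progress, used for resuming
torch.save({'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'epoch': epoch,
            'best_test_loss': best_test_loss}, 'checkpoint.pth')
ckpt = torch.load('checkpoint.pth')
model.load_state_dict(ckpt['model_state_dict'])
optimizer.load_state_dict(ckpt['optimizer_state_dict'])

The training script below uses style 3, since resuming training also needs the optimizer state and the best-loss bookkeeping.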

import pandas as pd                 # data processing and analysis for tabular data
import numpy as np                  # numerical computing with efficient array operations
import matplotlib.pyplot as plt     # plotting
import seaborn as sns               # statistical plotting library built on matplotlib
import warnings
warnings.filterwarnings("ignore")

import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
import time
from tqdm import tqdm               # tqdm for progress bars
import os

# Set a Chinese-capable font (fixes Chinese labels in matplotlib)
plt.rcParams['font.sans-serif'] = ['SimHei']  # common sans-serif font on Windows
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly

data = pd.read_csv('data.csv')      # load the data

# Select the GPU if one is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# First, pick out the string (object-dtype) columns
discrete_features = data.select_dtypes(include=['object']).columns.tolist()
# Home Ownership 标签编码
home_ownership_mapping = {'Own Home': 1,'Rent': 2,'Have Mortgage': 3,'Home Mortgage': 4
}
data['Home Ownership'] = data['Home Ownership'].map(home_ownership_mapping)# Years in current job 标签编码
years_in_job_mapping = {'< 1 year': 1,'1 year': 2,'2 years': 3,'3 years': 4,'4 years': 5,'5 years': 6,'6 years': 7,'7 years': 8,'8 years': 9,'9 years': 10,'10+ years': 11
}
data['Years in current job'] = data['Years in current job'].map(years_in_job_mapping)# Purpose 独热编码,记得需要将bool类型转换为数值
data = pd.get_dummies(data, columns=['Purpose'])
data2 = pd.read_csv("data.csv") # 重新读取数据,用来做列名对比
list_final = [] # 新建一个空列表,用于存放独热编码后新增的特征名
for i in data.columns:if i not in data2.columns:list_final.append(i) # 这里打印出来的就是独热编码后的特征名
for i in list_final:data[i] = data[i].astype(int) # 这里的i就是独热编码后的特征名# Term 0 - 1 映射
term_mapping = {'Short Term': 0,'Long Term': 1
}
data['Term'] = data['Term'].map(term_mapping)
data.rename(columns={'Term': 'Long Term'}, inplace=True) # 重命名列
continuous_features = data.select_dtypes(include=['int64', 'float64']).columns.tolist()  #把筛选出来的列名转换成列表# 连续特征用中位数补全
for feature in continuous_features:     mode_value = data[feature].mode()[0]            #获取该列的众数。data[feature].fillna(mode_value, inplace=True)          #用众数填充该列的缺失值,inplace=True表示直接在原数据上修改。# 最开始也说了 很多调参函数自带交叉验证,甚至是必选的参数,你如果想要不交叉反而实现起来会麻烦很多
# As noted at the very beginning, many tuning utilities come with built-in (sometimes mandatory)
# cross-validation, and avoiding it would actually be more work,
# so here we still split the dataset only once.
X = data.drop(['Credit Default'], axis=1)  # features (axis=1 drops a column)
y = data['Credit Default']                 # label

# 80/20 train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Min-max normalization
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Convert to tensors and move them to the selected device
X_train = torch.FloatTensor(X_train).to(device)
y_train = torch.LongTensor(y_train.values).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_test = torch.LongTensor(y_test.values).to(device)
# Model definition
class MLP(nn.Module):
    def __init__(self, input_size):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_size, 64)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(64, 2)

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# -------------------- Initial training stage (20000 epochs) --------------------
def initial_training():
    # Instantiate the model and move it to the GPU
    model = MLP(input_size=X_train.shape[1]).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    # Lists for recording metrics
    train_losses = []
    test_losses = []
    test_accuracies = []
    eval_epochs = []

    # Training parameters
    num_epochs = 20000
    best_test_loss = float('inf')
    patience = 50
    counter = 0

    # Training loop
    with tqdm(total=num_epochs, desc="Initial training progress") as pbar:
        for epoch in range(num_epochs):
            # Training step
            model.train()
            outputs = model(X_train)
            train_loss = criterion(outputs, y_train)

            # Backpropagation
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()

            # Evaluate the test loss every 200 epochs
            if (epoch + 1) % 200 == 0:
                model.eval()
                with torch.no_grad():
                    test_outputs = model(X_test)
                    test_loss = criterion(test_outputs, y_test)
                    # Accuracy
                    _, predicted = torch.max(test_outputs, 1)
                    accuracy = accuracy_score(y_test.cpu(), predicted.cpu())
                model.train()

                # Print the accuracy
                print(f"Epoch [{epoch+1}/{num_epochs}], Test Accuracy: {accuracy:.4f}")

                # Record the metrics
                train_losses.append(train_loss.item())
                test_losses.append(test_loss.item())
                test_accuracies.append(accuracy)
                eval_epochs.append(epoch + 1)

                # Save the best checkpoint
                if test_loss < best_test_loss:
                    best_test_loss = test_loss.item()
                    torch.save({
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'best_test_loss': best_test_loss,
                        'epoch': epoch
                    }, 'best_model.pth')
                    counter = 0
                else:
                    counter += 1
                    if counter >= patience:
                        print(f"\nInitial training early-stopped at epoch {epoch+1}")
                        break

                # Update the progress-bar postfix
                pbar.set_postfix({'Train Loss': f'{train_loss.item():.4f}', 'Test Loss': f'{test_loss.item():.4f}'})

            # Advance the progress bar
            if (epoch + 1) % 1000 == 0:
                pbar.update(1000)

    print("Initial training finished, best model saved")
    plot_metrics(train_losses, test_losses, test_accuracies, eval_epochs, title_prefix="Initial Training")
# -------------------- Plotting helper --------------------
def plot_metrics(train_losses, test_losses, test_accuracies, eval_epochs, title_prefix=""):
    plt.figure(figsize=(12, 5))

    # Loss curves
    plt.subplot(1, 2, 1)
    plt.plot(eval_epochs, train_losses, label='Train Loss')
    plt.plot(eval_epochs, test_losses, label='Test Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title(f'{title_prefix} Loss Curve')
    plt.legend()

    # Accuracy curve
    plt.subplot(1, 2, 2)
    plt.plot(eval_epochs, test_accuracies, label='Test Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.title(f'{title_prefix} Accuracy Curve')
    plt.legend()

    plt.tight_layout()
    plt.show()
# -------------------- Continue-training stage (50 epochs) --------------------
def continue_training():
    # Re-instantiate the model
    model = MLP(input_size=X_train.shape[1]).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    # Lists for recording metrics
    train_losses = []
    test_losses = []
    test_accuracies = []
    eval_epochs = []

    # Load the checkpoint
    if not os.path.exists('best_model.pth'):
        raise FileNotFoundError("Checkpoint file not found; run the initial training first")
    checkpoint = torch.load('best_model.pth')
    try:
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    except KeyError:
        # Backward compatibility with checkpoints that stored only the weights
        model.load_state_dict(torch.load('best_model.pth'))

    # Reset the training parameters
    num_epochs = 50
    best_test_loss = float('inf')  # reset independently of the previous run
    patience = 10
    counter = 0

    # Continue-training loop
    with tqdm(total=num_epochs, desc="Continue training progress") as pbar:
        for epoch in range(num_epochs):
            # Training step
            model.train()
            outputs = model(X_train)
            train_loss = criterion(outputs, y_train)

            # Backpropagation
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()

            # Evaluate the test loss and accuracy every epoch
            model.eval()
            with torch.no_grad():
                test_outputs = model(X_test)
                test_loss = criterion(test_outputs, y_test)
                _, predicted = torch.max(test_outputs, 1)
                accuracy = accuracy_score(y_test.cpu(), predicted.cpu())

            print(f"Epoch [{epoch+1}/{num_epochs}], Test Accuracy: {accuracy:.4f}")

            # Record the metrics
            train_losses.append(train_loss.item())
            test_losses.append(test_loss.item())
            test_accuracies.append(accuracy)
            eval_epochs.append(epoch + 1)
            model.train()

            # Early-stopping check (using the new parameters only)
            if test_loss < best_test_loss:
                best_test_loss = test_loss.item()
                counter = 0
            else:
                counter += 1
                if counter >= patience:
                    print(f"\nContinue training early-stopped at epoch {epoch+1}")
                    break

            # Update the progress bar
            pbar.set_postfix({
                'Train Loss': f'{train_loss.item():.4f}',
                'Test Loss': f'{test_loss.item():.4f}',
                'Test Acc': f'{accuracy:.4f}'
            })
            pbar.update(1)

    print("Continue training finished")
    plot_metrics(train_losses, test_losses, test_accuracies, eval_epochs, title_prefix="Continue Training")


if __name__ == "__main__":
    initial_training()
Initial training progress:   0%|          | 0/20000 [00:00<?, ?it/s, Train Loss=0.5753, Test Loss=0.5885]
Epoch [200/20000], Test Accuracy: 0.7060
Epoch [400/20000], Test Accuracy: 0.7060
Initial training progress:   0%|          | 0/20000 [00:00<?, ?it/s, Train Loss=0.5611, Test Loss=0.5740]
Epoch [600/20000], Test Accuracy: 0.7060
Epoch [800/20000], Test Accuracy: 0.7060
Initial training progress:   5%|▌         | 1000/20000 [00:01<00:16, 1168.82it/s, Train Loss=0.5458, Test Loss=0.5581]
Epoch [1000/20000], Test Accuracy: 0.7060
Epoch [1200/20000], Test Accuracy: 0.7060
Initial training progress:   5%|▌         | 1000/20000 [00:01<00:16, 1168.82it/s, Train Loss=0.5312, Test Loss=0.5427]
Epoch [1400/20000], Test Accuracy: 0.7140
Epoch [1600/20000], Test Accuracy: 0.7207
Initial training progress:  10%|█         | 2000/20000 [00:01<00:13, 1303.08it/s, Train Loss=0.5185, Test Loss=0.5291]
Epoch [1800/20000], Test Accuracy: 0.7267
Epoch [2000/20000], Test Accuracy: 0.7407
Initial training progress:  10%|█         | 2000/20000 [00:01<00:13, 1303.08it/s, Train Loss=0.5080, Test Loss=0.5179]
Epoch [2200/20000], Test Accuracy: 0.7473
Epoch [2400/20000], Test Accuracy: 0.7593
Initial training progress:  10%|█         | 2000/20000 [00:02<00:13, 1303.08it/s, Train Loss=0.4996, Test Loss=0.5090]
Epoch [2600/20000], Test Accuracy: 0.7640
Epoch [2800/20000], Test Accuracy: 0.7640
Initial training progress:  15%|█▌        | 3000/20000 [00:02<00:12, 1378.59it/s, Train Loss=0.4930, Test Loss=0.5021]
Epoch [3000/20000], Test Accuracy: 0.7653
Epoch [3200/20000], Test Accuracy: 0.7647
Initial training progress:  15%|█▌        | 3000/20000 [00:02<00:12, 1378.59it/s, Train Loss=0.4879, Test Loss=0.4968]
Epoch [3400/20000], Test Accuracy: 0.7653
Epoch [3600/20000], Test Accuracy: 0.7660
Initial training progress:  20%|██        | 4000/20000 [00:02<00:11, 1411.22it/s, Train Loss=0.4840, Test Loss=0.4927]
Epoch [3800/20000], Test Accuracy: 0.7667
Epoch [4000/20000], Test Accuracy: 0.7667
Initial training progress:  20%|██        | 4000/20000 [00:03<00:11, 1411.22it/s, Train Loss=0.4810, Test Loss=0.4895]
Epoch [4200/20000], Test Accuracy: 0.7667
Epoch [4400/20000], Test Accuracy: 0.7667
Initial training progress:  20%|██        | 4000/20000 [00:03<00:11, 1411.22it/s, Train Loss=0.4787, Test Loss=0.4871]
Epoch [4600/20000], Test Accuracy: 0.7667
Epoch [4800/20000], Test Accuracy: 0.7667
Initial training progress:  25%|██▌       | 5000/20000 [00:03<00:10, 1454.26it/s, Train Loss=0.4768, Test Loss=0.4852]
Epoch [5000/20000], Test Accuracy: 0.7673
Epoch [5200/20000], Test Accuracy: 0.7673
Initial training progress:  25%|██▌       | 5000/20000 [00:03<00:10, 1454.26it/s, Train Loss=0.4753, Test Loss=0.4836]
Epoch [5400/20000], Test Accuracy: 0.7673
Epoch [5600/20000], Test Accuracy: 0.7673
Initial training progress:  30%|███       | 6000/20000 [00:04<00:09, 1504.37it/s, Train Loss=0.4740, Test Loss=0.4822]
Epoch [5800/20000], Test Accuracy: 0.7673
Epoch [6000/20000], Test Accuracy: 0.7673
Initial training progress:  30%|███       | 6000/20000 [00:04<00:09, 1504.37it/s, Train Loss=0.4730, Test Loss=0.4811]
Epoch [6200/20000], Test Accuracy: 0.7673
Epoch [6400/20000], Test Accuracy: 0.7673
Initial training progress:  30%|███       | 6000/20000 [00:04<00:09, 1504.37it/s, Train Loss=0.4720, Test Loss=0.4802]
Epoch [6600/20000], Test Accuracy: 0.7673
Epoch [6800/20000], Test Accuracy: 0.7660
Initial training progress:  35%|███▌      | 7000/20000 [00:05<00:08, 1451.45it/s, Train Loss=0.4712, Test Loss=0.4794]
Epoch [7000/20000], Test Accuracy: 0.7660
Epoch [7200/20000], Test Accuracy: 0.7660
Initial training progress:  35%|███▌      | 7000/20000 [00:05<00:08, 1451.45it/s, Train Loss=0.4705, Test Loss=0.4787]
Epoch [7400/20000], Test Accuracy: 0.7660
Epoch [7600/20000], Test Accuracy: 0.7660
Initial training progress:  40%|████      | 8000/20000 [00:05<00:08, 1448.11it/s, Train Loss=0.4699, Test Loss=0.4780]
Epoch [7800/20000], Test Accuracy: 0.7660
Epoch [8000/20000], Test Accuracy: 0.7660
Initial training progress:  40%|████      | 8000/20000 [00:05<00:08, 1448.11it/s, Train Loss=0.4693, Test Loss=0.4775]
Epoch [8200/20000], Test Accuracy: 0.7660
Epoch [8400/20000], Test Accuracy: 0.7660
Initial training progress:  40%|████      | 8000/20000 [00:06<00:08, 1448.11it/s, Train Loss=0.4688, Test Loss=0.4769]
Epoch [8600/20000], Test Accuracy: 0.7660
Epoch [8800/20000], Test Accuracy: 0.7667
Initial training progress:  45%|████▌     | 9000/20000 [00:06<00:07, 1468.30it/s, Train Loss=0.4683, Test Loss=0.4765]
Epoch [9000/20000], Test Accuracy: 0.7667
Epoch [9200/20000], Test Accuracy: 0.7667
Initial training progress:  45%|████▌     | 9000/20000 [00:06<00:07, 1468.30it/s, Train Loss=0.4679, Test Loss=0.4761]
Epoch [9400/20000], Test Accuracy: 0.7667
Epoch [9600/20000], Test Accuracy: 0.7667
Initial training progress:  50%|█████     | 10000/20000 [00:06<00:06, 1505.67it/s, Train Loss=0.4675, Test Loss=0.4757]
Epoch [9800/20000], Test Accuracy: 0.7660
Epoch [10000/20000], Test Accuracy: 0.7667
Initial training progress:  50%|█████     | 10000/20000 [00:07<00:06, 1505.67it/s, Train Loss=0.4671, Test Loss=0.4753]
Epoch [10200/20000], Test Accuracy: 0.7667
Epoch [10400/20000], Test Accuracy: 0.7667
Initial training progress:  50%|█████     | 10000/20000 [00:07<00:06, 1505.67it/s, Train Loss=0.4667, Test Loss=0.4750]
Epoch [10600/20000], Test Accuracy: 0.7673
Epoch [10800/20000], Test Accuracy: 0.7680
Initial training progress:  55%|█████▌    | 11000/20000 [00:07<00:05, 1539.84it/s, Train Loss=0.4664, Test Loss=0.4747]
Epoch [11000/20000], Test Accuracy: 0.7680
Epoch [11200/20000], Test Accuracy: 0.7680
Initial training progress:  55%|█████▌    | 11000/20000 [00:07<00:05, 1539.84it/s, Train Loss=0.4661, Test Loss=0.4744]
Epoch [11400/20000], Test Accuracy: 0.7680
Epoch [11600/20000], Test Accuracy: 0.7680
Initial training progress:  60%|██████    | 12000/20000 [00:08<00:05, 1584.28it/s, Train Loss=0.4658, Test Loss=0.4742]
Epoch [11800/20000], Test Accuracy: 0.7687
Epoch [12000/20000], Test Accuracy: 0.7680
Initial training progress:  60%|██████    | 12000/20000 [00:08<00:05, 1584.28it/s, Train Loss=0.4655, Test Loss=0.4739]
Epoch [12200/20000], Test Accuracy: 0.7680
Epoch [12400/20000], Test Accuracy: 0.7680
Initial training progress:  60%|██████    | 12000/20000 [00:08<00:05, 1584.28it/s, Train Loss=0.4652, Test Loss=0.4737]
Epoch [12600/20000], Test Accuracy: 0.7680
Epoch [12800/20000], Test Accuracy: 0.7687
Initial training progress:  65%|██████▌   | 13000/20000 [00:08<00:04, 1618.84it/s, Train Loss=0.4650, Test Loss=0.4735]
Epoch [13000/20000], Test Accuracy: 0.7687
Epoch [13200/20000], Test Accuracy: 0.7687
Initial training progress:  65%|██████▌   | 13000/20000 [00:09<00:04, 1618.84it/s, Train Loss=0.4647, Test Loss=0.4733]
Epoch [13400/20000], Test Accuracy: 0.7687
Epoch [13600/20000], Test Accuracy: 0.7687
Initial training progress:  70%|███████   | 14000/20000 [00:09<00:03, 1635.55it/s, Train Loss=0.4645, Test Loss=0.4731]
Epoch [13800/20000], Test Accuracy: 0.7687
Epoch [14000/20000], Test Accuracy: 0.7693
Initial training progress:  70%|███████   | 14000/20000 [00:09<00:03, 1635.55it/s, Train Loss=0.4643, Test Loss=0.4730]
Epoch [14200/20000], Test Accuracy: 0.7687
Epoch [14400/20000], Test Accuracy: 0.7687
Initial training progress:  70%|███████   | 14000/20000 [00:09<00:03, 1635.55it/s, Train Loss=0.4641, Test Loss=0.4728]
Epoch [14600/20000], Test Accuracy: 0.7687
Epoch [14800/20000], Test Accuracy: 0.7687
Initial training progress:  75%|███████▌  | 15000/20000 [00:10<00:03, 1641.97it/s, Train Loss=0.4639, Test Loss=0.4727]
Epoch [15000/20000], Test Accuracy: 0.7687
Epoch [15200/20000], Test Accuracy: 0.7687
Initial training progress:  75%|███████▌  | 15000/20000 [00:10<00:03, 1641.97it/s, Train Loss=0.4637, Test Loss=0.4725]
Epoch [15400/20000], Test Accuracy: 0.7700
Epoch [15600/20000], Test Accuracy: 0.7707
Initial training progress:  80%|████████  | 16000/20000 [00:10<00:02, 1630.00it/s, Train Loss=0.4635, Test Loss=0.4724]
Epoch [15800/20000], Test Accuracy: 0.7707
Epoch [16000/20000], Test Accuracy: 0.7707
Initial training progress:  80%|████████  | 16000/20000 [00:10<00:02, 1630.00it/s, Train Loss=0.4633, Test Loss=0.4723]
Epoch [16200/20000], Test Accuracy: 0.7707
Epoch [16400/20000], Test Accuracy: 0.7707
Initial training progress:  80%|████████  | 16000/20000 [00:11<00:02, 1630.00it/s, Train Loss=0.4631, Test Loss=0.4722]
Epoch [16600/20000], Test Accuracy: 0.7707
Epoch [16800/20000], Test Accuracy: 0.7707
Initial training progress:  85%|████████▌ | 17000/20000 [00:11<00:01, 1629.38it/s, Train Loss=0.4629, Test Loss=0.4721]
Epoch [17000/20000], Test Accuracy: 0.7707
Epoch [17200/20000], Test Accuracy: 0.7707
Initial training progress:  85%|████████▌ | 17000/20000 [00:11<00:01, 1629.38it/s, Train Loss=0.4628, Test Loss=0.4720]
Epoch [17400/20000], Test Accuracy: 0.7707
Epoch [17600/20000], Test Accuracy: 0.7707
Initial training progress:  90%|█████████ | 18000/20000 [00:11<00:01, 1594.18it/s, Train Loss=0.4626, Test Loss=0.4719]
Epoch [17800/20000], Test Accuracy: 0.7707
Epoch [18000/20000], Test Accuracy: 0.7700
Initial training progress:  90%|█████████ | 18000/20000 [00:12<00:01, 1594.18it/s, Train Loss=0.4624, Test Loss=0.4718]
Epoch [18200/20000], Test Accuracy: 0.7700
Epoch [18400/20000], Test Accuracy: 0.7700
Initial training progress:  90%|█████████ | 18000/20000 [00:12<00:01, 1594.18it/s, Train Loss=0.4623, Test Loss=0.4717]
Epoch [18600/20000], Test Accuracy: 0.7693
Epoch [18800/20000], Test Accuracy: 0.7693
Initial training progress:  95%|█████████▌| 19000/20000 [00:12<00:00, 1597.16it/s, Train Loss=0.4621, Test Loss=0.4717]
Epoch [19000/20000], Test Accuracy: 0.7693
Epoch [19200/20000], Test Accuracy: 0.7693
Initial training progress:  95%|█████████▌| 19000/20000 [00:12<00:00, 1597.16it/s, Train Loss=0.4620, Test Loss=0.4716]
Epoch [19400/20000], Test Accuracy: 0.7693
Epoch [19600/20000], Test Accuracy: 0.7693
Initial training progress: 100%|██████████| 20000/20000 [00:13<00:00, 1534.45it/s, Train Loss=0.4619, Test Loss=0.4715]
Epoch [19800/20000], Test Accuracy: 0.7693
Epoch [20000/20000], Test Accuracy: 0.7693
Initial training finished, best model saved

continue_training()
Epoch [29/50], Test Accuracy: 0.7693
Epoch [30/50], Test Accuracy: 0.7693
Epoch [31/50], Test Accuracy: 0.7693
Epoch [32/50], Test Accuracy: 0.7693
Epoch [33/50], Test Accuracy: 0.7693
Epoch [34/50], Test Accuracy: 0.7693
Epoch [35/50], Test Accuracy: 0.7693
Epoch [36/50], Test Accuracy: 0.7693
Epoch [37/50], Test Accuracy: 0.7693
Epoch [38/50], Test Accuracy: 0.7693
Epoch [39/50], Test Accuracy: 0.7693
Epoch [40/50], Test Accuracy: 0.7693
Epoch [41/50], Test Accuracy: 0.7693
Epoch [42/50], Test Accuracy: 0.7693
Epoch [43/50], Test Accuracy: 0.7693
Epoch [44/50], Test Accuracy: 0.7693
Epoch [45/50], Test Accuracy: 0.7693
Epoch [46/50], Test Accuracy: 0.7693
Epoch [47/50], Test Accuracy: 0.7693
Epoch [48/50], Test Accuracy: 0.7693
Epoch [49/50], Test Accuracy: 0.7693
Epoch [50/50], Test Accuracy: 0.7693
Continue training finished

Summary

I. Initial training stage analysis (20,000 epochs)

1. Loss curves

  • Training loss: fell steadily from about 0.60 to 0.46, showing the model kept improving on the training set.

  • Test loss: fell from 0.58 to 0.47, a smaller drop, and leveled off late in training.

  • Key issues

    • The gap between training and test loss is small (0.46 vs 0.47), so the model is not obviously overfitting, but its learning capacity may be insufficient (a simple gap check is sketched after this list).

    • The test loss stopped improving further, possibly because the model is too simple or the useful signal in the features is hard to capture.
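
As a small illustration of the "print train and test metrics side by side" idea behind this judgment, a gap check might look like the sketch below; the 0.05 threshold is an arbitrary choice for illustration, not something used in the script.

# Sketch: judging overfitting from the gap between the final train and test losses
train_loss_value, test_loss_value = 0.46, 0.47    # final values reported above
gap = test_loss_value - train_loss_value
print(f"train={train_loss_value:.2f}, test={test_loss_value:.2f}, gap={gap:.2f}")
if gap > 0.05:                                    # threshold picked arbitrarily for this sketch
    print("Test loss is noticeably higher than train loss -> likely overfitting")
else:
    print("Losses are close -> no obvious overfitting; capacity is the more likely bottleneck")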

2. Accuracy curve

  • Test accuracy: eventually stabilized at 76.93% and never broke 77%.

  • Key issues

    • The accuracy curve is completely flat late in training, indicating the model has converged to a local optimum and hit a clear performance ceiling.

    • Possible causes: a simple architecture (only two fully connected layers), limited feature engineering, or class imbalance (a sketch of a deeper variant follows this list).
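
One direction suggested above is a slightly larger network. The class below is only a sketch of such a variant (layer widths and the dropout rate are arbitrary, untuned choices), kept interface-compatible with the script's MLP so it could be swapped in for experimentation.

import torch.nn as nn

class DeeperMLP(nn.Module):
    """Hypothetical wider/deeper variant of the script's MLP; sizes are illustrative only."""
    def __init__(self, input_size):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_size, 128), nn.ReLU(), nn.Dropout(0.3),
            nn.Linear(128, 64), nn.ReLU(),
            nn.Linear(64, 2),
        )

    def forward(self, x):
        return self.net(x)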


II. Continue-training stage analysis (50 epochs)

1. Loss curves

  • Training/test loss: essentially unchanged (stuck at 0.4619 and 0.4715).

  • Key issues

    • After loading the best model from the initial run, continued training brought no further improvement, suggesting the parameters have already converged and the optimizer cannot find a better direction.

2. Accuracy curve

  • Test accuracy: stayed at 76.93% throughout, identical to the initial-training result.

  • Key issues

    • Continued training failed to improve performance, indicating the model has reached its capacity limit or the training strategy needs adjusting (one possible adjustment is sketched below).
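
If the training strategy were adjusted when resuming, one common tweak is to keep the checkpoint but shrink the learning rate. The snippet below is only a sketch of that idea (the 0.1 factor is arbitrary); it assumes the definitions from the script above (MLP, X_train, device, and the saved best_model.pth).

import torch
import torch.optim as optim

# Sketch: resume from the saved checkpoint but with a reduced learning rate
checkpoint = torch.load('best_model.pth')
model = MLP(input_size=X_train.shape[1]).to(device)
model.load_state_dict(checkpoint['model_state_dict'])

optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
for group in optimizer.param_groups:
    group['lr'] *= 0.1            # arbitrary reduction factor for this sketch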

 
