5.26打卡
@浙大疏锦行
DAY 35 模型可视化与推理
外出几天,所以提前更新几日作业,对应5.24作业
知识点回顾:
1. 三种不同的模型可视化方法:推荐torchinfo打印summary+权重分布可视化
2. 进度条功能:手动和自动写法,让打印结果更加美观
3. 推理的写法:评估模式
作业:调整模型定义时的超参数,对比下效果。
使用设备: cuda:0
原始模型训练进度: 100%|██████████| 20000/20000 [00:20<00:00, 977.14epoch/s, Loss=0.0627]
原始模型测试集准确率: 96.67%
调整后模型训练进度: 100%|██████████| 30000/30000 [00:29<00:00, 1033.96epoch/s, Loss=0.2562]
调整后模型测试集准确率: 96.67% 准确率提升: 0.00%
import time

import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
# Select the GPU if available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")
# Load the Iris dataset (150 samples, 4 features, 3 classes).
iris = load_iris()
X = iris.data  # feature matrix, shape (150, 4)
y = iris.target  # class labels, values 0/1/2
# Split into train/test sets (80/20), fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Min-max scale features to [0, 1]; fit on train only to avoid test leakage.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Convert to PyTorch tensors and move to the selected device.
# LongTensor is required because CrossEntropyLoss expects integer class indices.
X_train = torch.FloatTensor(X_train).to(device)
y_train = torch.LongTensor(y_train).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_test = torch.LongTensor(y_test).to(device)
# Original model
class MLP(nn.Module):def __init__(self):super(MLP, self).__init__()self.fc1 = nn.Linear(4, 10) # 输入层到隐藏层self.relu = nn.ReLU()self.fc2 = nn.Linear(10, 3) # 隐藏层到输出层def forward(self, x):out = self.fc1(x)out = self.relu(out)out = self.fc2(out)return out# 实例化原始模型并移至GPU
# Instantiate the original model and move it to the device.
original_model = MLP().to(device)
# Cross-entropy loss for the 3-class classification problem.
original_criterion = nn.CrossEntropyLoss()
# Plain SGD optimizer.
original_optimizer = optim.SGD(original_model.parameters(), lr=0.01)  # Train the original model
num_epochs = 20000
original_losses = []  # loss sampled every 200 epochs, for plotting
with tqdm(total=num_epochs, desc="原始模型训练进度", unit="epoch") as pbar:for epoch in range(num_epochs):# 前向传播outputs = original_model(X_train) # 隐式调用forward函数loss = original_criterion(outputs, y_train)# 反向传播和优化original_optimizer.zero_grad()loss.backward()original_optimizer.step()# 记录损失值并更新进度条if (epoch + 1) % 200 == 0:original_losses.append(loss.item())# 更新进度条的描述信息pbar.set_postfix({'Loss': f'{loss.item():.4f}'})# 每1000个epoch更新一次进度条if (epoch + 1) % 1000 == 0:pbar.update(1000) # 更新进度条# 确保进度条达到100%if pbar.n < num_epochs:pbar.update(num_epochs - pbar.n) # 计算剩余的进度并更新# 评估原始模型
# Evaluate the original model on the held-out test set.
original_model.eval()  # evaluation mode (disables dropout/batchnorm updates)
with torch.no_grad():  # no gradients needed for inference
    outputs = original_model(X_test)
    # Predicted class = index of the max logit along the class dimension.
    _, predicted = torch.max(outputs, 1)
    correct = (predicted == y_test).sum().item()
    original_accuracy = correct / y_test.size(0)
    print(f'原始模型测试集准确率: {original_accuracy * 100:.2f}%')
class ModifiedMLP(nn.Module):def __init__(self):super(ModifiedMLP, self).__init__()self.fc1 = nn.Linear(4, 20) # 输入层到隐藏层,神经元数量调整为20self.relu = nn.ReLU()self.fc2 = nn.Linear(20, 3) # 隐藏层到输出层def forward(self, x):out = self.fc1(x)out = self.relu(out)out = self.fc2(out)return out# 实例化调整后的模型并移至GPU
# Instantiate the adjusted model and move it to the device.
modified_model = ModifiedMLP().to(device)
# Cross-entropy loss for the 3-class classification problem.
modified_criterion = nn.CrossEntropyLoss()
# SGD optimizer with the learning rate lowered to 0.001.
modified_optimizer = optim.SGD(modified_model.parameters(), lr=0.001)  # Train the adjusted model
num_epochs = 30000
modified_losses = []  # loss sampled every 200 epochs, for plotting
with tqdm(total=num_epochs, desc="调整后模型训练进度", unit="epoch") as pbar:for epoch in range(num_epochs):# 前向传播outputs = modified_model(X_train) # 隐式调用forward函数loss = modified_criterion(outputs, y_train)# 反向传播和优化modified_optimizer.zero_grad()loss.backward()modified_optimizer.step()# 记录损失值并更新进度条if (epoch + 1) % 200 == 0:modified_losses.append(loss.item())# 更新进度条的描述信息pbar.set_postfix({'Loss': f'{loss.item():.4f}'})# 每1000个epoch更新一次进度条if (epoch + 1) % 1000 == 0:pbar.update(1000) # 更新进度条# 确保进度条达到100%if pbar.n < num_epochs:pbar.update(num_epochs - pbar.n) # 计算剩余的进度并更新# 评估调整后的模型
# Evaluate the adjusted model on the held-out test set.
modified_model.eval()  # evaluation mode (disables dropout/batchnorm updates)
with torch.no_grad():  # no gradients needed for inference
    outputs = modified_model(X_test)
    # Predicted class = index of the max logit along the class dimension.
    _, predicted = torch.max(outputs, 1)
    correct = (predicted == y_test).sum().item()
    modified_accuracy = correct / y_test.size(0)
    print(f'调整后模型测试集准确率: {modified_accuracy * 100:.2f}%')
print(f'准确率提升: {(modified_accuracy - original_accuracy) * 100:.2f}%')# 可视化两个模型的损失曲线
# Overlay the sampled training-loss curves of both models for comparison.
curves = (
    (original_losses, 'Original Model'),
    (modified_losses, 'Modified Model'),
)
for losses, curve_label in curves:
    plt.plot(range(len(losses)), losses, label=curve_label)
plt.xlabel('Epoch (per 200)')
plt.ylabel('Loss')
plt.title('Training Loss Comparison')
plt.legend()
plt.show()