Python 训练营打卡 Day 36
以信贷风险分析的数据集为例,利用神经网络进行训练
一、数据的预处理
# 导入相关库
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder, MinMaxScaler
import torch
import torch.nn as nn
import torch.optim as optim
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import time
# Pick the compute device: the first CUDA GPU when one is present, else CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
print(f"Using device: {device}")
# Load the credit-risk dataset.
data = pd.read_csv('data.csv')

# Quick inspection of the raw data.
data.head()
data.info()

# Drop the row identifier: it carries no predictive signal.
data.drop(columns=['Id'], inplace=True)

# Split columns into continuous (numeric) and discrete (everything else).
continuous_features = data.select_dtypes(include=['float64', 'int64']).columns.tolist()
discrete_features = data.select_dtypes(exclude=['float64', 'int64']).columns.tolist()

# Inspect missing-value counts per column.
data.isnull().sum()

# Impute missing values.
# Continuous features -> median (robust to outliers).
# NOTE: the original collapsed one-liners (`for ...: if ...: ...`) were
# invalid Python; reformatted here. Column-level `fillna(..., inplace=True)`
# is also replaced by assignment, which avoids pandas' chained-assignment
# deprecation warning.
for feature in continuous_features:
    if data[feature].isnull().sum() > 0:
        data[feature] = data[feature].fillna(data[feature].median())
# Discrete features -> mode (most frequent category).
for feature in discrete_features:
    if data[feature].isnull().sum() > 0:
        data[feature] = data[feature].fillna(data[feature].mode()[0])

# Verify that no missing values remain.
data.isnull().sum()
# Label-encode the ordered discrete variables with explicit mappings.
# NOTE(review): the "Home Ownership" codes look arbitrary rather than truly
# ordinal — confirm the intended ordering if this matters downstream.
mappings = {
    "Years in current job": {
        "10+ years": 10,
        "2 years": 2,
        "3 years": 3,
        "< 1 year": 0,
        "5 years": 5,
        "1 year": 1,
        "4 years": 4,
        "6 years": 6,
        "7 years": 7,
        "8 years": 8,
        "9 years": 9,
    },
    "Home Ownership": {
        "Home Mortgage": 0,
        "Rent": 1,
        "Own Home": 2,
        "Have Mortgage": 3,
    },
    "Term": {
        "Short Term": 0,
        "Long Term": 1,
    },
}
# Apply the mapping dictionaries.
data["Years in current job"] = data["Years in current job"].map(mappings["Years in current job"])
data["Home Ownership"] = data["Home Ownership"].map(mappings["Home Ownership"])
data["Term"] = data["Term"].map(mappings["Term"])

# One-hot encode the unordered discrete variable.
data = pd.get_dummies(data, columns=['Purpose'])

# get_dummies creates boolean columns; convert the new ones to int.
data2 = pd.read_csv("data.csv")  # reload raw data only to diff the column names
# Columns present after encoding but absent in the raw file are the new dummies.
list_final = [i for i in data.columns if i not in data2.columns]
for i in list_final:
    data[i] = data[i].astype(int)
# Separate features and label.
x = data.drop(['Credit Default'], axis=1)
y = data['Credit Default']

# 80/20 train/test split; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)

# Scale features to [0, 1]; neural networks are sensitive to input scale.
# Fit on the training set only, then apply to the test set (no leakage).
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)  # BUG FIX: was X_train (undefined -> NameError)
x_test = scaler.transform(x_test)        # BUG FIX: was X_test (undefined -> NameError)

# Convert to PyTorch tensors and move to the selected device.
# CrossEntropyLoss requires integer (long) class labels.
x_train = torch.FloatTensor(x_train).to(device)
y_train = torch.LongTensor(y_train.values).to(device)  # .values: Series -> ndarray
x_test = torch.FloatTensor(x_test).to(device)
y_test = torch.LongTensor(y_test.values).to(device)

# Sanity-check the tensor shapes.
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
二、模型架构定义
定义一个多层感知机(MLP)模型,包含一个输入层、两个隐藏层和一个输出层
class MLP(nn.Module):
    """Three-layer perceptron for binary classification: 30 -> 64 -> 32 -> 2.

    Returns raw logits; no softmax is applied here because
    nn.CrossEntropyLoss performs log-softmax internally.
    (The original source had the whole class collapsed onto one line,
    which is a SyntaxError; reformatted here.)
    """

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(30, 64)    # input layer -> first hidden layer
        self.relu = nn.ReLU()           # shared activation
        self.dropout = nn.Dropout(0.3)  # regularization against overfitting
        self.fc2 = nn.Linear(64, 32)    # first -> second hidden layer
        self.fc3 = nn.Linear(32, 2)     # second hidden layer -> 2 class logits

    def forward(self, x):
        """Forward pass: two hidden layers with ReLU + dropout, linear output."""
        x = self.fc1(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc3(x)
        return x
model = MLP().to(device)  # instantiate the network and move it to the selected device
三、模型训练
# Loss for classification: CrossEntropyLoss applies softmax + negative
# log-likelihood internally, so the model outputs raw logits.
criterion = nn.CrossEntropyLoss()
# Plain stochastic gradient descent with a fixed learning rate.
optimizer = optim.SGD(model.parameters(), lr=0.01)

num_epochs = 20000  # number of full-batch training epochs
losses = []  # loss samples taken every 200 epochs (for plotting)
epochs = []  # epoch numbers matching `losses`

from tqdm import tqdm  # progress-bar display

start_time = time.time()
# (The original collapsed with-block/loop was a SyntaxError; reformatted.)
with tqdm(total=num_epochs, desc="训练进度", unit="epoch") as pbar:
    for epoch in range(num_epochs):
        # Forward pass over the whole training set (implicitly calls forward()).
        outputs = model(x_train)
        loss = criterion(outputs, y_train)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Sample the loss every 200 epochs for the loss curve.
        if (epoch + 1) % 200 == 0:
            losses.append(loss.item())
            epochs.append(epoch + 1)
            pbar.set_postfix({'Loss': f'{loss.item():.4f}'})

        # Advance the bar in coarse 1000-epoch steps to keep overhead low.
        if (epoch + 1) % 1000 == 0:
            pbar.update(1000)

    # Make sure the bar reaches 100%.
    if pbar.n < num_epochs:
        pbar.update(num_epochs - pbar.n)

time_all = time.time() - start_time  # total wall-clock training time
print(f'Training time: {time_all:.2f} seconds')
四、可视化分析与评估模型
# Plot the sampled training-loss curve.
plt.figure(figsize=(10, 6))
plt.plot(epochs, losses, 'b-', linewidth=2)           # solid blue line
plt.title('Training Loss Curve', fontsize=16)
plt.xlabel('Epochs', fontsize=14)
plt.ylabel('Loss', fontsize=14)
plt.grid(True, linestyle='--', alpha=0.5)             # dashed, semi-transparent grid
plt.show()

# Evaluate on the held-out test set.
# (The original collapsed no_grad block was a SyntaxError; reformatted.
# The correct/total accumulators were redundant for a single batch and
# are simplified to direct assignments — same result.)
model.eval()  # evaluation mode: disables dropout
with torch.no_grad():  # no gradient tracking needed for inference
    outputs = model(x_test)
    _, predicted = torch.max(outputs.data, 1)  # index of the largest logit = predicted class
    total = y_test.size(0)
    correct = (predicted == y_test).sum().item()
    accuracy = 100 * correct / total
print(f'Accuracy on test set: {accuracy:.2f}%')