
Python learning check-in: day 36

DAY 36: Review Day

Corresponds to the 5.25 assignment

Carefully review the neural network material covered so far; anyone who has not kept up with the schedule should use today to catch up.

  • Assignment: take the earlier credit-default project and train it with a neural network, using the knowledge points covered so far to make the code cleaner and better organized (a possible refactor sketch is given after the full script below).
  • Exploratory assignment (optional): step into nn.Module and look at the methods it provides; see the sketch right after this list.
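
For the exploratory assignment, here is a minimal sketch of how one might poke around nn.Module from a Python session. Nothing in it is specific to this project, and the exact names printed will depend on the installed PyTorch version.

import inspect
import torch.nn as nn

# List the public methods/attributes that the nn.Module base class exposes
public_api = [name for name in dir(nn.Module) if not name.startswith("_")]
print(public_api)  # e.g. 'parameters', 'train', 'eval', 'to', 'state_dict', ...

# Print the source code of a method you are curious about
print(inspect.getsource(nn.Module.train))

# Locate the file that defines nn.Module so you can open it in an editor
print(inspect.getsourcefile(nn.Module))

The full script for the main assignment follows.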
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris  # not used in this script
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler, OneHotEncoder, LabelEncoder
import time
import matplotlib.pyplot as plt
from tqdm import tqdm
from imblearn.over_sampling import SMOTE  # imported but not used below
# Select the GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Load the credit-default dataset
data = pd.read_csv('data.csv')

# Drop the Id column
data = data.drop(['Id'], axis=1)

# Separate continuous and discrete features
continuous_features = data.select_dtypes(include=['float64', 'int64']).columns.tolist()
discrete_features = data.select_dtypes(exclude=['float64', 'int64']).columns.tolist()

# Fill missing values in discrete features with the mode
for feature in discrete_features:
    if data[feature].isnull().sum() > 0:
        mode_value = data[feature].mode()[0]
        data[feature].fillna(mode_value, inplace=True)

# Fill missing values in continuous features with the median
for feature in continuous_features:
    if data[feature].isnull().sum() > 0:
        median_value = data[feature].median()
        data[feature].fillna(median_value, inplace=True)
# Label-encode the ordinal discrete variables
mappings = {
    "Years in current job": {
        "10+ years": 10, "9 years": 9, "8 years": 8, "7 years": 7,
        "6 years": 6, "5 years": 5, "4 years": 4, "3 years": 3,
        "2 years": 2, "1 year": 1, "< 1 year": 0
    },
    "Home Ownership": {
        "Home Mortgage": 0, "Rent": 1, "Own Home": 2, "Have Mortgage": 3
    },
    "Term": {
        "Short Term": 0, "Long Term": 1
    }
}

# Apply the mapping dictionaries
data["Years in current job"] = data["Years in current job"].map(mappings["Years in current job"])
data["Home Ownership"] = data["Home Ownership"].map(mappings["Home Ownership"])
data["Term"] = data["Term"].map(mappings["Term"])

# One-hot encode the nominal (unordered) discrete variable
data = pd.get_dummies(data, columns=['Purpose'])
# Collect the columns created by get_dummies and cast them from bool to int
list_final = []
data2 = pd.read_csv('data.csv')
for i in data.columns:
    if i not in data2.columns:
        list_final.append(i)
for i in list_final:
    data[i] = data[i].astype(int)  # convert bool to numeric

# Separate features and label
X = data.drop(['Credit Default'], axis=1)  # features
y = data['Credit Default']                 # label

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Scale the features; fit on the training set only so train and test share the same scaling
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Convert to tensors and move them to the chosen device
X_train = torch.FloatTensor(X_train).to(device)
y_train = torch.LongTensor(y_train.values).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_test = torch.LongTensor(y_test.values).to(device)


class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(30, 64)    # wider first hidden layer
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.3)  # Dropout to reduce overfitting
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 2)     # fewer hidden layers; output layer unchanged (2 classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.dropout(out)  # apply Dropout after the first hidden layer
        out = self.fc2(out)
        out = self.relu(out)
        out = self.fc3(out)
        return out
# Instantiate the model and move it to the device
model = MLP().to(device)

# Cross-entropy loss for the classification task
criterion = nn.CrossEntropyLoss()

# Adam optimizer
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Train the model
num_epochs = 20000  # number of training epochs

# Record the loss, accuracy, and epoch index every 200 epochs
losses = []
accuracies = []
epochs = []

start_time = time.time()  # record the start time

# Create a tqdm progress bar
with tqdm(total=num_epochs, desc="Training", unit="epoch") as pbar:
    for epoch in range(num_epochs):
        # Forward pass
        outputs = model(X_train)  # implicitly calls forward()
        loss = criterion(outputs, y_train)

        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Every 200 epochs, record the loss and evaluate on the test set
        if (epoch + 1) % 200 == 0:
            losses.append(loss.item())
            epochs.append(epoch + 1)

            model.eval()
            with torch.no_grad():
                test_outputs = model(X_test)
                _, predicted = torch.max(test_outputs, 1)
                correct = (predicted == y_test).sum().item()
                accuracy = correct / y_test.size(0)
                accuracies.append(accuracy)
            model.train()  # switch back to training mode so Dropout stays active

            # Update the progress-bar postfix
            pbar.set_postfix({'Loss': f'{loss.item():.4f}', 'Accuracy': f'{accuracy * 100:.2f}%'})

        # Advance the progress bar every 1000 epochs
        if (epoch + 1) % 1000 == 0:
            pbar.update(1000)

    # Make sure the progress bar reaches 100%
    if pbar.n < num_epochs:
        pbar.update(num_epochs - pbar.n)

time_all = time.time() - start_time  # total training time
print(f'Training time: {time_all:.2f} seconds')

# Plot the loss and accuracy curves
fig, ax1 = plt.subplots(figsize=(10, 6))

# Loss curve on the left y-axis
color = 'tab:red'
ax1.set_xlabel('Epoch')
ax1.set_ylabel('Loss', color=color)
ax1.plot(epochs, losses, color=color)
ax1.tick_params(axis='y', labelcolor=color)

# Second y-axis for the accuracy curve
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('Accuracy', color=color)
ax2.plot(epochs, accuracies, color=color)
ax2.tick_params(axis='y', labelcolor=color)

plt.title('Training Loss and Accuracy over Epochs')
plt.grid(True)
plt.show()
# Final evaluation on the test set; the model now holds the trained parameters
model.eval()  # switch to evaluation mode
with torch.no_grad():  # disable gradient tracking to speed up inference
    outputs = model(X_test)               # forward pass on the test data
    _, predicted = torch.max(outputs, 1)  # max value and its index for each row
    correct = (predicted == y_test).sum().item()  # number of correct predictions
    accuracy = correct / y_test.size(0)
    print(f'Test accuracy: {accuracy * 100:.2f}%')
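
As one rough answer to the first assignment (making the code more standardized), the training loop and evaluation above could be pulled into small helper functions. The sketch below is only an outline using the same model, criterion, optimizer, and tensor names defined in the script; the function names train_model and evaluate are illustrative, not part of any required API.

def evaluate(model, X, y):
    # Accuracy of `model` on the tensors X, y
    model.eval()
    with torch.no_grad():
        _, predicted = torch.max(model(X), 1)
        return (predicted == y).sum().item() / y.size(0)


def train_model(model, criterion, optimizer, X_train, y_train, X_test, y_test,
                num_epochs=20000, eval_every=200):
    # Full-batch training loop; returns (epochs, losses, accuracies) for plotting
    epochs, losses, accuracies = [], [], []
    for epoch in range(1, num_epochs + 1):
        model.train()
        loss = criterion(model(X_train), y_train)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if epoch % eval_every == 0:
            epochs.append(epoch)
            losses.append(loss.item())
            accuracies.append(evaluate(model, X_test, y_test))
    return epochs, losses, accuracies

# Usage with the objects defined earlier in the script:
# epochs, losses, accuracies = train_model(model, criterion, optimizer,
#                                          X_train, y_train, X_test, y_test)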

@浙大疏锦行
