
Configuring and Running word2vec复现.py in an Environment Without Network Access

The script to run (word2vec复现.py)

It segments the corpus with jieba, builds (center word, context word) pairs, and trains a skip-gram style model whose input embedding is initialized from the local Qwen2.5-0.5B-Instruct weights.

# -*- coding: utf-8 -*-
import pandas as pd
import jieba.posseg as psg
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
from transformers import AutoTokenizer, AutoModel


def get_stop_word():
    with open("../data/baidu_stopwords.txt", encoding="utf-8") as f:
        return f.read().split("\n")


def read_data(n=3):
    # Segment every line with jieba, drop stop words and unwanted POS tags,
    # then drop words that appear fewer than n times.
    all_data = pd.read_csv("../data/数学原始数据.csv", names=["data"], encoding="gbk")
    all_data = all_data["data"].tolist()
    no_t = ["x", "c", "m", "d", "uj", "r", ""]
    result = []
    word_fre = {}
    for data in all_data:
        words = psg.lcut(data)
        new_word = []
        for word, t in words:
            if t in no_t:
                continue
            if word not in stop_words:
                word_fre[word] = word_fre.get(word, 0) + 1
                new_word.append(word)
        result.append(new_word)
    new_result = []
    for words in result:
        new_word = [word for word in words if word_fre[word] >= n]
        new_result.append(new_word)
    return new_result


def build_data(all_data):
    # Build (center word, context word) pairs inside an n_gram window (skip-gram style).
    result = []
    for data in all_data:
        for ni, now_word in enumerate(data):
            other_word = data[max(ni - n_gram, 0):ni] + data[ni + 1:ni + 1 + n_gram]
            for o in other_word:
                result.append((now_word, o))
    return result


class MyDataset(Dataset):
    def __init__(self, all_data):
        self.all_data = all_data

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, index):
        data = self.all_data[index]
        word1_idx = tokenizer(data[0])["input_ids"][0]
        word2_idx = tokenizer(data[1])["input_ids"][0]
        return word1_idx, word2_idx


class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # Initialize the input embedding from the local Qwen2.5-0.5B-Instruct weights.
        self.base_model = AutoModel.from_pretrained("../model/Qwen2.5-0.5B-Instruct")
        self.linear1 = nn.Linear(corpus_len, emb_dim)
        self.linear1.weight.data[:, :151936] = self.base_model.embed_tokens.weight.data.T
        self.linear2 = nn.Linear(emb_dim, corpus_len)
        self.linear2.weight.requires_grad = False
        self.loss_fun = nn.CrossEntropyLoss()

    def forward(self, batch_w1_index, batch_w2_index):
        # One-hot encode the center words, project them to the embedding space,
        # then predict the context word over the whole vocabulary.
        word1_onehot = torch.zeros(size=[len(batch_w1_index), corpus_len])
        for i in range(len(batch_w1_index)):
            word1_onehot[i][batch_w1_index[i]] = 1.0
        h = self.linear1(word1_onehot)
        predict = self.linear2(h)
        loss = self.loss_fun(predict, batch_w2_index)
        return loss


def add_word(all_data):
    # Make sure every corpus word maps to exactly one token id.
    global tokenizer
    new_data = []
    for i in all_data:
        new_data.extend(i)
    new_data = list(set(new_data))
    for word in new_data:
        t = tokenizer(word)["input_ids"]
        if len(t) != 1:
            tokenizer.add_tokens(word)


if __name__ == "__main__":
    n_gram = 1
    batch_size = 100
    epoch = 10
    emb_dim = 896
    lr = 0.01
    grad_acc = 1

    stop_words = get_stop_word()
    stop_words = stop_words + ["。", ",", "(", ")"]
    all_data = read_data()
    rel_words = build_data(all_data)

    tokenizer = AutoTokenizer.from_pretrained("../model/Qwen2.5-0.5B-Instruct")
    add_word(all_data)
    corpus_len = len(tokenizer.get_vocab())

    train_dataset = MyDataset(rel_words)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)

    model = Model()
    opt = torch.optim.Adam(model.parameters(), lr=lr)

    for e in range(epoch):
        for batch_idx, (batch_w1_index, batch_w2_index) in tqdm(enumerate(train_dataloader, start=1)):
            loss = model.forward(batch_w1_index, batch_w2_index)
            loss.backward()
            if batch_idx % grad_acc == 0:
                opt.step()
                opt.zero_grad()
        print(loss)

Create and activate a virtual environment (optional)

python3 -m venv word2vec_offline
source word2vec_offline/bin/activate

Install the dependencies (this requires network access; on the offline machine use the steps below instead)

pip install torch pandas jieba tqdm transformers

1. Download the offline dependency packages

On a machine with network access, run:

mkdir offline_pkgs
pip download torch pandas jieba tqdm transformers -d offline_pkgs

This downloads all required packages (including transitive dependencies) into the offline_pkgs folder.
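Note that pip download fetches packages matching the machine it runs on. If the offline machine uses a different OS or Python version, the downloaded wheels may not install there. A hedged sketch of a platform-targeted download (the platform tag and Python version below are assumptions; adjust them to the offline machine):

# Hypothetical target: 64-bit Linux with CPython 3.10 -- adjust to your offline machine
pip download torch pandas tqdm transformers -d offline_pkgs --only-binary=:all: --platform manylinux2014_x86_64 --python-version 3.10
# jieba may only be published as a source archive, so download it without the binary restriction
pip download jieba -d offline_pkgs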

2. Copy the dependencies and project files to the offline machine

  • Copy the offline_pkgs folder to the offline machine
  • Copy your word2vec复现.py along with the required ../data/ and ../model/ folders; the ../model/Qwen2.5-0.5B-Instruct weights must be included, since they cannot be downloaded later (see the sketch after this list)
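If the Qwen2.5-0.5B-Instruct weights are not yet on the networked machine, one way to fetch them there is the huggingface_hub CLI (a sketch assuming a reasonably recent huggingface_hub, which is installed alongside transformers; the local directory matches the relative path used in the script):

# Run on the networked machine; places the model files where the script expects them
huggingface-cli download Qwen/Qwen2.5-0.5B-Instruct --local-dir ../model/Qwen2.5-0.5B-Instruct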

3. Create a new virtual environment on the offline machine

python3 -m venv venv
source venv/bin/activate
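The wheels in offline_pkgs are tied to the Python version they were downloaded for, so it is worth confirming that the offline interpreter matches the one used during pip download:

python3 --version   # should match the Python version on the download machine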

4. Install the dependencies offline

In the directory that contains the offline_pkgs folder, run:

pip install --no-index --find-links=offline_pkgs torch pandas jieba tqdm transformers

If a dependency fails to install, install that dependency first, then install the main packages.
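For example, if the error names a particular package (the name below is only a hypothetical placeholder for whichever dependency the error reports), install it explicitly from the same folder and then retry the main command:

pip install --no-index --find-links=offline_pkgs some-failing-dependency
pip install --no-index --find-links=offline_pkgs torch pandas jieba tqdm transformers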

5. Check that the dependencies are installed

pip list

Confirm that torch, pandas, jieba, tqdm, and transformers all appear in the list.
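Optionally, a quick import check catches anything pip list misses before the full script runs:

python -c "import torch, pandas, jieba, tqdm, transformers; print('imports ok')"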

6. Run the script

Make sure the virtual environment is activated and that the ../data/ and ../model/ paths are correct, then run:

python word2vec复现.py
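If transformers still tries to contact the Hugging Face Hub even though the script loads the model from a local path, recent transformers / huggingface_hub versions honor offline environment variables; a hedged example:

TRANSFORMERS_OFFLINE=1 HF_HUB_OFFLINE=1 python word2vec复现.py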
