Script to run (word2vec復現.py)
# -*- coding: utf-8 -*-
import pandas as pd
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
from transformers import AutoTokenizer, AutoModel


def get_stop_word():
    with open("../data/baidu_stopwords.txt", encoding="utf-8") as f:
        return f.read().split("\n")


def read_data(n=3):
    """Read the raw corpus, POS- and stop-word-filter it with jieba,
    and drop words that occur fewer than n times."""
    import jieba.posseg as psg

    all_data = pd.read_csv("../data/數學原始數據.csv", names=["data"], encoding="gbk")
    all_data = all_data["data"].tolist()

    no_t = ["x", "c", "m", "d", "uj", "r", ""]  # POS tags to discard
    result = []
    word_fre = {}
    for data in all_data:
        words = psg.lcut(data)
        new_word = []
        for word, t in words:
            if t in no_t:
                continue
            if word not in stop_words:  # uses the global stop_words list
                word_fre[word] = word_fre.get(word, 0) + 1
                new_word.append(word)
        result.append(new_word)

    # Keep only words whose corpus frequency is at least n.
    new_result = []
    for words in result:
        new_result.append([word for word in words if word_fre[word] >= n])
    return new_result


def build_data(all_data):
    """Build (center word, context word) pairs within a window of n_gram words."""
    result = []
    for data in all_data:
        for ni, now_word in enumerate(data):
            other_word = data[max(ni - n_gram, 0):ni] + data[ni + 1:ni + 1 + n_gram]
            for o in other_word:
                result.append((now_word, o))
    return result


class MyDataset(Dataset):
    def __init__(self, all_data):
        self.all_data = all_data

    def __len__(self):
        return len(self.all_data)

    def __getitem__(self, index):
        data = self.all_data[index]
        # After add_word(), every corpus word maps to exactly one token id.
        word1_idx = tokenizer(data[0])["input_ids"][0]
        word2_idx = tokenizer(data[1])["input_ids"][0]
        return word1_idx, word2_idx


class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.base_model = AutoModel.from_pretrained("../model/Qwen2.5-0.5B-Instruct")
        # Input projection: one-hot (corpus_len) -> embedding (emb_dim).
        # Initialise the first 151936 columns with Qwen's pretrained token embeddings.
        self.linear1 = nn.Linear(corpus_len, emb_dim)
        self.linear1.weight.data[:, :151936] = self.base_model.embed_tokens.weight.data.T
        # Output projection: embedding -> vocabulary logits (kept frozen).
        self.linear2 = nn.Linear(emb_dim, corpus_len)
        self.linear2.weight.requires_grad = False
        self.loss_fun = nn.CrossEntropyLoss()

    def forward(self, batch_w1_index, batch_w2_index):
        # One-hot encode each center word in the batch (one index per row).
        word1_onehot = torch.zeros(size=[len(batch_w1_index), corpus_len])
        word1_onehot[torch.arange(len(batch_w1_index)), batch_w1_index] = 1.0
        h = self.linear1(word1_onehot)
        predict = self.linear2(h)
        loss = self.loss_fun(predict, batch_w2_index)
        return loss


def add_word(all_data):
    """Register corpus words that the tokenizer would split into several tokens
    as new tokens, so each word gets exactly one id."""
    new_data = []
    for i in all_data:
        new_data.extend(i)
    new_data = list(set(new_data))
    for word in new_data:
        t = tokenizer(word)["input_ids"]
        if len(t) != 1:
            tokenizer.add_tokens(word)


if __name__ == "__main__":
    n_gram = 1        # context window size (words on each side)
    batch_size = 100
    epoch = 10
    emb_dim = 896     # hidden size of Qwen2.5-0.5B-Instruct
    lr = 0.01
    grad_acc = 1      # gradient accumulation steps

    stop_words = get_stop_word()
    stop_words = stop_words + ["。", ",", "(", ")"]
    all_data = read_data()
    rel_words = build_data(all_data)

    tokenizer = AutoTokenizer.from_pretrained("../model/Qwen2.5-0.5B-Instruct")
    add_word(all_data)
    corpus_len = len(tokenizer.get_vocab())

    train_dataset = MyDataset(rel_words)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)

    model = Model()
    opt = torch.optim.Adam(model.parameters(), lr=lr)

    for e in range(epoch):
        for batch_idx, (batch_w1_index, batch_w2_index) in tqdm(enumerate(train_dataloader, start=1)):
            loss = model(batch_w1_index, batch_w2_index)
            loss.backward()
            if batch_idx % grad_acc == 0:
                opt.step()
                opt.zero_grad()
        print(loss)
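The script above only trains the two projection layers and prints the loss; it never saves or queries the learned vectors. The following is a minimal sketch, not part of the original script, that assumes the model and tokenizer objects from a finished training run; most_similar, the query word, and the output file name word_vectors.pt are illustrative choices, not something the script defines.

# Minimal sketch (assumption: run after the training loop above has finished).
emb = model.linear1.weight.data.T              # [corpus_len, emb_dim]; row i = vector of token id i
emb = emb / emb.norm(dim=-1, keepdim=True)     # L2-normalise so a dot product is cosine similarity

def most_similar(word, topk=5):
    # The word should map to a single token id (guaranteed for corpus words after add_word).
    idx = tokenizer(word)["input_ids"][0]
    scores = emb @ emb[idx]                    # cosine similarity against every token
    best = scores.topk(topk + 1).indices.tolist()
    return [tokenizer.decode([i]) for i in best if i != idx][:topk]

print(most_similar("函數"))                    # example query word, purely illustrative
torch.save(emb, "word_vectors.pt")             # hypothetical output path for later reuse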
Create and activate a virtual environment (optional)
python3 -m venv word2vec_offline
source word2vec_offline/bin/activate
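On Windows the activation command differs; assuming the same environment name:
word2vec_offline\Scripts\activate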
Install the dependencies
pip install torch pandas jieba tqdm transformers
1. Download offline installation packages for the dependencies
On a machine with network access, run:
mkdir offline_pkgs
pip download torch pandas jieba tqdm transformers -d offline_pkgs
This downloads all the required packages (including transitive dependencies) into the offline_pkgs folder.
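If the offline machine runs a different OS or Python version than the download machine, pip download can target it explicitly; the platform tag and version below are placeholders to adjust to the offline machine:
pip download torch pandas jieba tqdm transformers -d offline_pkgs --only-binary=:all: --platform manylinux2014_x86_64 --python-version 3.10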
2. Copy the dependencies and project files to the offline machine
- Copy the offline_pkgs folder to the offline machine
- Copy your word2vec復現.py together with the required ../data/ and ../model/ folders
3. Create a new virtual environment on the offline machine
python3 -m venv venv
source venv/bin/activate
4. Install the dependencies offline
From the directory that contains offline_pkgs, run:
pip install --no-index --find-links=offline_pkgs torch pandas jieba tqdm transformers
If a dependency fails to install, install that dependency first and then the main packages.
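For example, to install a single failing dependency on its own first (the package name is a placeholder):
pip install --no-index --find-links=offline_pkgs <failing-package>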
5. Check the installed dependencies
pip list
Confirm that torch, pandas, jieba, tqdm, and transformers are all listed.
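A quick way to confirm the packages actually import, run inside the activated environment:
python -c "import torch, pandas, jieba, tqdm, transformers; print(torch.__version__)"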
6. Run the code
Make sure the virtual environment is active and that the data and model paths are correct:
python word2vec復現.py
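Based on the relative paths hard-coded in the script, the expected layout is roughly the sketch below; the src/ directory name is an assumption, the script just has to sit in a sibling directory of data/ and model/ and be run from inside that directory:
project/
├── src/
│   └── word2vec復現.py
├── data/
│   ├── baidu_stopwords.txt
│   └── 數學原始數據.csv
└── model/
    └── Qwen2.5-0.5B-Instruct/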