Run main.py first to serialize the text (build and pickle the vocabulary), then run train.py to train the model.

dataset.py

from torch.utils.data import DataLoader, Dataset
import torch
import os
from utils import tokenlize
import config


class ImdbDataset(Dataset):
    def __init__(self, train=True):
        super(ImdbDataset, self).__init__()
        data_path = r"H:\073-nlp自然语言处理-v5.bt38[周大伟]\073-nlp自然语言处理-v5.bt38[周大伟]\第四天\代码\data\aclImdb_v1\aclImdb"
        data_path += r"\train" if train else r"\test"
        self.total_path = []
        for temp_path in [r"\pos", r"\neg"]:
            cur_path = data_path + temp_path
            self.total_path += [os.path.join(cur_path, i) for i in os.listdir(cur_path) if i.endswith(".txt")]

    def __getitem__(self, idx):
        file = self.total_path[idx]
        with open(file, encoding="utf-8") as f:
            review = f.read()
        review = tokenlize(review)
        # IMDB file names look like "id_rating.txt"; ratings below 5 are negative
        label = int(file.split("_")[-1].split(".")[0])
        label = 0 if label < 5 else 1
        return review, label

    def __len__(self):
        return len(self.total_path)


def collate_fn(batch):
    """
    Collate a batch: turn token lists into padded index tensors.
    """
    reviews, labels = zip(*batch)
    reviews = torch.LongTensor([config.ws.transform(i, max_len=config.max_len) for i in reviews])
    labels = torch.LongTensor(labels)
    return reviews, labels


def get_dataloader(train):
    imdb_dataset = ImdbDataset(train=train)  # fixed: was hard-coded to train=True
    batch_size = config.train_batch_size if train else config.test_batch_size
    return DataLoader(imdb_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)


if __name__ == '__main__':
    # dataset = ImdbDataset(train=True)
    # print(dataset[1])
    for idx, (review, label) in enumerate(get_dataloader(train=True)):
        print(review)
        print(label)
        break
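
dataset.py and every file below import a config module that the post never shows. Reconstructing it from the attributes used (ws, max_len, train_batch_size, test_batch_size, device), a minimal sketch could look like the following; the batch sizes and max_len are assumptions, not values from the post.

config.py (reconstructed)

import pickle
import torch

train_batch_size = 128  # assumed value
test_batch_size = 128   # assumed value
max_len = 200           # assumed value: every review is truncated or padded to this length

# ws.pkl is written by main.py, so run main.py before train.py or eval.py
try:
    ws = pickle.load(open("./models/ws.pkl", "rb"))
except FileNotFoundError:
    ws = None  # main.py has not produced the vocabulary yet

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")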

  utils.py

"""
实现额外的方法
"""
import re def tokenlize(sentence):
"""
进行文本分词
:param sentence: str
:return: [str,str,str]
""" fileters = ['!', '"', '#', '$', '%', '&', '\(', '\)', '\*', '\+', ',', '-', '\.', '/', ':', ';', '<', '=', '>',
'\?', '@', '\[', '\\', '\]', '^', '_', '`', '\{', '\|', '\}', '~', '\t', '\n', '\x97', '\x96', '”', '“', ]
sentence = sentence.lower() #把大写转化为小写
sentence = re.sub("<br />"," ",sentence)
# sentence = re.sub("I'm","I am",sentence)
# sentence = re.sub("isn't","is not",sentence)
sentence = re.sub("|".join(fileters)," ",sentence)
result = [i for i in sentence.split(" ") if len(i)>0] return result
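
A quick sanity check of the tokenizer (the sample review is made up; the expected output assumes the corrected filter list above):

from utils import tokenlize

print(tokenlize("This movie is GREAT!<br />I'd watch it again."))
# ['this', 'movie', 'is', 'great', "i'd", 'watch', 'it', 'again']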

word_sequence.py

"""
Text serialization: map tokens to integer indices.
"""


class WordSequence():
    UNK_TAG = "<UNK>"
    PAD_TAG = "<PAD>"
    UNK = 1
    PAD = 0

    def __init__(self):
        self.dict = {
            self.UNK_TAG: self.UNK,
            self.PAD_TAG: self.PAD
        }
        self.count = {}

    def fit(self, sentence):
        """
        Accumulate word counts for one sentence.
        """
        for word in sentence:
            self.count[word] = self.count.get(word, 0) + 1

    def build_vocab(self, min_count=0, max_count=None, max_features=None):
        """
        Build the vocabulary from the accumulated counts.
        :param min_count: minimum word frequency
        :param max_count: maximum word frequency
        :param max_features: maximum vocabulary size
        """
        if min_count is not None:
            self.count = {word: count for word, count in self.count.items() if count > min_count}
        if max_count is not None:
            self.count = {word: count for word, count in self.count.items() if count < max_count}
        if max_features is not None:
            # keep the most frequent words; fixed: sorted() takes key= as a keyword argument
            self.count = dict(sorted(self.count.items(), key=lambda x: x[-1], reverse=True)[:max_features])
        for word in self.count:
            self.dict[word] = len(self.dict)  # assign each word the next free index
        # invert the dict for index -> word lookups
        self.inverse_dict = dict(zip(self.dict.values(), self.dict.keys()))

    def transform(self, sentence, max_len=None):
        """
        Convert a sentence to a sequence of indices, truncated or padded to max_len.
        """
        if max_len is not None:
            if len(sentence) > max_len:
                sentence = sentence[:max_len]
            else:
                sentence = sentence + [self.PAD_TAG] * (max_len - len(sentence))
        return [self.dict.get(i, self.UNK) for i in sentence]

    def inverse_transform(self, indices):
        """
        Convert a sequence of indices back to words.
        """
        return [self.inverse_dict.get(i, self.UNK_TAG) for i in indices]

    def __len__(self):
        return len(self.dict)


if __name__ == '__main__':
    sentences = [["今天", "天气", "很", "好"],
                 ["今天", "去", "吃", "什么"]]
    ws = WordSequence()
    for sentence in sentences:
        ws.fit(sentence)
    ws.build_vocab(min_count=0)
    print(ws.dict)
    ret = ws.transform(["好", "热", "呀", "呀", "呀", "呀", "呀", "呀", "呀"], max_len=5)
    print(ret)
    ret = ws.inverse_transform(ret)
    print(ret)
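
A design note: PAD is index 0 and UNK is index 1, and both models below pass padding_idx=config.ws.PAD to nn.Embedding. PyTorch initializes that embedding row to zeros and never updates it, so padding positions contribute nothing to the learned representation.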

  main.py

from word_sequence import WordSequence
from dataset import ImdbDataset
import pickle
from tqdm import tqdm

if __name__ == '__main__':
    ws = WordSequence()
    # Iterate the datasets directly rather than through get_dataloader():
    # the DataLoader's collate_fn calls config.ws.transform, and config.ws is
    # loaded from the very pickle this script is about to create.
    train_data = ImdbDataset(train=True)
    test_data = ImdbDataset(train=False)
    for review, label in tqdm(train_data, total=len(train_data)):
        ws.fit(review)
    for review, label in tqdm(test_data, total=len(test_data)):
        ws.fit(review)
    print("Building the vocabulary...")
    ws.build_vocab()
    print(len(ws))
    pickle.dump(ws, open("./models/ws.pkl", "wb"))
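
Note that pickle stores the WordSequence instance by reference to its class, so word_sequence.py must be importable under the same module name wherever ws.pkl is loaded back (for example in the reconstructed config.py above).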

  model.py

"""
构建模型
"""
import torch.nn as nn
import config
import torch.nn.functional as F class ImdbModel(nn.Module):
def __init__(self):
super(ImdbModel,self).__init__()
self.embedding = nn.Embedding(num_embeddings=len(config.ws),embedding_dim=300,padding_idx=config.ws.PAD)
self.fc = nn.Linear(config.max_len*300,2) def forward(self,input):
'''
:param input:
:return:
'''
input_embeded = self.embedding(input) input_embeded_viewed = input_embeded.view(input_embeded.size(0),-1) out = self.fc(input_embeded_viewed)
return F.log_softmax(out,dim=-1)
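
A shape smoke test for this model (assumes main.py has already produced ws.pkl and the reconstructed config above, so len(config.ws) and config.max_len are defined; the batch size 4 is arbitrary):

import torch
import config
from model import ImdbModel

model = ImdbModel()
batch = torch.randint(0, len(config.ws), (4, config.max_len))  # 4 fake reviews of word indices
out = model(batch)
print(out.shape)  # torch.Size([4, 2]): log-probabilities over {neg, pos}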

  LSTMmodel.py

"""
构建模型
"""
import torch.nn as nn
import torch
import config
import torch.nn.functional as F class ImdbModel(nn.Module):
def __init__(self):
super(ImdbModel,self).__init__()
self.embedding = nn.Embedding(num_embeddings=len(config.ws),embedding_dim=300,padding_idx=config.ws.PAD)
self.lstm = nn.LSTM(input_size=200,hidden_size=64,num_layers=2,batch_first=True,bidirectional=True,dropout=0.5)
self.fc1 = nn.Linear(64*2,64)
self.fc2 = nn.Linear(64,2) def forward(self,input):
'''
:param input:
:return:
'''
input_embeded = self.embedding(input) #[batch_size,seq_len,200] output,(h_n,c_n) = self.lstm(input_embeded)
out = torch.cat(h_n[-1,:,:],h_n[-2,:,:],dim=-1) #拼接正向最后一个输出和反向最后一个输出 #进行全连接
out_fc1 = self.fc1(out)
#进行relu
out_fc1_relu = F.relu(out_fc1)
#全连接
out = self.fc2(out_fc1_relu)
return F.log_softmax(out,dim=-1)
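
For a bidirectional multi-layer nn.LSTM, h_n has shape [num_layers * 2, batch, hidden_size], ordered layer by layer with the forward direction before the backward one, so h_n[-2] and h_n[-1] are the last layer's forward and backward final hidden states. A self-contained check of that layout:

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=300, hidden_size=64, num_layers=2,
               batch_first=True, bidirectional=True)
x = torch.randn(4, 50, 300)                   # [batch, seq_len, input_size]
output, (h_n, c_n) = lstm(x)
print(h_n.shape)                              # torch.Size([4, 4, 64]) = [num_layers*2, batch, hidden]
last = torch.cat([h_n[-2], h_n[-1]], dim=-1)  # forward + backward final states of the top layer
print(last.shape)                             # torch.Size([4, 128])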

  train.py

"""
Train the model.
"""
import torch
import config
from model import ImdbModel
from dataset import get_dataloader
from torch.optim import Adam
from tqdm import tqdm
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from eval import eval

model = ImdbModel().to(config.device)
optimizer = Adam(model.parameters(), lr=0.001)
loss_list = []


def train(epoch):
    train_dataloader = get_dataloader(train=True)
    bar = tqdm(train_dataloader, total=len(train_dataloader))
    for idx, (input, target) in enumerate(bar):
        optimizer.zero_grad()
        input = input.to(config.device)
        target = target.to(config.device)
        output = model(input)
        loss = F.nll_loss(output, target)
        loss.backward()
        loss_list.append(loss.item())
        optimizer.step()
        bar.set_description("epoch:{} idx:{} loss:{:.6f}".format(epoch, idx, np.mean(loss_list)))
        if idx % 10 == 0:
            torch.save(model.state_dict(), "./models/model.pkl")
            torch.save(optimizer.state_dict(), "./models/optimizer.pkl")


if __name__ == '__main__':
    for i in range(5):
        train(i)
        eval()
    plt.figure(figsize=(20, 8))
    plt.plot(range(len(loss_list)), loss_list)
    plt.show()  # fixed: without show() the loss curve never appears
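
Because train() checkpoints both the model and the optimizer every 10 batches, an interrupted run can be resumed by restoring both state dicts before the loop starts; a minimal sketch (the existence check is an addition, not part of the original script):

import os

if os.path.exists("./models/model.pkl"):
    model.load_state_dict(torch.load("./models/model.pkl"))
    optimizer.load_state_dict(torch.load("./models/optimizer.pkl"))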

  eval.py

"""
Evaluate the model on the test set.
"""
import torch
import config
from model import ImdbModel
from dataset import get_dataloader
import torch.nn.functional as F
import numpy as np


def eval():
    model = ImdbModel().to(config.device)
    model.load_state_dict(torch.load("./models/model.pkl"))
    model.eval()
    loss_list = []
    acc_list = []
    test_dataloader = get_dataloader(train=False)
    with torch.no_grad():
        for input, target in test_dataloader:
            input = input.to(config.device)
            target = target.to(config.device)
            output = model(input)
            loss = F.nll_loss(output, target)
            loss_list.append(loss.item())
            # accuracy: compare the arg-max prediction with the target
            pred = output.max(dim=-1)[-1]
            acc_list.append(pred.eq(target).cpu().float().mean())
    print("loss:{:.6f},acc:{}".format(np.mean(loss_list), np.mean(acc_list)))


if __name__ == '__main__':
    eval()

  
