python3情感分析方法
这里介绍两种方法:第一种用自带的snownlp库,snownlp是针对中文的;英文的可以用textblob,可参考(https://www.jianshu.com/p/d50a14541d01)
第一种:
#meidi_jd_neg.txt数据格式如下(一行一条评论)
#好像遥控是坏的,还是送的电池没有电,算了,热水器上将就着按吧
#要打十个字才能发,我就打十个字
#调温的开关太紧了,不知道是不是都这样,送货和安装的师傅来的很准时,不像以前要等老半天
#上面安装既然花了我差不多*块,但是这热水器马马虎虎吧
#这东西有不是什么高科技,比别的厂家还贵,想不明白
#......
# Method 1: SnowNLP (sentiment analysis oriented towards Chinese text).
from snownlp import sentiment
from snownlp import SnowNLP
# Train on your own corpus and save the model — this only needs to run once.
#sentiment.train('C:\\Users\\Administrator\\Desktop\\data\\meidi_jd_neg.txt', 'C:\\Users\\Administrator\\Desktop\\data\\meidi_jd_pos.txt')
#sentiment.save('D:/pyscript/sentiment.marshal')
# Run sentiment analysis on a single comment.
text = '好像遥控是坏的,还是送的电池没有电,算了,热水器上将就着按吧'
print(SnowNLP(text).sentiments)
# Printed result: 0.11775341066972067 (the score varies between 0 and 1).
# 0.3306322286731991 is the score from a self-trained model; with a small
# training corpus it performs worse than the model bundled with snownlp.
# To use your own model, change data_path in seg\__init__.py to the path the
# model was saved to (otherwise the default model is used). If it errors after
# the change, locate the error and patch the source directly. Method 2 follows:
转自https://zhuanlan.zhihu.com/p/23225934
#文件格式如下比如positive.txt
#无误
#感激不尽
#受大众欢迎
#敬礼
#合口味
#突出
#文雅
#一尘不染
#高精度
#兴盛
#如获至宝
import jieba
import numpy as np
import sys
def open_dict(Dict='hahah', path=r'C:/Users/Administrator/Desktop/Textming/'):
    """Load a lexicon file and return its entries as a list of strings.

    Args:
        Dict: base name of the lexicon file (the ``.txt`` suffix is appended).
        path: directory prefix the file name is appended to.

    Returns:
        One entry per line of the file, with the trailing newline stripped.
    """
    full_path = path + '%s.txt' % Dict
    # ``with`` guarantees the handle is closed (the original leaked it), and
    # the comprehension avoids shadowing the ``dict`` builtin.
    with open(full_path, 'r', encoding='utf-8') as lexicon_file:
        return [line.strip('\n') for line in lexicon_file]
def judgeodd(num):
    """Classify *num* by parity: return the string 'even' or 'odd'."""
    return 'odd' if num % 2 else 'even'
# NOTE: change the path below to wherever your lexicon files live.
deny_word = open_dict(Dict = '否定词', path= r'C:/Users/Administrator/Desktop/Textming/')
posdict = open_dict(Dict = 'positive', path= r'C:/Users/Administrator/Desktop/Textming/')
negdict = open_dict(Dict = 'negative', path= r'C:/Users/Administrator/Desktop/Textming/')
degree_word = open_dict(Dict = '程度级别词语', path= r'C:/Users/Administrator/Desktop/Textming/')
# The degree lexicon is partitioned by marker lines ('extreme', 'very', 'more',
# 'ish', 'last'); slice out each weight band between consecutive markers.
mostdict = degree_word[degree_word.index('extreme')+1 : degree_word.index('very')]# weight 4, multiplied into the preceding sentiment word's score
verydict = degree_word[degree_word.index('very')+1 : degree_word.index('more')]# weight 3
moredict = degree_word[degree_word.index('more')+1 : degree_word.index('ish')]# weight 2
ishdict = degree_word[degree_word.index('ish')+1 : degree_word.index('last')]# weight 0.5
def sentiment_score_list(dataset):
    """Score a piece of text sentence by sentence with a lexicon-based rule.

    The text is split on the Chinese full stop '。'; each sentence is
    tokenized with jieba, and every sentiment word's base score of 1 is scaled
    by the degree adverbs (x4 / x3 / x2 / x0.5) and flipped by an odd number
    of negation words found between the previous sentiment word and this one.
    An exclamation mark adds a bonus of 2 when the sentence contains any
    sentiment word.

    Args:
        dataset: the raw text of one review.

    Returns:
        A list with one entry per sentence; each entry is a one-element list
        holding ``[pos_score, neg_score]`` (both clamped to be non-negative).
    """
    seg_sentence = dataset.split('。')
    count1 = []
    count2 = []
    for sen in seg_sentence:  # iterate over every sentence of the review
        segtmp = jieba.lcut(sen, cut_all=False)  # tokenize into a word list
        i = 0  # position of the word currently being scanned
        a = 0  # position just after the most recent sentiment word
        poscount = 0   # running score of the current positive word
        poscount2 = 0  # positive score accumulated after negation flips
        poscount3 = 0  # final positive score (including exclamation bonus)
        negcount = 0
        negcount2 = 0
        negcount3 = 0
        for word in segtmp:
            if word in posdict:  # positive sentiment word
                poscount += 1
                c = 0
                for w in segtmp[a:i]:  # scan modifiers before the sentiment word
                    if w in mostdict:
                        poscount *= 4.0
                    elif w in verydict:
                        poscount *= 3.0
                    elif w in moredict:
                        poscount *= 2.0
                    elif w in ishdict:
                        poscount *= 0.5
                    elif w in deny_word:
                        c += 1
                if judgeodd(c) == 'odd':  # odd number of negations flips polarity
                    poscount *= -1.0
                    poscount2 += poscount
                    poscount = 0
                    poscount3 = poscount + poscount2 + poscount3
                    poscount2 = 0
                else:
                    poscount3 = poscount + poscount2 + poscount3
                    poscount = 0
                a = i + 1  # next modifier window starts after this sentiment word
            elif word in negdict:  # negative sentiment word, mirror of the above
                negcount += 1
                d = 0
                for w in segtmp[a:i]:
                    if w in mostdict:
                        negcount *= 4.0
                    elif w in verydict:
                        negcount *= 3.0
                    elif w in moredict:
                        negcount *= 2.0
                    elif w in ishdict:
                        negcount *= 0.5
                    elif w in deny_word:
                        # BUG FIX: the original tested ``w in degree_word`` here,
                        # so negations before negative words were never counted;
                        # the positive branch above correctly uses deny_word.
                        d += 1
                if judgeodd(d) == 'odd':
                    negcount *= -1.0
                    negcount2 += negcount
                    negcount = 0
                    negcount3 = negcount + negcount2 + negcount3
                    negcount2 = 0
                else:
                    negcount3 = negcount + negcount2 + negcount3
                    negcount = 0
                a = i + 1
            elif word == '!' or word == '!':  # exclamation mark: add bonus
                for w2 in segtmp[::-1]:
                    # BUG FIX: the original wrote ``w2 in posdict or negdict``,
                    # which is always true because a non-empty list is truthy;
                    # membership must be tested against each lexicon.
                    if w2 in posdict or w2 in negdict:
                        poscount3 += 2
                        negcount3 += 2
                        break
            i += 1  # advance the scan position
        # Clamp the aggregates so neither final score is negative.
        pos_count = 0
        neg_count = 0
        if poscount3 < 0 and negcount3 > 0:
            neg_count += negcount3 - poscount3
            pos_count = 0
        elif negcount3 < 0 and poscount3 > 0:
            pos_count = poscount3 - negcount3
            neg_count = 0
        elif poscount3 < 0 and negcount3 < 0:
            neg_count = -poscount3
            pos_count = -negcount3
        else:
            pos_count = poscount3
            neg_count = negcount3
        count1.append([pos_count, neg_count])
        count2.append(count1)
        count1 = []
    return count2
def sentiment_score(senti_score_list):
    """Aggregate per-sentence [pos, neg] score pairs into summary statistics.

    Args:
        senti_score_list: output of ``sentiment_score_list`` — a list of
            reviews, each review a list of ``[pos, neg]`` score pairs.

    Returns:
        A list with one entry per non-empty review:
        ``[Pos, Neg, AvgPos, AvgNeg, StdPos, StdNeg]``, where the averages and
        standard deviations are rounded to one decimal place.
    """
    score = []
    for review in senti_score_list:
        if not review:
            # BUG FIX: the original used ``break`` here, silently discarding
            # every review after the first empty one; skip just this review.
            continue
        score_array = np.array(review)
        Pos = np.sum(score_array[:, 0])
        Neg = np.sum(score_array[:, 1])
        AvgPos = round(float(np.mean(score_array[:, 0])), 1)
        AvgNeg = round(float(np.mean(score_array[:, 1])), 1)
        StdPos = round(float(np.std(score_array[:, 0])), 1)
        StdNeg = round(float(np.std(score_array[:, 1])), 1)
        score.append([Pos, Neg, AvgPos, AvgNeg, StdPos, StdNeg])
    return score
# Demo inputs: one strongly negative and one strongly positive comment.
data = '你就是个王八蛋,混账玩意!你们的手机真不好用!非常生气,我非常郁闷!!!!'
data2= '我好高兴啊,非常高兴!今天我得了一百分,我很兴奋开心,愉快,开心'
# print(sentiment_score(sentiment_score_list(data)))
print(sentiment_score(sentiment_score_list(data2)))
# BUG FIX: the closing remarks were fused onto the line above, making it a
# syntax error; they are kept here as a comment instead:
# Both methods work reasonably well. In practice, hold out some test data to
# compare their results, and build your own domain-specific corpus if needed.