Python Text Processing: Word Segmentation and Word Clouds
The full script (reconstructed; file names that were elided in the source are marked as placeholders):

import os
import jieba                      # Chinese word segmentation
import numpy                      # numerical computing
import codecs                     # codecs.open lets you specify the file's encoding; it decodes to unicode on read
import pandas                     # statistics / data analysis toolkit
import matplotlib.pyplot as plt
from wordcloud import WordCloud, ImageColorGenerator  # word cloud package
from scipy.misc import imread     # removed in SciPy >= 1.2; see the sketch below for a replacement
from time import sleep

def join_txt():
    # ---- Merge the txt files in the corpus folder into a single file
    # Path of the target folder
    meragefiledir = os.getcwd() + '\\corpus'
    # File names inside that folder
    filenames = os.listdir(meragefiledir)
    # Open the output file in the current directory, creating it if needed.
    # The output filename was elided in the source; 'merged.txt' is a placeholder.
    file = open('merged.txt', 'w')
    # Walk the file names and copy every line into the merged file
    for filename in filenames:
        filepath = meragefiledir + '\\' + filename
        for line in open(filepath, encoding='utf-8'):
            file.writelines(line)
        file.write('\n')
    file.close()

def make_pic():
    # ---- Load the merged text and segment it.
    # The input filename was elided in the source; it should be the file join_txt() wrote.
    file = codecs.open(u'merged.txt', 'r')
    content = file.read()
    file.close()
    segment = []
    segs = jieba.cut(content)  # segment with jieba
    for seg in segs:
        if len(seg) > 1 and seg != '\r\n':
            segment.append(seg)
    # Remove stop words (text denoising).
    words_df = pandas.DataFrame({'segment': segment})
    words_df.head()
    # The stop-word file path was elided in the source; 'stopwords.txt' is a placeholder.
    stopwords = pandas.read_csv("stopwords.txt", index_col=False, quoting=3,
                                sep='\t', names=['stopword'], encoding="utf8")
    words_df = words_df[~words_df.segment.isin(stopwords.stopword)]
    # print(words_df.head(6))
    # Word frequency table (dict-style agg renaming was removed in pandas 1.0; see the sketch below)
    words_stat = words_df.groupby(by=['segment'])['segment'].agg({"count": numpy.size})
    words_stat = words_stat.reset_index().sort_values(by="count", ascending=False)
    # Custom word-cloud background image
    bimg = imread('mangguo.png')
    # The font path was truncated in the source; any font with CJK glyphs works, e.g. 'simhei.ttf'
    wordcloud = WordCloud(background_color="white", mask=bimg, font_path='simhei.ttf')
    wordcloud = wordcloud.fit_words(dict(words_stat.head(990000).itertuples(index=False)))
    # Generate word colors from the background image
    bimgColors = ImageColorGenerator(bimg)
    plt.axis("off")
    plt.imshow(wordcloud.recolor(color_func=bimgColors))
    # plt.show()
    wordcloud.to_file("ciyun.png")

if __name__ == '__main__':
    join_txt()
    sleep(2)
    print('txt files merged! ----')
    make_pic()
    print('word cloud image generated ----- ciyun.png')
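Two calls in the script have since been removed from their libraries: scipy.misc.imread is gone as of SciPy 1.2, and passing a renaming dict to a grouped column's agg() stopped working in pandas 1.0. Here is a minimal sketch of the same frequency-table and rendering steps with current APIs; the file names ('merged.txt', 'stopword' handling omitted, 'simhei.ttf') are the same placeholders assumed above, and 'mangguo.png' is the mask image from the original script:

import imageio
import jieba
import pandas
from wordcloud import WordCloud, ImageColorGenerator

# Read the merged corpus and segment it, as in make_pic() above
content = open('merged.txt', encoding='utf-8').read()
segment = [s for s in jieba.cut(content) if len(s) > 1 and s != '\r\n']
words_df = pandas.DataFrame({'segment': segment})

# value_counts() replaces the groupby/agg pair: it returns the words already
# sorted by frequency, and to_dict() yields exactly the dict fit_words() wants
freq = words_df['segment'].value_counts().to_dict()

bimg = imageio.imread('mangguo.png')  # replaces scipy.misc.imread
wc = WordCloud(background_color='white', mask=bimg, font_path='simhei.ttf')
wc.fit_words(freq)
wc.recolor(color_func=ImageColorGenerator(bimg))
wc.to_file('ciyun.png')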
Note that in
wordcloud = wordcloud.fit_words(dict(words_stat.head(990000).itertuples(index=False)))
fit_words() accepts a dict mapping each word to its frequency, which is why the (word, count) tuples are wrapped in dict().
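A tiny self-contained illustration of the dict shape fit_words() expects, using hypothetical words and counts:

from wordcloud import WordCloud

# Keys are words, values are frequencies; absolute counts are fine,
# since wordcloud normalizes them against the maximum internally.
freq = {'python': 20, 'jieba': 12, 'wordcloud': 8}
wc = WordCloud(background_color='white').fit_words(freq)
wc.to_file('demo.png')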