"""Generate and display a word cloud from the Chinese text in ``1.txt``.

Pipeline: read the file -> segment the Chinese text with jieba ->
join the tokens with spaces (so WordCloud can tokenize them like
western text) -> render and show the word-cloud image.
"""

from pyquery import PyQuery as pq  # NOTE(review): unused here — confirm before removing
import matplotlib as plt  # NOTE(review): unused; alias `plt` normally means matplotlib.pyplot
from wordcloud import WordCloud
from PIL import Image  # NOTE(review): unused here — WordCloud.to_image() already returns a PIL image
import numpy as np  # NOTE(review): unused here — confirm before removing
import jieba


def main():
    """Build the word cloud from ``1.txt`` and display it."""
    # Open with an explicit encoding instead of the original
    # read().encode("utf-8").decode("utf8") round-trip, which both did
    # nothing useful and relied on the platform default codec for the
    # initial read (breaks on non-UTF-8 locales such as Windows cp936).
    with open("1.txt", "r", encoding="utf-8") as fp:
        text = fp.read()

    # Segment the Chinese text into words; WordCloud expects
    # space-separated tokens.
    all_words = " ".join(jieba.cut(text))

    cloud = WordCloud(
        background_color="white",      # canvas background
        width=1000,
        height=600,
        # Words to ignore (chapter/section markers and common particles);
        # a set is the documented form and avoids duplicates.
        stopwords={
            "的", "第一节", "第二节", "第三节", "第四节",
            "第一章", "第二章", "第三章",
            "等", "为", "大", "新", "及", "最", "区", "将",
        },
        font_path="msyh.ttf",          # CJK-capable font; avoids garbled glyphs
        max_font_size=50,
        min_font_size=10,
        max_words=1000,
    )

    cloud.generate(all_words)
    cloud.to_image().show()  # render and display the word cloud


if __name__ == "__main__":
    main()