Python大作业

from collections import Counter

import jieba
import requests
from bs4 import BeautifulSoup
# Scrape Li Bai poem summaries from shicimingju.com (author pages 2-9)
# and append them to 123.txt for the word-frequency step below.
# Open the output file ONCE instead of re-opening it for every poem,
# and let the with-statement guarantee it is closed.
with open('123.txt', 'a', encoding='utf-8') as f:
    for page in range(2, 10):
        url = 'http://www.shicimingju.com/chaxun/zuozhe/1_%s.html' % page
        resp = requests.get(url)
        resp.encoding = 'utf-8'  # force UTF-8 so Chinese text is not garbled
        soup = BeautifulSoup(resp.text, 'html.parser')
        # Each poem summary lives in <div class="summary">.
        for news in soup.find_all('div', class_='summary'):
            print(news.text)
            f.write(news.text)
def changeTitleToDict():
    """Tokenize the scraped text in 123.txt with jieba and return a
    word -> occurrence-count dict, with punctuation tokens removed.

    Returns:
        dict[str, int]: token frequencies (also printed as a side effect,
        matching the original behavior).
    """
    # with-statement closes the file even if reading/segmentation fails;
    # avoid shadowing the builtin `str` as the original did.
    with open("123.txt", "r", encoding='utf-8') as f:
        text = f.read()
    tokens = list(jieba.cut(text))
    # Tokens that are punctuation/noise rather than real words.
    del_words = {"+", "/", "(", ")", "【", "】", " ", ";", "!", "、"}
    # Counter counts all tokens in one O(n) pass, replacing the original
    # O(n^2) `stringList.count(i)`-per-unique-token loop.
    title_dict = {word: count for word, count in Counter(tokens).items()
                  if word not in del_words}
    print(title_dict)
    return title_dict

  先把代码弄出来

然后就开始生成词云

from PIL import Image,ImageSequence
import numpy as np
import matplotlib.pyplot as plt
from wordcloud import WordCloud,ImageColorGenerator
# Get the word-frequency dict built from the scraped text above.
title_dict = changeTitleToDict()
# Bug fix: the path had lost its backslashes (r'C:WindowsFontssimhei.ttf'),
# so WordCloud could never find the font. SimHei is needed to render
# Chinese glyphs. (Also dropped the unused `graph = np.array(title_dict)`,
# which only produced a meaningless 0-d object array.)
font = r'C:\Windows\Fonts\simhei.ttf'
wc = WordCloud(background_color='white', max_words=500, font_path=font)
wc.generate_from_frequencies(title_dict)
plt.imshow(wc)
plt.axis("off")  # hide the axes; we only want the cloud image
plt.show()

  我选择的网站是李白的古诗网站,中间遇到很多问题,最大的莫过于找不到词;其次是词云 wordcloud 的安装总是失败。

找不到词在同学帮助下成功了,词云也是如此,通过安装包,而不是通过pycharm安装。

可见李白最喜欢三件东西:拒绝、百日、成仙。

原文地址:https://www.cnblogs.com/phoenlix/p/8950220.html