Structuring and Saving the Data

import requests
import re
import pandas
from bs4 import BeautifulSoup
from datetime import datetime
 
# 1. Save the body text of each news article to a text file.
def writeNewsDetail(content):
    # Append the article text to gzccNews.txt; the with-block closes the file automatically.
    with open('gzccNews.txt', 'a', encoding='utf-8') as f:
        f.write(content)
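# Note: writeNewsDetail() is never called in the code below; one way to wire it in
# (an assumption, not part of the original flow) is to call
# writeNewsDetail(news['content']) inside getNewDetail() once the content is extracted.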
# 2. Structure each news item as a dictionary (the list pages collect them into a list of dictionaries).
def getClickCount(newsUrl):
    # Pull the numeric article id out of the article URL, then query the click-count API.
    newsId = re.search(r'_(.*)\.html', newsUrl).group(1).split('/')[-1]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    return int(requests.get(clickUrl).text.split('.html')[-1].lstrip("('").rstrip("');"))
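# The string slicing above assumes the counter API answers with a small jQuery-style
# snippet ending in something like "$('#hits').html('5423');" (an assumption inferred
# from how the response is sliced, not stated in the original post): split('.html')[-1]
# keeps "('5423');", and the lstrip/rstrip calls peel the punctuation off the number.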
 
def getNewDetail(newsUrl):
    resd = requests.get(newsUrl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    # info starts with the publish time, e.g. '发布时间:2018-04-04 14:28:46 ...';
    # strip the label and parse the 19-character timestamp that follows it.
    news['dt'] = datetime.strptime(info.lstrip('发布时间')[1:20], '%Y-%m-%d %H:%M:%S')
    if info.find('来源:') > 0:
        news['source'] = info[info.find('来源:'):].split()[0].lstrip('来源:')  # source department
    else:
        news['source'] = 'none'
    news['content'] = soupd.select('.show-content')[0].text.strip()
    news['click'] = getClickCount(newsUrl)
    news['newsUrl'] = newsUrl
    return news
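# Quick single-article check (the URL below only illustrates the pattern and is
# hypothetical, so it is left commented out):
# print(getNewDetail('http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html'))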
 
 
def getListPage(pageUrl):
    res = requests.get(pageUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsList = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:     # only <li> elements that hold a news entry
            newsUrl = news.select('a')[0].attrs['href']  # link to the article page
            newsList.append(getNewDetail(newsUrl))
    return newsList
 
def getPageN():  # total number of pages in the news list
    res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    n = int(soup.select('.a1')[0].text.rstrip('条'))  # '.a1' holds the total item count, e.g. '2345条'
    return n // 10 + 1
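# Worked example (assuming 10 items per list page, as the division implies):
# if '.a1' reads '2345条', then n = 2345 and 2345 // 10 + 1 = 235 pages.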
 
newsTotal = []
firstPageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
newsTotal.extend(getListPage(firstPageUrl))
 
n = getPageN()
for i in range(n, n + 1):  # only the last list page is crawled here; use range(2, n + 1) to fetch every remaining page
    listPageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newsTotal.extend(getListPage(listPageUrl))
 
# 3. Install pandas and use pandas.DataFrame(newsTotal) to create a DataFrame object df.
df = pandas.DataFrame(newsTotal)
# 4. Use df to save the extracted data to a CSV or Excel file.
df.to_excel('416.xlsx')
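# A CSV variant of step 4 (a sketch; 'utf-8-sig' is an assumed choice so the
# Chinese text opens cleanly in Excel):
# df.to_csv('416.csv', encoding='utf-8-sig', index=False)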
 
# 5. Analyze the data with the functions and methods provided by pandas:
print(df[['click','title','source']].head(6))                    # first 6 rows of the selected columns
print(df[(df['source'] == '学校综合办') | (df['click'] > 3000)])   # news from 学校综合办 or with more than 3000 clicks
sourcelist = ['国际学院','学生工作处']
print(df[df['source'].isin(sourcelist)])                          # news whose source is in sourcelist
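# Further analysis sketches (assuming the column names built in getNewDetail):
print(df.sort_values('click', ascending=False).head(10))              # ten most-clicked articles
print(df[df['dt'] > datetime(2018, 4, 1)][['dt', 'title', 'click']])  # articles published after 2018-04-01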
Original post: https://www.cnblogs.com/cyn-kk/p/8858881.html