Notes on 《Python网络数据采集》 (Web Scraping with Python): Scraping

一 Traversing a Single Domain

import datetime
import random
import re
from urllib.request import urlopen

from bs4 import BeautifulSoup

random.seed(datetime.datetime.now().timestamp())  # seed with the current time so each run takes a different walk
def getLinks(articleUrl):
    html = urlopen("https://en.wikipedia.org" + articleUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    # Keep only internal article links: they start with /wiki/ and contain no namespace colon
    return bsObj.find("div", {"id": "bodyContent"}).findAll("a", href=re.compile("^(/wiki/)((?!:).)*$"))

links = getLinks("/wiki/Kevin_Bacon")

# Random walk: keep following a randomly chosen internal link from the current article
while len(links) > 0:
    newArticle = links[random.randint(0, len(links) - 1)].attrs["href"]
    print(newArticle)
    links = getLinks(newArticle)
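
The loop above only stops when it reaches a page with no internal links (which on Wikipedia essentially never happens), and any HTTP error kills the script. A minimal variation, sketched below, caps the walk at a fixed number of hops and catches HTTPError; the 10-hop limit is an arbitrary choice for illustration, not something from the book.

import random
import re
from urllib.error import HTTPError
from urllib.request import urlopen

from bs4 import BeautifulSoup


def getLinks(articleUrl):
    # Same idea as above: return the internal /wiki/ links from the article body
    html = urlopen("https://en.wikipedia.org" + articleUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    return bsObj.find("div", {"id": "bodyContent"}).findAll(
        "a", href=re.compile("^(/wiki/)((?!:).)*$"))


articleUrl = "/wiki/Kevin_Bacon"
for _ in range(10):                      # arbitrary cap: stop after 10 hops
    try:
        links = getLinks(articleUrl)
    except HTTPError as e:
        print(e)
        break
    if not links:
        break
    articleUrl = random.choice(links).attrs["href"]
    print(articleUrl)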

二 Crawling an Entire Site

import re
from urllib.request import urlopen

from bs4 import BeautifulSoup

pages = set()
def getLinks(pageUrl):
    global pages
    html = urlopen("https://en.wikipedia.org" + pageUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    for link in bsObj.findAll("a", href=re.compile("^(/wiki/)")):
        if 'href' in link.attrs:
            if link.attrs['href'] not in pages:
                # We have encountered a new page
                newPage = link.attrs['href']
                print(newPage)
                pages.add(newPage)
                getLinks(newPage)

getLinks("")

三 Collecting Data Across an Entire Site

import re
from urllib.request import urlopen

from bs4 import BeautifulSoup

pages = set()

def getLinks(pageUrl):
    global pages
    html = urlopen("https://en.wikipedia.org" + pageUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    try:
        print(bsObj.h1.get_text())
        print(bsObj.find(id="mw-content-text").findAll("p")[0])
        # print(bsObj.find(id="ca-edit").find("span").find("a").attrs['href'])
    except AttributeError as e:
        print(e)
        print("This page is missing some attributes; no need to worry.")

    for link in bsObj.findAll("a", href=re.compile("^(/wiki/)")):
        if 'href' in link.attrs:
            if link.attrs['href'] not in pages:
                newPage = link.attrs['href']
                print("\n")
                print(newPage)
                pages.add(newPage)
                getLinks(newPage)


getLinks("")

四 Parsing JSON Data

import json
from urllib.request import urlopen

def getCountry(ipAddress):
    response = urlopen("http://freegeoip.net/json/" + ipAddress).read().decode('utf-8')
    responseJson = json.loads(response)
    return responseJson["country_code"]

print(getCountry("50.78.253.58"))
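
json.loads turns the response body into ordinary Python dicts and lists, so looking up country_code is just a dictionary access. Note that the freegeoip.net service has since been shut down, so the URL above may no longer respond; the parsing itself can be illustrated with a made-up sample payload of the same shape:

import json

# Made-up sample with the same shape as the geolocation response
sample = '{"ip": "50.78.253.58", "country_code": "US", "country_name": "United States"}'

data = json.loads(sample)             # JSON text -> Python dict
print(data["country_code"])           # US
print(json.dumps(data, indent=2))     # dict -> pretty-printed JSON text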

五 Downloading a File from Its URL

from urllib.request import urlopen, urlretrieve

from bs4 import BeautifulSoup

html = urlopen("http://www.pythonscraping.com")
bsObj = BeautifulSoup(html, "html.parser")
# Find the site logo image inside the <a id="logo"> element and download it
imageLocation = bsObj.find("a", {"id": "logo"}).find("img")["src"]
urlretrieve(imageLocation, "logo.jpg")
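
The src attribute is not guaranteed to be an absolute URL. If the page serves a relative path, it needs to be resolved against the page address before urlretrieve can fetch it; a minimal sketch using urljoin (same page and selector as above, assumed unchanged):

from urllib.parse import urljoin
from urllib.request import urlopen, urlretrieve

from bs4 import BeautifulSoup

baseUrl = "http://www.pythonscraping.com"
bsObj = BeautifulSoup(urlopen(baseUrl), "html.parser")
src = bsObj.find("a", {"id": "logo"}).find("img")["src"]
# urljoin leaves absolute URLs untouched and resolves relative ones against baseUrl
urlretrieve(urljoin(baseUrl, src), "logo.jpg")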


Original post: https://www.cnblogs.com/xiangshigang/p/7229295.html