A simple crawler: case 1

The goal is to download everything listed here:
http://ftp.sjtu.edu.cn/archlinux/core/os/x86_64/

The approach is straightforward: fetch the directory listing page, pull every <a> link out of it with BeautifulSoup, and download each file that is not already present locally. The code below does exactly that.

import os

import requests
from bs4 import BeautifulSoup

url = 'http://ftp.sjtu.edu.cn/archlinux/core/os/x86_64/'
root = './crawl'

# Fetch the auto-generated directory index page
resp = requests.get(url)
soup = BeautifulSoup(resp.text, "html.parser")

if not os.path.exists(root):
    os.mkdir(root)

count = 0
for link in soup.find_all("a"):
    name = link.get('href')
    # Skip the parent-directory link, sub-directories and sort-order links
    if not name or name.endswith('/') or name.startswith('?'):
        continue
    path = os.path.join(root, name)
    # Only download files we do not already have
    if not os.path.exists(path):
        r = requests.get(url + name)
        with open(path, "wb") as f:
            f.write(r.content)
    count += 1
    print(count, name)
print("finished")