Downloading a remote file and displaying progress

The first approach uses urlretrieve:

import sys
import time
from urllib import request


def Schedule(a, b, c):
    """
    Progress display (reporthook callback for urlretrieve)
    :param a: number of data blocks downloaded so far
    :param b: size of each data block
    :param c: total size of the remote file
    :return:
    """
    per = 100.0 * a * b / c
    if per > 100:
        per = 100
    sys.stdout.write('\r')
    sys.stdout.write('\t\t%.2f%% - downloaded: %d - file size: %d' % (per, a * b, c))
    sys.stdout.flush()
    time.sleep(0.5)


def run():
    request.urlretrieve('https://***', 'ttt', Schedule)
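Note that urlretrieve passes -1 as the total size when the server sends no Content-Length header, which makes the percentage calculation above meaningless. A minimal sketch of a reporthook that guards against that case (the function name, URL, and filename are placeholders, not from the original post):

import sys
from urllib import request


def safe_schedule(block_num, block_size, total_size):
    """Reporthook that also works when the total size is unknown (-1)."""
    downloaded = block_num * block_size
    if total_size > 0:
        per = min(100.0, 100.0 * downloaded / total_size)
        sys.stdout.write('\r\t\t%.2f%% - downloaded: %d - file size: %d'
                         % (per, downloaded, total_size))
    else:
        # No Content-Length: only the downloaded byte count can be shown
        sys.stdout.write('\r\t\tdownloaded: %d bytes' % downloaded)
    sys.stdout.flush()


# Hypothetical usage; replace the URL and local filename with your own:
# request.urlretrieve('https://example.com/big.zip', 'big.zip', safe_schedule)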

However, urlretrieve is slow with large files and kept raising errors, so the second approach reads the response in chunks with urlopen:

import os
import sys
import time
import traceback
from urllib import parse, request

# 'urls' is assumed here: the original snippet runs inside a loop over download URLs
for u in urls:
    fname = 'data/%s' % u.split('/')[-1]
    if os.path.exists(fname):
        print('\t*** File already exists:', u, time.strftime('%Y-%m-%d %H:%M:%S'))
        continue
    print('\n\t>>> Start downloading:', u, time.strftime('%Y-%m-%d %H:%M:%S'))
    try:
        # request.urlretrieve(u, fname, Schedule)

        response = request.urlopen(parse.quote(u, safe=':/'))
        chunk_size = 16 * 1024
        n = 0
        c = response.length                          # total size from Content-Length
        t = c / chunk_size
        t = int(t) + 1 if int(t) < t else int(t)     # total number of chunks, rounded up
        a = 0
        with open(fname, 'wb') as f:
            while True:
                n += 1
                chunk = response.read(chunk_size)
                if not chunk:
                    break
                f.write(chunk)
                a += len(chunk)
                # Print progress on the first chunk, every 50th chunk, and the last chunk
                if n == 1 or n % 50 == 0 or n == t:
                    per = 100.0 * a / c
                    if per > 100:
                        per = 100
                    sys.stdout.write('\r')
                    sys.stdout.write('\t\t%.2f%% - downloaded: %d - file size: %d' % (per, a, c))
                    sys.stdout.flush()
    except Exception as e:
        print('\n\t>>> Download failed:', u, time.strftime('%Y-%m-%d %H:%M:%S'))
        traceback.print_exc()
        continue
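The loop above assumes the server reports a Content-Length; otherwise response.length is None and the percentage arithmetic fails. A minimal sketch of the same chunked-download idea wrapped in a reusable function, with that case handled (the function name and parameters are illustrative, not from the original post):

import sys
from urllib import parse, request


def download(url, fname, chunk_size=16 * 1024):
    """Download 'url' to 'fname' in chunks, printing a simple progress line."""
    response = request.urlopen(parse.quote(url, safe=':/'))
    total = response.length or 0   # None when the server sends no Content-Length
    done = 0
    with open(fname, 'wb') as f:
        while True:
            chunk = response.read(chunk_size)
            if not chunk:
                break
            f.write(chunk)
            done += len(chunk)
            if total:
                sys.stdout.write('\r\t\t%.2f%% - downloaded: %d - file size: %d'
                                 % (min(100.0, 100.0 * done / total), done, total))
            else:
                sys.stdout.write('\r\t\tdownloaded: %d bytes' % done)
            sys.stdout.flush()
    sys.stdout.write('\n')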

Reference: https://www.cnblogs.com/rkfeng/p/8366327.html

Original post: https://www.cnblogs.com/haoxr/p/9007476.html