Perl web crawler

I haven't had time to analyze this code yet; I've been busy with a few things these past days. For now I'm just bookmarking the official example code, and once things quiet down I'll study it properly!

#!/usr/bin/perl
use strict;
use warnings;

use LWP::UserAgent;
use HTML::LinkExtor;
use URI::URL;

my $url = "http://www.163.com";   # for instance
my $ua  = LWP::UserAgent->new;

  # Set up a callback that collects the URLs found in <a> tags
my @links = ();
sub callback {
     my ($tag, %attr) = @_;
     return if $tag ne 'a';       # we only look closer at <a href="...">
     push(@links, values %attr);  # HTML::LinkExtor passes only link attributes (href here)
}

  # Make the parser.  Unfortunately, we don't know the base yet
  # (it might be different from $url)
my $p = HTML::LinkExtor->new(\&callback);

  # Request the document and feed it to the parser as it arrives
my $res = $ua->request(HTTP::Request->new(GET => $url),
                       sub { $p->parse($_[0]) });

  # Expand all link URLs to absolute ones
my $base = $res->base;
@links = map { url($_, $base)->abs } @links;

  # Print them out
print join("\n", @links), "\n";
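As a possible next step (not part of the original post), the extracted links could be fed back into the same fetch-and-parse routine to get a minimal breadth-first crawler. The sketch below reuses the same modules; the starting URL, the depth cap of $max_pages, and the %seen bookkeeping are my own illustrative assumptions.

#!/usr/bin/perl
# Minimal breadth-first crawler sketch built on the same idea:
# fetch a page, extract <a href> links, then visit each new link once.
# The start URL, $max_pages cap and %seen hash are assumptions for the example.
use strict;
use warnings;

use LWP::UserAgent;
use HTML::LinkExtor;
use URI::URL;

my $start = "http://www.163.com";
my $ua    = LWP::UserAgent->new(timeout => 10);
my %seen;                            # URLs already fetched
my @queue = ($start);
my $max_pages = 10;                  # hard cap so the example terminates

while (@queue && keys %seen < $max_pages) {
    my $url = shift @queue;
    next if $seen{$url}++;           # skip anything we have visited

    my @found;
    my $p = HTML::LinkExtor->new(sub {
        my ($tag, %attr) = @_;
        push @found, $attr{href} if $tag eq 'a' && $attr{href};
    });

    my $res = $ua->request(HTTP::Request->new(GET => $url),
                           sub { $p->parse($_[0]) });
    next unless $res->is_success;

    my $base = $res->base;
    for my $link (map { url($_, $base)->abs->as_string } @found) {
        print "$url -> $link\n";
        push @queue, $link unless $seen{$link};
    }
}

Using a fresh HTML::LinkExtor per request keeps the parser state from leaking between pages; in a real crawler you would also want robots.txt handling and some politeness delay.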
Original post: https://www.cnblogs.com/xiaoCon/p/3061967.html