A Perl Crawler
云舒
Abstract: Yesterday was overcast all day, so I stayed home and put together a simple crawler. It mainly exercises multi-threading, a queue, and a Bloom filter; think of it as a demo. Today it finally snowed, and I went out for a walk with my wife, which felt wonderful.
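Before the full script, here is a minimal standalone sketch of the Bloom::Filter usage pattern the crawler relies on for URL de-duplication: check() reports whether a key has (probably) been seen before, and add() records it. The capacity and error_rate values here are illustrative only, not tuned:

#!/usr/bin/perl
use strict;
use warnings;
use Bloom::Filter;

# a small filter: room for about 100 keys at a 0.1% false-positive rate
my $filter = Bloom::Filter->new( capacity => 100, error_rate => 0.001 );

my $url = 'http://www.icylife.net/';
print "new url\n" unless $filter->check( $url );   # not seen yet
$filter->add( $url );
print "seen it\n" if $filter->check( $url );       # now reported as seen

The trade-off is that a Bloom filter can return false positives, so a genuinely new URL is occasionally skipped; for a crawler demo that is an acceptable price for the small, fixed memory footprint.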
#!/usr/bin/perl
use strict;
use warnings;
use threads;
use threads::shared;
use Thread::Queue;
use Thread::Semaphore;
use Bloom::Filter;
use URI;
use URI::URL;
use Web::Scraper;
my $max_threads = 30;
my $base_url = $ARGV[0] || 'http://www.icylife.net/';
my $host = URI->new($base_url)->host;
my $queue = Thread::Queue->new( );
my $semaphore = Thread::Semaphore->new( $max_threads );
my $filter = shared_clone( Bloom::Filter->new(capacity => 50000, error_rate => 0.001) );
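# seed both the work queue and the visited-URL filter with the start URL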
$queue->enqueue( $base_url );
$filter->add( $base_url );
while( 1 )
{
    # join all threads which can be joined
    foreach ( threads->list(threads::joinable) )
    {
        $_->join( );
    }
    # if there are no URLs left to process
    my $item = $queue->pending();
    if( $item == 0 )
    {
        # no threads are still running, so the whole job is done
        if( threads->list(threads::running) == 0 )
        {
            print "All done!\n";
            last;
        }
        # active threads may still enqueue more URLs, so just wait for them
        else
        {
            sleep 1;
            next;
        }
    }
    # there are URLs to process: spawn one worker per free semaphore slot
    # (down_nb() is non-blocking, so no slot is leaked when none are free)
    while( $queue->pending() > 0 && $semaphore->down_nb() )
    {
        threads->create( \&ProcessUrl );
    }
}
# join all remaining threads
foreach ( threads->list() )
{
    $_->join( );
}
sub ProcessUrl
{
    my $scraper = scraper
    {
        process '//a', 'links[]' => '@href';
    };
    my $res;
    my $link;
    while( my $url = $queue->dequeue_nb() )
    {
        $res = $scraper->scrape( URI->new($url) )->{links};
        #print "there are ", scalar( threads->list(threads::running) ), " threads, ", $queue->pending(), " urls to process.\n";
        foreach( @{$res} )
        {
            $link = $_->as_string;
            $link = URI::URL->new($link, $url);
            # not http and not https?
            next if( $link->scheme ne 'http' && $link->scheme ne 'https' );
            # another domain?
            next if( $link->host ne $host );
            $link = $link->abs->as_string;
            # strip the fragment part, if any
            if( $link =~ /(.*?)#(.*)/ )
            {
                $link = $1;
            }
            # skip binary resources we do not want to crawl
            next if( $link =~ /\.(jpg|png|zip|rar|iso)$/i );
            # enqueue the link only if the Bloom filter has not seen it yet
            if( ! $filter->check($link) )
            {
                print $filter->key_count(), " ", $link, "\n";
                $filter->add($link);
                $queue->enqueue($link);
            }
        }
    }
    # release the semaphore slot once the queue runs dry
    $semaphore->up( );
}
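To try it out, save the script (the name spider.pl below is just an example) and pass a start URL as the first argument; with no argument it falls back to http://www.icylife.net/ as written above:

perl spider.pl http://www.icylife.net/

Each output line is the filter's current key count followed by a newly discovered same-host URL. The semaphore keeps at most $max_threads = 30 workers alive at once, and Thread::Queue is already thread-safe, so the workers can share the URL frontier without any extra locking.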