Perl: cleaning up duplicate files on your computer (continued)

Continuing from the previous post: using Perl to search for duplicate files on the computer. Full source code:
use strict;
use warnings;
use File::DirWalk;
use File::Basename;
use Data::Dumper;

my $dw = File::DirWalk->new;
my %files;

# Use Windows path semantics when splitting file names.
fileparse_set_fstype("MSWin32");

# For every file visited, group its full path under its base name
# and count how many paths share that name.
$dw->onFile(
    sub {
        my ($file) = @_;
        push @{ $files{ basename($file) }->{"Paths"} }, $file;
        $files{ basename($file) }->{"Num"} += 1;
        return File::DirWalk::SUCCESS;
    }
);

# Redirect default output to a trace file.
open my $hTrace, '>', 'Trace.txt' or die "Cannot open Trace.txt: $!";
select $hTrace;

$dw->walk('D:/old/perl');

# Keep only the names that appear more than once.
my @newFiles;
while ( my ( $k, $v ) = each %files ) {
    if ( $v->{"Num"} > 1 ) {
        push @newFiles,
          {
            "Name"  => $k,
            "Paths" => $v->{"Paths"},
            "Num"   => $v->{"Num"},
          };
    }
}

# Sort the duplicate groups by how many copies were found.
@newFiles = sort { $a->{"Num"} <=> $b->{"Num"} } @newFiles;

print Dumper(@newFiles);
close $hTrace;
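
Note that the grouping above only matches files by base name, so two files with the same name can still differ in content. Not part of the original post, but as a rough extension one could hash each candidate path with the core Digest::MD5 module and report only groups whose contents are actually identical. A minimal sketch, assuming the @newFiles structure built above (the helper name group_by_content is hypothetical):

use Digest::MD5;

# For one duplicate-name group, bucket the paths by the MD5 digest of
# their contents; paths in the same bucket are byte-for-byte duplicates.
sub group_by_content {
    my ($paths) = @_;
    my %by_digest;
    for my $path (@$paths) {
        open my $fh, '<', $path or next;    # skip unreadable files
        binmode $fh;
        my $digest = Digest::MD5->new->addfile($fh)->hexdigest;
        close $fh;
        push @{ $by_digest{$digest} }, $path;
    }
    # Keep only buckets holding more than one path.
    return grep { @$_ > 1 } values %by_digest;
}

# Example use with the @newFiles array from the main script:
# for my $group (@newFiles) {
#     for my $dupes ( group_by_content( $group->{"Paths"} ) ) {
#         print "Identical copies of $group->{Name}:\n";
#         print "  $_\n" for @$dupes;
#     }
# }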
Original post: https://www.cnblogs.com/hurner/p/1986143.html