Sharing a Practical C# Web-Scraping Class

A practical C# web-scraping class that simulates a spider. The class demonstrates many of the basic techniques used in C# for harvesting articles and scraping web pages. The code is shared below:
using System;
using System.Data;
using System.Configuration;
using System.Net;
using System.IO;
using System.Text;
using System.Collections.Generic;
using System.Text.RegularExpressions;
using System.Threading;
using System.Web;
namespace MyWebPage
{
    public class Link
    {
        public string Text
        {
            get;
            set;
        }
        public string NavigateUrl
        {
            get;
            set;
        }
    }
    /// <summary>
    /// Web page class
    /// </summary>
    public class WebPage
    {
        #region Private members
        private Uri m_uri;   // URL of the page
        private List<Link> m_links = new List<Link>();    // links found on this page
        private string m_title = "";        // page title
        private string m_html = "";         // raw HTML source
        private string m_outstr = "";       // plain text extractable from the page
        private bool m_good;           // whether the page is usable
        private int m_pagesize;       // size of the page
        private static Dictionary<string, CookieContainer> webcookies = new Dictionary<string, CookieContainer>();// cookies for all pages, keyed by host
        #endregion
        #region Properties
        /// <summary>
        /// Gets the URL of this page (read-only).
        /// </summary>
        public string URL
        {
            get
            {
                return m_uri.AbsoluteUri;
            }
        }
        /// <summary>
        /// Gets the title of this page (read-only).
        /// </summary>
        public string Title
        {
            get
            {
                if (m_title == "")
                {
                    Regex reg = new Regex(@"(?m)<title[^>]*>(?<title>(?:\w|\W)*?)</title[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase);
                    Match mc = reg.Match(m_html);
                    if (mc.Success)
                        m_title = mc.Groups["title"].Value.Trim();
                }
                return m_title;
            }
        }
        public string M_html
        {
            get
            {
                if (m_html == null)
                {
                    m_html = "";
                }
                return m_html;
            }
        }
        /// <summary>
        /// Gets the information for every link on this page (read-only).
        /// </summary>
        public List<Link> Links
        {
            get
            {
                if (m_links.Count == 0) getLinks();
                return m_links;
            }
        }
        /// <summary>
        /// Gets the full plain text of this page (read-only).
        /// </summary>
        public string Context
        {
            get
            {
                if (m_outstr == "") getContext(Int16.MaxValue);
                return m_outstr;
            }
        }
        /// <summary>
        /// Gets the size of this page.
        /// </summary>
        public int PageSize
        {
            get
            {
                return m_pagesize;
            }
        }
        /// <summary>
        /// Gets all in-site links on this page.
        /// </summary>
        public List<Link> InsiteLinks
        {
            get
            {
                return getSpecialLinksByUrl("^http://" + m_uri.Host, Int16.MaxValue);
            }
        }
        /// <summary>
        /// Indicates whether this page is usable.
        /// </summary>
        public bool IsGood
        {
            get
            {
                return m_good;
            }
        }
        /// <summary>
        /// Gets the host site this page belongs to.
        /// </summary>
        public string Host
        {
            get
            {
                return m_uri.Host;
            }
        }
        #endregion
        /// <summary>
        /// Parses the link information out of the HTML source.
        /// </summary>
        /// <returns>List<Link></returns>
        private List<Link> getLinks()
        {
            if (m_links.Count == 0)
            {
                Regex[] regex = new Regex[2];
                // Matches <a href="..."> links
                regex[0] = new Regex(@"<a\shref\s*=""(?<URL>[^""]*).*?>(?<title>[^<]*)</a>", RegexOptions.IgnoreCase | RegexOptions.Singleline);
                // Matches <frame>/<iframe> src URLs; the group is named URL so both patterns share the extraction below
                regex[1] = new Regex(@"<[i]*frame[^><]+src=(""|')?(?<URL>([^>""'\s)])+)(""|')?[^>]*>", RegexOptions.IgnoreCase);
                for (int i = 0; i < 2; i++)
                {
                    Match match = regex[i].Match(m_html);
                    while (match.Success)
                    {
                        try
                        {
                            string url = HttpUtility.UrlDecode(new Uri(m_uri, match.Groups["URL"].Value).AbsoluteUri);
                            string text = "";
                            // Strip tags, whitespace, and leftover entities from the link text
                            if (i == 0) text = new Regex(@"(<[^>]+>)|(\s)|(&nbsp;)|&|&quot;", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(match.Groups["title"].Value, "");
                            Link link = new Link();
                            link.Text = text;
                            link.NavigateUrl = url;
                            m_links.Add(link);
                        }
                        catch (Exception ex) { Console.WriteLine(ex.Message); };
                        match = match.NextMatch();
                    }
                }
            }
            return m_links;
        }
        /// <summary>
        /// Private helper that extracts a given number of characters of plain text from a piece of HTML.
        /// </summary>
        /// <param name="instr">HTML source</param>
        /// <param name="firstN">how many characters to take from the start</param>
        /// <param name="withLink">whether to keep the text inside links</param>
        /// <returns>plain text</returns>
        private string getFirstNchar(string instr, int firstN, bool withLink)
        {
            if (m_outstr == "")
            {
                m_outstr = instr.Clone() as string;
                m_outstr = new Regex(@"(?m)<script[^>]*>(w|W)*?</script[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
                m_outstr = new Regex(@"(?m)<style[^>]*>(w|W)*?</style[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
                m_outstr = new Regex(@"(?m)<select[^>]*>(w|W)*?</select[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
                if (!withLink) m_outstr = new Regex(@"(?m)<a[^>]*>(w|W)*?</a[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
                Regex objReg = new System.Text.RegularExpressions.Regex(@"(<[^>]+?>)|&nbsp;", RegexOptions.Multiline | RegexOptions.IgnoreCase);
                m_outstr = objReg.Replace(m_outstr, "");
                Regex objReg2 = new System.Text.RegularExpressions.Regex(@"(\s)+", RegexOptions.Multiline | RegexOptions.IgnoreCase);
                m_outstr = objReg2.Replace(m_outstr, " ");
            }
            return m_outstr.Length > firstN ? m_outstr.Substring(0, firstN) : m_outstr;
        }
        #region Public methods
        /// <summary>
        /// Extracts a given number of characters of plain text from the page, link text included.
        /// </summary>
        /// <param name="firstN">number of characters</param>
        /// <returns></returns>
        public string getContext(int firstN)
        {
            return getFirstNchar(m_html, firstN, true);
        }
        /// <summary>
        /// Extracts up to a given number of links from this page whose URLs match a regular expression.
        /// </summary>
        /// <param name="pattern">the regular expression</param>
        /// <param name="count">maximum number of links to return</param>
        /// <returns>List<Link></returns>
        public List<Link> getSpecialLinksByUrl(string pattern, int count)
        {
            if (m_links.Count == 0) getLinks();
            List<Link> SpecialLinks = new List<Link>();
            List<Link>.Enumerator i;
            i = m_links.GetEnumerator();
            int cnt = 0;
            while (i.MoveNext() && cnt < count)
            {
                if (new Regex(pattern, RegexOptions.Multiline | RegexOptions.IgnoreCase).Match(i.Current.NavigateUrl).Success)
                {
                    SpecialLinks.Add(i.Current);
                    cnt++;
                }
            }
            return SpecialLinks;
        }
        /// <summary>
        /// Extracts up to a given number of links from this page whose link text matches a regular expression.
        /// </summary>
        /// <param name="pattern">the regular expression</param>
        /// <param name="count">maximum number of links to return</param>
        /// <returns>List<Link></returns>
        public List<Link> getSpecialLinksByText(string pattern, int count)
        {
            if (m_links.Count == 0) getLinks();
            List<Link> SpecialLinks = new List<Link>();
            List<Link>.Enumerator i;
            i = m_links.GetEnumerator();
            int cnt = 0;
            while (i.MoveNext() && cnt < count)
            {
                if (new Regex(pattern, RegexOptions.Multiline | RegexOptions.IgnoreCase).Match(i.Current.Text).Success)
                {
                    SpecialLinks.Add(i.Current);
                    cnt++;
                }
            }
            return SpecialLinks;
        }
        /// <summary>
        /// Extracts the text in this page's plain text that matches a regular expression.
        /// </summary>
        /// <param name="pattern">the regular expression</param>
        /// <returns>the first captured group of the match</returns>
        public string getSpecialWords(string pattern)
        {
            if (m_outstr == "") getContext(Int16.MaxValue);
            Regex regex = new Regex(pattern, RegexOptions.Multiline | RegexOptions.IgnoreCase);
            Match mc = regex.Match(m_outstr);
            if (mc.Success)
                return mc.Groups[1].Value;
            return string.Empty;
        }
        #endregion
        #region Constructors
        private void Init(string _url)
        {
            try
            {
                m_uri = new Uri(_url);
                m_links = new List<Link>();
                m_html = "";
                m_outstr = "";
                m_title = "";
                m_good = true;
                if (_url.EndsWith(".rar") || _url.EndsWith(".dat") || _url.EndsWith(".msi"))
                {
                    m_good = false;
                    return;
                }
                HttpWebRequest rqst = (HttpWebRequest)WebRequest.Create(m_uri);
                rqst.AllowAutoRedirect = true;
                rqst.MaximumAutomaticRedirections = 3;
                rqst.UserAgent = "Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)";
                rqst.KeepAlive = true;
                rqst.Timeout = 10000;
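                // Share one CookieContainer per host so cookies persist across requests to the same site.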
                lock (WebPage.webcookies)
                {
                    if (WebPage.webcookies.ContainsKey(m_uri.Host))
                        rqst.CookieContainer = WebPage.webcookies[m_uri.Host];
                    else
                    {
                        CookieContainer cc = new CookieContainer();
                        WebPage.webcookies[m_uri.Host] = cc;
                        rqst.CookieContainer = cc;
                    }
                }
                HttpWebResponse rsps = (HttpWebResponse)rqst.GetResponse();
                Stream sm = rsps.GetResponseStream();
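                // Only text responses no larger than 4 MB (1 << 22 bytes) are processed.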
                if (!rsps.ContentType.ToLower().StartsWith("text/") || rsps.ContentLength > 1 << 22)
                {
                    rsps.Close();
                    m_good = false;
                    return;
                }
                Encoding cding = System.Text.Encoding.Default;
                string contenttype = rsps.ContentType.ToLower();
                int ix = contenttype.IndexOf("charset=");
                if (ix != -1)
                {
                    try
                    {
                        cding = System.Text.Encoding.GetEncoding(rsps.ContentType.Substring(ix + "charset".Length + 1));
                    }
                    catch
                    {
                        cding = Encoding.Default;
                    }
                    // Depending on the site, the response may need HTML-decoding first:
                    //m_html = HttpUtility.HtmlDecode(new StreamReader(sm, cding).ReadToEnd());
                    m_html = new StreamReader(sm, cding).ReadToEnd();
                }
                else
                {
                    // Depending on the site, the response may need HTML-decoding first:
                    //m_html = HttpUtility.HtmlDecode(new StreamReader(sm, cding).ReadToEnd());
                    m_html = new StreamReader(sm, cding).ReadToEnd();
                    Regex regex = new Regex("charset=(?<cding>[^=]+)?\"", RegexOptions.IgnoreCase);
                    string strcding = regex.Match(m_html).Groups["cding"].Value;
                    try
                    {
                        cding = Encoding.GetEncoding(strcding);
                    }
                    catch
                    {
                        cding = Encoding.Default;
                    }
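                    // Round-trip the text through the default encoding to recover bytes, then
                    // re-decode with the charset declared in the page's own meta tag; if the
                    // result is riddled with '?' characters, the re-decode failed, so fall back.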
                    byte[] bytes = Encoding.Default.GetBytes(m_html.ToCharArray());
                    m_html = cding.GetString(bytes);
                    if (m_html.Split('?').Length > 100)
                    {
                        m_html = Encoding.Default.GetString(bytes);
                    }
                }
                m_pagesize = m_html.Length;
                m_uri = rsps.ResponseUri;
                rsps.Close();
            }
            catch (Exception)
            {
                // Any failure (bad URL, timeout, unreadable response) marks the page as unusable.
                m_good = false;
            }
        }
        public WebPage(string _url)
        {
            string uurl = "";
            try
            {
                uurl = Uri.UnescapeDataString(_url);
                _url = uurl;
            }
            catch { };
            Init(_url);
        }
        #endregion
    }
    /// <summary>
    /// Main program author: 大黑
    /// (The WebPage class was written by someone else; the Link class definition comes from the comment
    /// section of his blog post at http://blog.csdn.net/yysyangyangyangshan/article/details/6661886)
    /// Description: a multi-threaded console web crawler, written in C#, that fetches pages asynchronously.
    /// Features: at present it only extracts links, so the two record files it uses do not need to be large.
    /// Page text, images, video, and HTML source are not captured yet.
    /// Note that the number of web pages out there is enormous. In theory the code below could capture
    /// the link graph of more or less the entire web, but in practice CPU power and network conditions
    /// (mainly bandwidth) limit an ordinary home PC to roughly 12 crawler threads at most, so throughput
    /// is bounded. It works, but it takes time and patience.
    /// Capturing every link is feasible in principle, since links take up little space, and the record
    /// files let the set of captured pages accumulate across runs. It is best to crawl in batches:
    /// a maxNum of roughly 500-1000 per run is suggested, building up the archive gradually.
    /// Because this is a console program, output can pause when too much text is printed; click the
    /// console window and press Enter to resume. If the program seems frozen, try pressing Enter.
    /// Before running, make sure the record files exist; to keep the code short, the program is not
    /// written defensively.
    /// By default the two text files live in the root of drive E:, named "已抓取网址.txt" (captured URLs)
    /// and "待抓取网址.txt" (pending URLs). Create both files yourself and mind the extensions.
    /// The links in these files are essentially all valid and can be processed on their own.
    /// Observed crawling speed:
    /// 10 threads: up to roughly 400 links per minute
    /// 6-8 threads: up to roughly 200-300 links per minute
    /// 2-4 threads: up to roughly 150-200 links per minute
    /// single thread: up to roughly 70-100 links per minute
    /// Multi-threaded asynchronous crawling is used purely for efficiency; running the threads in
    /// lockstep would not make it faster. As long as the crawled pages are not overly duplicated or
    /// redundant, asynchrony does not mean errors.
    /// </summary>
    class Program
    {
        //Default seed URLs; feel free to change them. Keeping the URLs distinct from one another improves efficiency.
        //When editing, watch the separator: it must be an English comma (",").
        //The default is 3 threads with 3 seed URLs, up to 12 threads with 12 URLs; for more than 12, append your own.
        //If you set more than 12 threads you must add URLs, otherwise the program fails.
        //Each URL corresponds to one thread: 6 threads need 6 URLs, 12 threads need 12. When changing the defaults, keep parts consistent with the length of linkArray.
        //In testing on an i5 CPU, 8-10 threads is the practical maximum and causes serious stuttering; more threads demand more CPU. For a typical CPU, 2-4 threads is enough.
        static string[] linkArray = {"http://www.163.com/","http://www.sohu.com/","http://www.srcfans.com"};
        //Path of the file holding the list of already-captured URLs
        static string urlCapturedDir = @"E:\已抓取网址.txt";
        //Path of the file holding the list of not-yet-captured URLs
        static string myLinkDir = @"E:\待抓取网址.txt";
        //Number of threads crawling URLs concurrently (one per seed URL; must not exceed linkArray.Length)
        private static int parts = linkArray.Length;
        //Number of pages captured so far
        private static int count = 0;
        //Maximum number of pages to capture
        private static int maxNum = 2000;
        //List of URLs waiting to be crawled
        private static List<string> myLink = new List<string>();
        //List of URLs already crawled
        private static List<string> urlCaptured = new List<string>();
        //Temporarily holds the single URL currently being crawled
        private static string strLink = "";
        public Program(string link)
        {
             myLink.Add(link);
        }
        public void doWork()
        {
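            // NOTE: myLink, urlCaptured, count, and strLink are static fields shared by every
            // thread without locks, so concurrent runs can race (see the remarks above).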
            Console.WriteLine("working...");
            while ((myLink.Count != 0) && (count < maxNum))
            {
                strLink = myLink[0];
                if (!urlCaptured.Contains(strLink))
                {
                    //Add it to the captured list
                    urlCaptured.Add(strLink);
                    //Count of pages captured so far
                    count++;
                    //Fetch it through the WebPage class
                    WebPage tempWeb = new WebPage(strLink);
                    Console.WriteLine("Page " + strLink + " contains " + tempWeb.Links.Count + " links");
                    foreach (Link li in tempWeb.Links)
                    {
                        //Make sure it is not queued twice
                        if (!myLink.Contains(li.NavigateUrl))
                            myLink.Add(li.NavigateUrl);
                    }
                    Console.WriteLine("Number of pages waiting to be crawled: " + myLink.Count);
                    //Make sure it is not crawled twice; under multiple threads this line can throw
                    while (myLink.Remove(strLink)) ;
                }
                else
                {
                    //Remove the duplicate URL
                    while (myLink.Remove(strLink)) ;
                }
                Console.WriteLine("Captured " + count + " pages so far");
            }
        }
        static void printLinks(List<string> links)
        {
            foreach(string s in  links)
            {
                Console.WriteLine(s);
            }
        }
        static void Main(string[] args)
        {
            //string seedUrl = "http://www.srcfans.com/";
            //WebPage webInfo = new WebPage(seedUrl);
            //Console.WriteLine( webInfo.Context);// all the content, without HTML tags
            //Console.WriteLine( webInfo.M_html);// the content, including HTML tags
            //urlCaptured.Add(seedUrl);
            //Load the list of already-processed URLs (urlCaptured)
            string[] lines = System.IO.File.ReadAllLines(urlCapturedDir);
            int urlCapturedIndex = 0;
            foreach (string line in lines)
            {
                //if (!urlCaptured.Contains(line))
                {
                    urlCaptured.Add(line);
                    urlCapturedIndex++;
                    if(urlCapturedIndex%2000==0)
                    {
                        Console.WriteLine("Imported " + urlCapturedIndex + " captured URLs");
                    }
                }
            }
            // Show the captured-URL list
            Console.WriteLine("Loaded the processed-URL list: " + urlCaptured.Count + " entries:");
            //foreach (string line in urlCaptured)
            //{
            //    Console.WriteLine(line);
            //}
            //Console.WriteLine("Finished displaying the processed-URL list");
            //Initialize the list of not-yet-crawled URLs
            string[] lines2 = System.IO.File.ReadAllLines(myLinkDir);
            int myLinkIndex = 0;
            foreach (string line in lines2)
            {
                //The Contains check is too slow
                //if (!myLink.Contains(line))
                {
                    myLink.Add(line);
                    myLinkIndex++;
                    if(myLinkIndex%2000==0)
                    {
                        Console.WriteLine("Imported " + myLinkIndex + " pending URLs");
                    }
                }
            }
            // Show the pending-URL list
            Console.WriteLine("Loaded the pending-URL list: " + myLink.Count + " entries:");
            //foreach (string line in myLink)
            //{
            //    Console.WriteLine(line);
            //}
            //Console.WriteLine("Finished displaying the pending-URL list");
            System.Threading.Thread[] threads = new System.Threading.Thread[parts];
            for (int i = 0; i < parts; i++)
            {
                    string str_l = linkArray[i];
                    Program p = new Program(str_l);
                    System.Threading.Thread t = new System.Threading.Thread(new System.Threading.ThreadStart(p.doWork));
                    threads[i] = t;
                    threads[i].Name = "Thread" + i.ToString();
            }
            for (int i = 0; i < parts; i++)
            {
                threads[i].Start();
            }
            //To make sure Main does not try to terminate the worker threads before they have
            //had a chance to finish, Main keeps looping until each worker thread's IsAlive
            //property becomes false; the while loop is what signals that the threads are done.
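            //(Thread.Join() would be the idiomatic alternative to this busy-wait.)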
            for (int i = 0; i < parts; i++)
            {
                while (threads[i].IsAlive)
                {
                    ;
                }
            }
            Console.WriteLine("Captured " + urlCaptured.Count + " URLs in total");
            Console.WriteLine("URLs still waiting to be processed: " + myLink.Count);
            Console.WriteLine("Checking for duplicates....");
            List<string> temp_l = new List<string>();
            foreach (string str in urlCaptured)
            {
                if (!temp_l.Contains(str))
                {
                    temp_l.Add(str);
                }
                else
                {
                    Console.WriteLine("URL " + str + " is a duplicate");
                }
            }
            Console.WriteLine((urlCaptured.Count - temp_l.Count) + " entries were duplicate URLs");
            urlCaptured = temp_l;
            Console.WriteLine("Writing captured URLs to file...");
            //Sort the captured URLs
            urlCaptured.Sort();
            using (System.IO.StreamWriter file =
            new System.IO.StreamWriter(urlCapturedDir))
            {
                foreach(string s in urlCaptured)
                {
                    file.WriteLine(s);
                }
            }
            Console.WriteLine("Finished writing the captured URLs");
            Console.WriteLine("Writing pending URLs to file...");
            myLink.Sort();
            using (System.IO.StreamWriter file =
            new System.IO.StreamWriter(myLinkDir))
            {
                foreach (string s in myLink)
                {
                    file.WriteLine(s);
                }
            }
            Console.WriteLine("Finished writing the pending URLs");
            //The line below keeps the console window open; otherwise it would flash and close.
            Console.WriteLine("Press any key to exit...");
            Console.ReadKey();
        }
    }
}
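For reference, here is a minimal sketch of how the WebPage class might be used on its own, outside the crawler. The seed URL is just one of the defaults above, and the sketch defines its own Main, so compile it separately from the Program class (or set it as the startup object):

using System;
using MyWebPage;

class WebPageDemo
{
    static void Main()
    {
        // Fetch one page and inspect it through the WebPage properties.
        WebPage page = new WebPage("http://www.163.com/");
        if (page.IsGood)
        {
            Console.WriteLine("Title: " + page.Title);
            Console.WriteLine("Size: " + page.PageSize);
            // The first 200 characters of plain text, link text included.
            Console.WriteLine(page.getContext(200));
            // In-site links only.
            foreach (Link li in page.InsiteLinks)
                Console.WriteLine(li.Text + " -> " + li.NavigateUrl);
        }
    }
}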

Original article: https://www.cnblogs.com/jianghuluanke/p/7533320.html