C# Implements a Web Page Crawling Class Based on Regular Expressions to Get All Information from a Web Page

  • 2021-12-13 16:44:19
  • OfStack

This article presents an example of a C# web page crawling class that uses regular expressions to extract all the information from a web page. It is shared here for your reference, as follows:

The code for the class is as follows:


using System;
using System.Data;
using System.Configuration;
using System.Net;
using System.IO;
using System.Text;
using System.Collections.Generic;
using System.Text.RegularExpressions;
using System.Threading;
using System.Web;
using System.Web.UI.MobileControls;
/// <summary>
///  Web page class 
/// </summary>
public class WebPage
{
    #region  Private members
    private Uri m_uri;  // URL
    private List<Link> m_links;  // Links on this page
    private string m_title;    // Page title
    private string m_html;     // HTML code
    private string m_outstr;    // Plain text extracted from the page
    private bool m_good;      // Whether the page is available
    private int m_pagesize;    // Size of the page
    private static Dictionary<string, CookieContainer> webcookies = new Dictionary<string, CookieContainer>(); // Cookies for all pages, keyed by host
    #endregion
    #region  Properties
    /// <summary>
    ///  Use this property to get the URL of this page, read only 
    /// </summary>
    public string URL
    {
      get
      {
        return m_uri.AbsoluteUri;
      }
    }
    /// <summary>
    ///  Use this property to get the title of this page, read-only 
    /// </summary>
    public string Title
    {
      get
      {
        if (m_title == "")
        {
          Regex reg = new Regex(@"(?m)<title[^>]*>(?<title>(?:\w|\W)*?)</title[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase);
          Match mc = reg.Match(m_html);
          if (mc.Success)
            m_title = mc.Groups["title"].Value.Trim();
        }
        return m_title;
      }
    }
    public string M_html
    {
      get
      {
        if (m_html == null)
        {
          m_html = "";
        }
        return m_html;
      }
    }
    /// <summary>
    ///  This property gets all link information for this page, read-only 
    /// </summary>
    public List<Link> Links
    {
      get
      {
        if (m_links.Count == 0) getLinks();
        return m_links;
      }
    }
    /// <summary>
    ///  This property returns all plain text information for this page, read-only 
    /// </summary>
    public string Context
    {
      get
      {
        if (m_outstr == "") getContext(Int16.MaxValue);
        return m_outstr;
      }
    }
    /// <summary>
    ///  This property gets the size of this page 
    /// </summary>
    public int PageSize
    {
      get
      {
        return m_pagesize;
      }
    }
    /// <summary>
    ///  This property gets all in-site links to this page 
    /// </summary>
    public List<Link> InsiteLinks
    {
      get
      {
        return getSpecialLinksByUrl("^http://" + m_uri.Host, Int16.MaxValue);
      }
    }
    /// <summary>
    ///  This property indicates whether this page is available 
    /// </summary>
    public bool IsGood
    {
      get
      {
        return m_good;
      }
    }
    /// <summary>
    ///  This property represents the Web site where the Web page is located 
    /// </summary>
    public string Host
    {
      get
      {
        return m_uri.Host;
      }
    }
    #endregion
    /// <summary>
    ///  Analyzes the HTML code and extracts link information
    /// </summary>
    /// <returns>List<Link></returns>
    private List<Link> getLinks()
    {
      if (m_links.Count == 0)
      {
        Regex[] regex = new Regex[2];
        regex[0] = new Regex(@"<a\shref\s*=""(?<URL>[^""]*).*?>(?<text>[^<]*)</a>", RegexOptions.IgnoreCase | RegexOptions.Singleline);
        regex[1] = new Regex("<[i]*frame[^><]+src=(\"|')?(?<URL>([^>\"'\\s)])+)(\"|')?[^>]*>", RegexOptions.IgnoreCase);
        for (int i = 0; i < 2; i++)
        {
          Match match = regex[i].Match(m_html);
          while (match.Success)
          {
            try
            {
              string url = HttpUtility.UrlDecode(new Uri(m_uri, match.Groups["URL"].Value).AbsoluteUri);
              string text = "";
              if (i == 0) text = new Regex("(<[^>]+>)|(\\s)|(&nbsp;)|&|\"", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(match.Groups["text"].Value, "");
              Link link = new Link();
              link.Text = text;
              link.NavigateUrl = url;
              m_links.Add(link);
            }
            catch (Exception ex) { Console.WriteLine(ex.Message); };
            match = match.NextMatch();
          }
        }
      }
      return m_links;
    }
    /// <summary>
    ///  This private method extracts a given number of characters of plain text from a segment of HTML code
    /// </summary>
    /// <param name="instr">HTML code</param>
    /// <param name="firstN">Number of characters to extract from the beginning</param>
    /// <param name="withLink">Whether to include the text of links</param>
    /// <returns>Plain text</returns>
    private string getFirstNchar(string instr, int firstN, bool withLink)
    {
      if (m_outstr == "")
      {
        m_outstr = instr.Clone() as string;
        m_outstr = new Regex(@"(?m)<script[^>]*>(\w|\W)*?</script[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
        m_outstr = new Regex(@"(?m)<style[^>]*>(\w|\W)*?</style[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
        m_outstr = new Regex(@"(?m)<select[^>]*>(\w|\W)*?</select[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
        if (!withLink) m_outstr = new Regex(@"(?m)<a[^>]*>(\w|\W)*?</a[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
        Regex objReg = new System.Text.RegularExpressions.Regex("(<[^>]+?>)|&nbsp;", RegexOptions.Multiline | RegexOptions.IgnoreCase);
        m_outstr = objReg.Replace(m_outstr, "");
        Regex objReg2 = new System.Text.RegularExpressions.Regex("(\\s)+", RegexOptions.Multiline | RegexOptions.IgnoreCase);
        m_outstr = objReg2.Replace(m_outstr, " ");
      }
      return m_outstr.Length > firstN ? m_outstr.Substring(0, firstN) : m_outstr;
    }
    #region  Public methods
    /// <summary>
    ///  This public method extracts a fixed number of characters of plain text from this page, including link text
    /// </summary>
    /// <param name="firstN">Number of characters</param>
    /// <returns></returns>
    public string getContext(int firstN)
    {
      return getFirstNchar(m_html, firstN, true);
    }
    /// <summary>
    ///  This public method extracts a fixed number of links from this page whose URLs match a regular expression
    /// </summary>
    /// <param name="pattern">Regular expression</param>
    /// <param name="count"> Number of links returned </param>
    /// <returns>List<Link></returns>
    public List<Link> getSpecialLinksByUrl(string pattern, int count)
    {
      if (m_links.Count == 0) getLinks();
      List<Link> SpecialLinks = new List<Link>();
      List<Link>.Enumerator i;
      i = m_links.GetEnumerator();
      int cnt = 0;
      while (i.MoveNext() && cnt < count)
      {
        if (new Regex(pattern, RegexOptions.Multiline | RegexOptions.IgnoreCase).Match(i.Current.NavigateUrl).Success)
        {
          SpecialLinks.Add(i.Current);
          cnt++;
        }
      }
      return SpecialLinks;
    }
    /// <summary>
    ///  This public method extracts a fixed number of links from this page whose text matches a regular expression
    /// </summary>
    /// <param name="pattern">Regular expression</param>
    /// <param name="count"> Number of links returned </param>
    /// <returns>List<Link></returns>
    public List<Link> getSpecialLinksByText(string pattern, int count)
    {
      if (m_links.Count == 0) getLinks();
      List<Link> SpecialLinks = new List<Link>();
      List<Link>.Enumerator i;
      i = m_links.GetEnumerator();
      int cnt = 0;
      while (i.MoveNext() && cnt < count)
      {
        if (new Regex(pattern, RegexOptions.Multiline | RegexOptions.IgnoreCase).Match(i.Current.Text).Success)
        {
          SpecialLinks.Add(i.Current);
          cnt++;
        }
      }
      return SpecialLinks;
    }
    /// <summary>
    ///  This public method extracts text matching a regular expression from the plain text of this page
    /// </summary>
    /// <param name="pattern">Regular expression</param>
    /// <returns>The matched text</returns>
    public string getSpecialWords(string pattern)
    {
      if (m_outstr == "") getContext(Int16.MaxValue);
      Regex regex = new Regex(pattern, RegexOptions.Multiline | RegexOptions.IgnoreCase);
      Match mc = regex.Match(m_outstr);
      if (mc.Success)
        return mc.Groups[1].Value;
      return string.Empty;
    }
    #endregion
    #region  Constructor 
    private void Init(string _url)
    {
      try
      {
        m_uri = new Uri(_url);
        m_links = new List<Link>();
        m_html = "";
        m_outstr = "";
        m_title = "";
        m_good = true;
        if (_url.EndsWith(".rar") || _url.EndsWith(".dat") || _url.EndsWith(".msi"))
        {
          m_good = false;
          return;
        }
        HttpWebRequest rqst = (HttpWebRequest)WebRequest.Create(m_uri);
        rqst.AllowAutoRedirect = true;
        rqst.MaximumAutomaticRedirections = 3;
        rqst.UserAgent = "Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)";
        rqst.KeepAlive = true;
        rqst.Timeout = 10000;
        lock (WebPage.webcookies)
        {
          if (WebPage.webcookies.ContainsKey(m_uri.Host))
            rqst.CookieContainer = WebPage.webcookies[m_uri.Host];
          else
          {
            CookieContainer cc = new CookieContainer();
            WebPage.webcookies[m_uri.Host] = cc;
            rqst.CookieContainer = cc;
          }
        }
        HttpWebResponse rsps = (HttpWebResponse)rqst.GetResponse();
        Stream sm = rsps.GetResponseStream();
        if (!rsps.ContentType.ToLower().StartsWith("text/") || rsps.ContentLength > 1 << 22)
        {
          rsps.Close();
          m_good = false;
          return;
        }
        Encoding cding = System.Text.Encoding.Default;
        string contenttype = rsps.ContentType.ToLower();
        int ix = contenttype.IndexOf("charset=");
        if (ix != -1)
        {
          try
          {
            cding = System.Text.Encoding.GetEncoding(rsps.ContentType.Substring(ix + "charset".Length + 1));
          }
          catch
          {
            cding = Encoding.Default;
          }
          // Depending on the page, the HTML may need to be decoded:
          //m_html = HttpUtility.HtmlDecode(new StreamReader(sm, cding).ReadToEnd());
          m_html = new StreamReader(sm, cding).ReadToEnd();
        }
        else
        {
          // Depending on the page, the HTML may need to be decoded:
          //m_html = HttpUtility.HtmlDecode(new StreamReader(sm, cding).ReadToEnd());
          m_html = new StreamReader(sm, cding).ReadToEnd();
          Regex regex = new Regex("charset=(?<cding>[^=]+)?\"", RegexOptions.IgnoreCase);
          string strcding = regex.Match(m_html).Groups["cding"].Value;
          try
          {
            cding = Encoding.GetEncoding(strcding);
          }
          catch
          {
            cding = Encoding.Default;
          }
          byte[] bytes = Encoding.Default.GetBytes(m_html.ToCharArray());
          m_html = cding.GetString(bytes);
          if (m_html.Split('?').Length > 100)
          {
            m_html = Encoding.Default.GetString(bytes);
          }
        }
        m_pagesize = m_html.Length;
        m_uri = rsps.ResponseUri;
        rsps.Close();
      }
      catch (Exception)
      {
        // Ignore errors here; the page is simply left with empty content
      }
    }
    public WebPage(string _url)
    {
      string uurl = "";
      try
      {
        uurl = Uri.UnescapeDataString(_url);
        _url = uurl;
      }
      catch { };
      Init(_url);
    }
    #endregion
}
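
The Link type used throughout the class is not defined in the listing; it presumably comes from the System.Web.UI.MobileControls namespace (hence the using directive at the top), and only its Text and NavigateUrl properties are used. If you would rather not reference that old assembly, a minimal stand-in is enough. The class below is a sketch and is not part of the original code:


/// <summary>
///  Minimal substitute for System.Web.UI.MobileControls.Link (not in the original article);
///  it carries only the two properties the crawler actually uses.
/// </summary>
public class Link
{
    public string Text { get; set; }         // Anchor text of the link
    public string NavigateUrl { get; set; }  // Absolute URL the link points to
}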

Call:


WebPage webInfo = new WebPage("http://hovertree.net/");
string text = webInfo.Context;  // All text content, without HTML tags
string html = webInfo.M_html;   // Full page content, including HTML tags
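
Beyond these two properties, the link-filtering methods are usually the most useful part of the class. The snippet below is a usage sketch only; the URL and the regular expressions are examples and are not part of the original article:


WebPage page = new WebPage("http://hovertree.net/");  // Example URL
if (page.IsGood)
{
    Console.WriteLine("Title: " + page.Title);
    // All links whose URL stays on the same host
    foreach (Link link in page.InsiteLinks)
        Console.WriteLine(link.NavigateUrl);
    // Up to 10 links whose anchor text contains "download" (example pattern)
    foreach (Link link in page.getSpecialLinksByText("download", 10))
        Console.WriteLine(link.Text + " -> " + link.NavigateUrl);
    // First capture group of a pattern, matched against the page's plain text
    Console.WriteLine(page.getSpecialWords(@"Copyright\s*(\d{4})"));  // Example pattern
}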

PS: Here are two very convenient regular expression tools for your reference:

JavaScript Regular Expression Online Test Tool:
http://tools.ofstack.com/regex/javascript

Regular expression online generation tool:
http://tools.ofstack.com/regex/create_reg

Readers interested in more C#-related content can check out the topics on this site: "C# Regular Expression Usage Summary", "C# Encoding Operation Skills Summary", "XML File Operation Skills Summary in C#", "C# Common Control Usage Tutorial", "WinForm Control Usage Summary", "C# Data Structure and Algorithm Tutorial", "C# Object-Oriented Programming Introductory Tutorial" and "C# Programming Thread Usage Skills Summary".

I hope this article is helpful to everyone's C# programming.

