[转]正则表达式相关:C# 抓取网页类(获取网页中所有信息)
- using System;
- using System.Data;
- using System.Configuration;
- using System.Net;
- using System.IO;
- using System.Text;
- using System.Collections.Generic;
- using System.Text.RegularExpressions;
- using System.Threading;
- using System.Web;
- using System.Web.UI.MobileControls;
- /// <summary>
- /// 网页类
- /// </summary>
/// <summary>
/// Web page wrapper: downloads the HTML of a URL, detects its encoding,
/// and exposes the page's title, links and plain-text content.
/// </summary>
public class WebPage
{
    #region Private members
    private Uri m_uri;              // page URL
    private List<Link> m_links;     // links found on this page (lazily parsed)
    private string m_title;         // page title (lazily extracted)
    private string m_html;          // raw HTML source
    private string m_outstr;        // plain-text rendering of the page (lazily extracted)
    private bool m_good;            // true only when the page was fetched successfully
    private int m_pagesize;         // page size (characters of HTML)
    // One cookie container per host, shared by every WebPage instance.
    private static Dictionary<string, CookieContainer> webcookies = new Dictionary<string, CookieContainer>();
    #endregion

    #region Properties
    /// <summary>
    /// Absolute URL of this page (read-only).
    /// </summary>
    public string URL
    {
        get { return m_uri.AbsoluteUri; }
    }

    /// <summary>
    /// Title of this page, lazily extracted from the &lt;title&gt; tag (read-only).
    /// </summary>
    public string Title
    {
        get
        {
            if (m_title == "")
            {
                Regex reg = new Regex(@"(?m)<title[^>]*>(?<title>(?:\w|\W)*?)</title[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase);
                Match mc = reg.Match(m_html);
                if (mc.Success)
                    m_title = mc.Groups["title"].Value.Trim();
            }
            return m_title;
        }
    }

    /// <summary>
    /// Raw HTML source of the page; never null.
    /// </summary>
    public string M_html
    {
        get
        {
            if (m_html == null)
            {
                m_html = "";
            }
            return m_html;
        }
    }

    /// <summary>
    /// All links (anchors and frames) found on this page (read-only, lazily parsed).
    /// </summary>
    public List<Link> Links
    {
        get
        {
            if (m_links.Count == 0) getLinks();
            return m_links;
        }
    }

    /// <summary>
    /// Full plain-text content of the page (read-only, lazily extracted).
    /// </summary>
    public string Context
    {
        get
        {
            if (m_outstr == "") getContext(Int16.MaxValue);
            return m_outstr;
        }
    }

    /// <summary>
    /// Size of the page, measured in characters of HTML.
    /// </summary>
    public int PageSize
    {
        get { return m_pagesize; }
    }

    /// <summary>
    /// Links on this page that point back into the same site (same host, http).
    /// </summary>
    public List<Link> InsiteLinks
    {
        get { return getSpecialLinksByUrl("^http://" + m_uri.Host, Int16.MaxValue); }
    }

    /// <summary>
    /// Whether the page was downloaded successfully and is usable.
    /// </summary>
    public bool IsGood
    {
        get { return m_good; }
    }

    /// <summary>
    /// Host name of the site this page belongs to.
    /// </summary>
    public string Host
    {
        get { return m_uri.Host; }
    }
    #endregion

    /// <summary>
    /// Extracts link information (anchors and iframe/frame sources) from the HTML.
    /// </summary>
    /// <returns>The list of links found on the page.</returns>
    private List<Link> getLinks()
    {
        if (m_links.Count == 0)
        {
            Regex[] regex = new Regex[2];
            // BUG FIX: the original patterns captured (?<URL>)/(?<title>) while the
            // code below read Groups["URL"]/Groups["text"], so frame URLs resolved
            // to the base URI and anchor text was always empty. Both patterns now
            // use the group names "url" and "text" that are actually read.
            regex[0] = new Regex(@"<a\shref\s*=""(?<url>[^""]*).*?>(?<text>[^<]*)</a>", RegexOptions.IgnoreCase | RegexOptions.Singleline);
            regex[1] = new Regex("<[i]*frame[^><]+src=(\"|')?(?<url>([^>\"'\\s)])+)(\"|')?[^>]*>", RegexOptions.IgnoreCase);
            for (int i = 0; i < 2; i++)
            {
                Match match = regex[i].Match(m_html);
                while (match.Success)
                {
                    try
                    {
                        // Resolve relative URLs against the page URI.
                        string url = HttpUtility.UrlDecode(new Uri(m_uri, match.Groups["url"].Value).AbsoluteUri);
                        string text = "";
                        // Only anchors (i == 0) carry link text; strip tags, whitespace
                        // and entities from it. NOTE(review): "&nbsp;" restored here —
                        // the published source shows a raw NBSP, which looks like blog
                        // mojibake of the original entity.
                        if (i == 0) text = new Regex("(<[^>]+>)|(\\s)|(&nbsp;)|&|\"", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(match.Groups["text"].Value, "");
                        Link link = new Link();
                        link.Text = text;
                        link.NavigateUrl = url;
                        m_links.Add(link);
                    }
                    catch (Exception ex) { Console.WriteLine(ex.Message); }
                    match = match.NextMatch();
                }
            }
        }
        return m_links;
    }

    /// <summary>
    /// Extracts up to a given number of characters of plain text from HTML,
    /// caching the full plain-text rendering on first use.
    /// </summary>
    /// <param name="instr">HTML source.</param>
    /// <param name="firstN">Maximum number of characters to return.</param>
    /// <param name="withLink">Whether to keep the text inside anchor tags.</param>
    /// <returns>The plain text, truncated to <paramref name="firstN"/> characters.</returns>
    private string getFirstNchar(string instr, int firstN, bool withLink)
    {
        if (m_outstr == "")
        {
            m_outstr = instr.Clone() as string;
            // Drop script/style/select blocks wholesale - their content is not page text.
            m_outstr = new Regex(@"(?m)<script[^>]*>(\w|\W)*?</script[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
            m_outstr = new Regex(@"(?m)<style[^>]*>(\w|\W)*?</style[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
            m_outstr = new Regex(@"(?m)<select[^>]*>(\w|\W)*?</select[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
            if (!withLink) m_outstr = new Regex(@"(?m)<a[^>]*>(\w|\W)*?</a[^>]*>", RegexOptions.Multiline | RegexOptions.IgnoreCase).Replace(m_outstr, "");
            // Strip the remaining tags and &nbsp; entities, then collapse whitespace
            // runs to single spaces. NOTE(review): "&nbsp;" restored from a raw NBSP
            // in the published source (blog mojibake of the original entity).
            Regex objReg = new System.Text.RegularExpressions.Regex("(<[^>]+?>)|&nbsp;", RegexOptions.Multiline | RegexOptions.IgnoreCase);
            m_outstr = objReg.Replace(m_outstr, "");
            Regex objReg2 = new System.Text.RegularExpressions.Regex("(\\s)+", RegexOptions.Multiline | RegexOptions.IgnoreCase);
            m_outstr = objReg2.Replace(m_outstr, " ");
        }
        return m_outstr.Length > firstN ? m_outstr.Substring(0, firstN) : m_outstr;
    }

    #region Public methods
    /// <summary>
    /// Returns up to <paramref name="firstN"/> characters of the page's plain text,
    /// including the text inside links.
    /// </summary>
    /// <param name="firstN">Maximum number of characters.</param>
    /// <returns>The plain text.</returns>
    public string getContext(int firstN)
    {
        return getFirstNchar(m_html, firstN, true);
    }

    /// <summary>
    /// Returns up to <paramref name="count"/> links whose URL matches a regular expression.
    /// </summary>
    /// <param name="pattern">Regular expression matched against each link URL.</param>
    /// <param name="count">Maximum number of links to return.</param>
    /// <returns>The matching links.</returns>
    public List<Link> getSpecialLinksByUrl(string pattern, int count)
    {
        if (m_links.Count == 0) getLinks();
        List<Link> SpecialLinks = new List<Link>();
        // Compile the pattern once instead of once per link as the original did.
        Regex urlRegex = new Regex(pattern, RegexOptions.Multiline | RegexOptions.IgnoreCase);
        int cnt = 0;
        foreach (Link link in m_links)
        {
            if (cnt >= count) break;
            if (urlRegex.Match(link.NavigateUrl).Success)
            {
                SpecialLinks.Add(link);
                cnt++;
            }
        }
        return SpecialLinks;
    }

    /// <summary>
    /// Returns up to <paramref name="count"/> links whose text matches a regular expression.
    /// </summary>
    /// <param name="pattern">Regular expression matched against each link's text.</param>
    /// <param name="count">Maximum number of links to return.</param>
    /// <returns>The matching links.</returns>
    public List<Link> getSpecialLinksByText(string pattern, int count)
    {
        if (m_links.Count == 0) getLinks();
        List<Link> SpecialLinks = new List<Link>();
        // Compile the pattern once instead of once per link as the original did.
        Regex textRegex = new Regex(pattern, RegexOptions.Multiline | RegexOptions.IgnoreCase);
        int cnt = 0;
        foreach (Link link in m_links)
        {
            if (cnt >= count) break;
            if (textRegex.Match(link.Text).Success)
            {
                SpecialLinks.Add(link);
                cnt++;
            }
        }
        return SpecialLinks;
    }

    /// <summary>
    /// Matches a regular expression against the page's plain text and returns
    /// the value of its first capture group, or an empty string when no match.
    /// </summary>
    /// <param name="pattern">Regular expression with at least one capture group.</param>
    /// <returns>The captured text, or <see cref="string.Empty"/>.</returns>
    public string getSpecialWords(string pattern)
    {
        if (m_outstr == "") getContext(Int16.MaxValue);
        Regex regex = new Regex(pattern, RegexOptions.Multiline | RegexOptions.IgnoreCase);
        Match mc = regex.Match(m_outstr);
        return mc.Success ? mc.Groups[1].Value : string.Empty;
    }
    #endregion

    #region Constructors
    /// <summary>
    /// Downloads the page and initializes all members. On any failure the page
    /// is marked unusable (<see cref="IsGood"/> == false) instead of throwing.
    /// </summary>
    /// <param name="_url">Absolute URL of the page to fetch.</param>
    private void Init(string _url)
    {
        // BUG FIX: initialize fields BEFORE the try block so the properties are
        // safe to read even when new Uri(_url) throws (the original left them
        // null in that case, causing NullReferenceException in Links/Context).
        m_links = new List<Link>();
        m_html = "";
        m_outstr = "";
        m_title = "";
        m_good = false;
        m_pagesize = 0;
        try
        {
            m_uri = new Uri(_url);
            // Skip obvious binary downloads.
            if (_url.EndsWith(".rar") || _url.EndsWith(".dat") || _url.EndsWith(".msi"))
            {
                return;
            }
            HttpWebRequest rqst = (HttpWebRequest)WebRequest.Create(m_uri);
            rqst.AllowAutoRedirect = true;
            rqst.MaximumAutomaticRedirections = 3;
            rqst.UserAgent = "Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)";
            rqst.KeepAlive = true;
            rqst.Timeout = 10000;
            // Reuse one cookie container per host across all instances; the shared
            // dictionary is guarded because pages may be fetched from many threads.
            lock (WebPage.webcookies)
            {
                CookieContainer cc;
                if (!WebPage.webcookies.TryGetValue(m_uri.Host, out cc))
                {
                    cc = new CookieContainer();
                    WebPage.webcookies[m_uri.Host] = cc;
                }
                rqst.CookieContainer = cc;
            }
            // BUG FIX: dispose the response on every path (the original leaked it
            // when ReadToEnd threw).
            using (HttpWebResponse rsps = (HttpWebResponse)rqst.GetResponse())
            {
                // Only accept text responses of at most 4 MB (1 << 22 bytes).
                if (!rsps.ContentType.ToLower().StartsWith("text/") || rsps.ContentLength > 1 << 22)
                {
                    return;
                }
                Stream sm = rsps.GetResponseStream();
                Encoding cding = System.Text.Encoding.Default;
                string contenttype = rsps.ContentType.ToLower();
                int ix = contenttype.IndexOf("charset=");
                if (ix != -1)
                {
                    // Charset declared in the HTTP Content-Type header; ignore any
                    // trailing ";" parameters that may follow the charset value.
                    try
                    {
                        string charset = contenttype.Substring(ix + "charset=".Length).Split(';')[0].Trim();
                        cding = System.Text.Encoding.GetEncoding(charset);
                    }
                    catch
                    {
                        cding = Encoding.Default;
                    }
                    // Some sites need HttpUtility.HtmlDecode() applied to the result;
                    // enable it here if required.
                    m_html = new StreamReader(sm, cding).ReadToEnd();
                }
                else
                {
                    // No charset in the header: read with the default encoding, sniff
                    // the charset from the HTML meta tag, then re-decode the bytes.
                    m_html = new StreamReader(sm, cding).ReadToEnd();
                    Regex regex = new Regex("charset=(?<cding>[^=]+)?\"", RegexOptions.IgnoreCase);
                    string strcding = regex.Match(m_html).Groups["cding"].Value;
                    try
                    {
                        cding = Encoding.GetEncoding(strcding);
                    }
                    catch
                    {
                        cding = Encoding.Default;
                    }
                    byte[] bytes = Encoding.Default.GetBytes(m_html.ToCharArray());
                    m_html = cding.GetString(bytes);
                    // Heuristic: many '?' characters means the re-decode mangled the
                    // text, so fall back to the default-encoding rendering.
                    if (m_html.Split('?').Length > 100)
                    {
                        m_html = Encoding.Default.GetString(bytes);
                    }
                }
                m_pagesize = m_html.Length;
                m_uri = rsps.ResponseUri;
                // Only mark the page usable once everything above has succeeded.
                m_good = true;
            }
        }
        catch (Exception ex)
        {
            // BUG FIX: the original swallowed the exception with an empty catch
            // and left IsGood reporting true for pages that failed to download.
            Console.WriteLine(ex.Message);
            m_good = false;
        }
    }

    /// <summary>
    /// Creates a WebPage for the given URL and downloads it immediately.
    /// </summary>
    /// <param name="_url">URL of the page to fetch (may be percent-encoded).</param>
    public WebPage(string _url)
    {
        // Un-escape the URL when possible; keep the raw form if unescaping fails.
        try
        {
            _url = Uri.UnescapeDataString(_url);
        }
        catch { }
        Init(_url);
    }
    #endregion
}
调用
- WebPage webInfo = new WebPage("网址");
- webInfo.Context;//不包含html标签的所有内容
- webInfo.M_html;//包含html标签的内容
- ...参考属性
Original:http://blog.csdn.net/yysyangyangyangshan/article/details/6661886
[转]正则表达式相关:C# 抓取网页类(获取网页中所有信息)的更多相关文章
- Fiddler如何抓取HTTPS协议的网页
Fiddler默认只能抓取HTTP协议的网页,不能抓取HTTPS协议的网页,而我们很多时候,都需要抓HTTPS协议的网页,比如抓淘宝数据等.今天,韦玮老师会为大家讲解如何使用Fiddler抓取HTTP ...
- Fiddler: 如何抓取HTTPS协议的网页
作者:韦玮 转载请注明出处 Fiddler默认只能抓取HTTP协议的网页,不能抓取HTTPS协议的网页,而我们很多时候,都需要抓HTTPS协议的网页,比如抓淘宝数据等.今天,韦玮老师会为大家讲解如何 ...
- 如何让Python爬虫一天抓取100万张网页
前言 文的文字及图片来源于网络,仅供学习.交流使用,不具有任何商业用途,版权归原作者所有,如有问题请及时联系我们以作处理. 作者: 王平 源自:猿人学Python PS:如有需要Python学习资料的 ...
- 「拉勾网」薪资调查的小爬虫,并将抓取结果保存到excel中
学习Python也有一段时间了,各种理论知识大体上也算略知一二了,今天就进入实战演练:通过Python来编写一个拉勾网薪资调查的小爬虫. 第一步:分析网站的请求过程 我们在查看拉勾网上的招聘信息的时候 ...
- Windows环境中,通过Charles工具,抓取安卓手机、苹果手机中APP应用的http、https请求包信息
Windows环境中,通过Charles工具,抓取安卓手机.苹果手机中APP应用的http.https请求包信息1.抓取安卓手机中APP应用的http请求包信息1)在电脑上操作,查看Windows机器 ...
- java抓取动态生成的网页
最近在做项目的时候有一个需求:从网页面抓取数据,要求是首先抓取整个网页的html源码(后期更新要使用到).刚开始一看这个简单,然后就稀里哗啦的敲起了代码(在这之前使用过Hadoop平台的分布式爬虫框架 ...
- nutch 抓取需要登录的网页
题记:一步一坑,且行且珍惜 最近接到任务,要利用nutch去抓取公司内部系统的文章,可是需要登录才能抓到.对于一个做.net,不熟悉java,不知道hadoop,很少接触linux的我,这个过程真是艰 ...
- 网页抓取:PHP实现网页爬虫方式小结
来源:http://www.ido321.com/1158.html 抓取某一个网页中的内容,需要对DOM树进行解析,找到指定节点后,再抓取我们需要的内容,过程有点繁琐.LZ总结了几种常用的.易于实现 ...
- php正则表达式,在抓取内容进行匹配的时候表现不稳定
最近做了一个 抓取内容的程序,使用php的正则表达式对抓取的内容进行匹配,当进行大量匹配运算的时候,发现偶尔会出现匹配失败的情况.检查不出任何原因. 匹配失败导致匹配结果为空,最终导致写入数据库失败. ...
- python3下scrapy爬虫(第二卷:初步抓取网页内容之直接抓取网页)
上一卷中介绍了安装过程,现在我们开始使用这个神奇的框架 跟很多博主一样我也先选择一个非常好爬取的网站作为最初案例,那么我先用屌丝必备网站http://www.shaimn.com/xinggan/作为 ...
随机推荐
- 登录Cloudera Manager时报错org.hibernate.exception.GenericJDBCException: Could not open connection
去Cloudera Server上边看了一下日志: cat /opt/cloudera-manager/log/cloudera-scm-server/cloudera-scm-server.log ...
- Github排行榜
http://githubranking.com/ 中国区开发者排行榜: http://githubrank.com/ 也可以在官网查询: https://github.com/search?q=st ...
- C#之MemberwiseClone与Clone
MemberwiseClone 方法创建一个浅表副本,具体来说就是创建一个新对象,然后将当前对象的非静态字段复制到该新对象.如果字段是值类型的,则对该字段执行逐位复制.如果字段是引用类型,则复制引用但 ...
- Log4net源码View之Logger解析
1.简介 Logger是Log4net的三大核心载体之一,搞清楚它的意义很重大.另外两个分别是Appender和Layout.其对应关系为一个Logger对应多个Appender,一个Appender ...
- Android开源框架:AndroidAnnotations
AndroidAnnotations首页 github上的项目地址AndroidAnnotations Github. wiki:https://github.com/excilys/androida ...
- TopCoder SRM 588 DIV2 KeyDungeonDiv2
简单的题目 class KeyDungeonDiv2 { public: int countDoors(vector <int> doorR, vector <int> doo ...
- UVA 10325 - The Lottery(容斥)
以前做过的一个题,忘记/gcd了,看来需要把以前的东西看一下啊. #include <cstdio> #include <cstring> #include <iostr ...
- 用java删除文件夹里的所有文件
import java.io.File; public class Test { public static void main(String args[]){ Test t = new Test() ...
- com.mysql.jdbc.exceptions.jdbc4.MySQLSyntaxErrorException: Unknown column '??????' in 'field list'
严重: Servlet.service() for servlet jsp threw exceptioncom.mysql.jdbc.exceptions.jdbc4.MySQLSyntaxErro ...
- iOS -- MVC的理解
今天在写项目的时候困惑了一下 我在写一个应用的主界面,其实是很简单的,上面有几个控件,我在想把空间写到viewController里会不会有点冗杂 后来查了一下,发现貌似也不需要分开写,毕竟界面好简单 ...