国产探花免费观看_亚洲丰满少妇自慰呻吟_97日韩有码在线_资源在线日韩欧美_一区二区精品毛片,辰东完美世界有声小说,欢乐颂第一季,yy玄幻小说排行榜完本

首頁 > 編程 > C# > 正文

C#網絡爬蟲代碼分享 C#簡單的爬取工具

2020-01-24 01:02:45
字體:
來源:轉載
供稿:網友

公司編輯妹子需要爬取網頁內容,叫我幫忙做了一簡單的爬取工具

這是爬取網頁內容的方法,這對大家來說都不難,但是在這里有一些小改動,代碼獻上,供大家參考

/// <summary>
/// Downloads the HTML of the given URL. First tries to decode the response as
/// utf-8; if anything fails, the whole request is retried decoding as gb2312
/// (many Chinese sites still use that encoding). During the retry, a server
/// error status (403/404/...) still yields the error page body.
/// </summary>
/// <param name="url">Absolute URL of the page to fetch.</param>
/// <returns>The page HTML, or string.Empty when both attempts fail.</returns>
private string GetHttpWebRequest(string url)
{
    string strHTML = string.Empty;
    try
    {
        // Original code issued an extra WebRequest.Create/GetResponse whose
        // response was never read or closed; a single request suffices.
        HttpWebRequest myReq = (HttpWebRequest)WebRequest.Create(new Uri(url));
        // The original value wrongly began with "User-Agent:" (the header
        // name must not be part of the value) and dropped the closing ")".
        myReq.UserAgent = "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.0.3705)";
        myReq.Accept = "*/*";
        myReq.KeepAlive = true;
        myReq.Headers.Add("Accept-Language", "zh-cn,en-us;q=0.5");
        using (HttpWebResponse result = (HttpWebResponse)myReq.GetResponse())
        using (Stream receiveStream = result.GetResponseStream())
        using (StreamReader reader = new StreamReader(receiveStream, System.Text.Encoding.GetEncoding("utf-8")))
        {
            strHTML = reader.ReadToEnd();
        }
    }
    catch
    {
        // Fallback: the page is not valid utf-8 (or the first request threw);
        // repeat the request and decode the body as gb2312 instead.
        HttpWebRequest myReq = (HttpWebRequest)WebRequest.Create(new Uri(url));
        myReq.UserAgent = "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.2; .NET CLR 1.0.3705)";
        myReq.Accept = "*/*";
        myReq.KeepAlive = true;
        myReq.Headers.Add("Accept-Language", "zh-cn,en-us;q=0.5");
        HttpWebResponse result;
        try
        {
            result = (HttpWebResponse)myReq.GetResponse();
        }
        catch (WebException ex)
        {
            // An HTTP error status still carries a body; read it from the
            // attached error response. NOTE(review): ex.Response can be null
            // for pure network failures — confirm callers tolerate the NRE
            // the original code also produced in that case.
            result = (HttpWebResponse)ex.Response;
        }
        using (result)
        using (Stream receiveStream = result.GetResponseStream())
        using (StreamReader reader = new StreamReader(receiveStream, System.Text.Encoding.GetEncoding("gb2312")))
        {
            strHTML = reader.ReadToEnd();
        }
    }
    return strHTML;
}

這是根據url爬取網頁源碼的方法,有一些小改動,很多網頁有不同的編碼格式,甚至有些網站做了反爬取的防范,這個方法經過適當改動也能爬取

以下是爬取網頁所有的網址鏈接

 /// <summary>     /// 提取HTML代碼中的網址     /// </summary>     /// <param name="htmlCode"></param>     /// <returns></returns>     private static List<string> GetHyperLinks(string htmlCode, string url)     {       ArrayList al = new ArrayList();       bool IsGenxin = false;       StringBuilder weburlSB = new StringBuilder();//SQL       StringBuilder linkSb = new StringBuilder();//展示數據       List<string> Weburllistzx = new List<string>();//新增       List<string> Weburllist = new List<string>();//舊的       string ProductionContent = htmlCode;       Regex reg = new Regex(@"http(s)?://([/w-]+/.)+[/w-]+/?");       string wangzhanyuming = reg.Match(url, 0).Value;       MatchCollection mc = Regex.Matches(ProductionContent.Replace("href=/"/", "href=/"" + wangzhanyuming).Replace("href='/", "href='" + wangzhanyuming).Replace("href=/", "href=" + wangzhanyuming).Replace("href=/"./", "href=/"" + wangzhanyuming), @"<[aA][^>]* href=[^>]*>", RegexOptions.Singleline);       int Index = 1;       foreach (Match m in mc)       {         MatchCollection mc1 = Regex.Matches(m.Value, @"[a-zA-z]+://[^/s]*", RegexOptions.Singleline);         if (mc1.Count > 0)         {           foreach (Match m1 in mc1)           {             string linkurlstr = string.Empty;             linkurlstr = m1.Value.Replace("/"", "").Replace("'", "").Replace(">", "").Replace(";", "");             weburlSB.Append("$-$");             weburlSB.Append(linkurlstr);             weburlSB.Append("$_$");             if (!Weburllist.Contains(linkurlstr) && !Weburllistzx.Contains(linkurlstr))             {               IsGenxin = true;               Weburllistzx.Add(linkurlstr);               linkSb.AppendFormat("{0}<br/>", linkurlstr);             }           }         }         else         {           if (m.Value.IndexOf("javascript") == -1)           {             string amstr = string.Empty;             string wangzhanxiangduilujin = string.Empty;             wangzhanxiangduilujin = url.Substring(0, url.LastIndexOf("/") 
+ 1);             amstr = m.Value.Replace("href=/"", "href=/"" + wangzhanxiangduilujin).Replace("href='", "href='" + wangzhanxiangduilujin);             MatchCollection mc11 = Regex.Matches(amstr, @"[a-zA-z]+://[^/s]*", RegexOptions.Singleline);             foreach (Match m1 in mc11)             {               string linkurlstr = string.Empty;               linkurlstr = m1.Value.Replace("/"", "").Replace("'", "").Replace(">", "").Replace(";", "");               weburlSB.Append("$-$");               weburlSB.Append(linkurlstr);               weburlSB.Append("$_$");               if (!Weburllist.Contains(linkurlstr) && !Weburllistzx.Contains(linkurlstr))               {                 IsGenxin = true;                 Weburllistzx.Add(linkurlstr);                 linkSb.AppendFormat("{0}<br/>", linkurlstr);               }             }           }         }         Index++;       }       return Weburllistzx;     }

這塊的技術其實就是簡單的使用了正則去匹配!接下來獻上獲取標題,以及存儲到xml文件的方法

/// <summary>
/// Writes the collected hyperlinks to D:/HyperLinks.xml, one element per
/// link; the element name is the link's domain suffix (com/net/...), so the
/// file groups links by TLD.
/// </summary>
/// <param name="strURL">The page the links were extracted from (recorded in a comment).</param>
/// <param name="alHyperLinks">The links to store.</param>
private static void WriteToXml(string strURL, List<string> alHyperLinks)
{
    // using ensures the file handle is released even when a write throws
    // (the original leaked the writer on exception).
    using (XmlTextWriter writer = new XmlTextWriter(@"D:/HyperLinks.xml", Encoding.UTF8))
    {
        writer.Formatting = Formatting.Indented;
        writer.WriteStartDocument(false);
        writer.WriteDocType("HyperLinks", null, "urls.dtd", null);
        writer.WriteComment("提取自" + strURL + "的超鏈接");
        writer.WriteStartElement("HyperLinks");
        writer.WriteStartElement("HyperLinks", null);
        writer.WriteAttributeString("DateTime", DateTime.Now.ToString());
        foreach (string str in alHyperLinks)
        {
            string title = GetDomain(str);
            string body = str;
            writer.WriteElementString(title, null, body);
        }
        writer.WriteEndElement();
        writer.WriteEndElement();
        writer.Flush();
    }
}

/// <summary>
/// Returns the domain suffix ("com", "net", "cn", "org", "gov") contained in
/// the URL, or "other" when none of those is present.
/// </summary>
/// <param name="strURL">URL to inspect.</param>
/// <returns>The bare suffix without dot or slash, e.g. "com".</returns>
private static string GetDomain(string strURL)
{
    // The scrape mangled the backslashes ("/.com/"); the intended patterns
    // escape the dot: \.com/ etc., matching e.g. ".com/" inside the URL.
    string strRegex = @"(\.com/|\.net/|\.cn/|\.org/|\.gov/)";
    Regex r = new Regex(strRegex, RegexOptions.IgnoreCase);
    Match m = r.Match(strURL);
    string retVal = m.ToString();
    // Strip the leading dot and the trailing slash, leaving just "com" etc.
    strRegex = @"\.|/$";
    retVal = Regex.Replace(retVal, strRegex, "").ToString();
    if (retVal == "")
        retVal = "other";
    return retVal;
}

/// <summary>
/// Extracts the page title from raw HTML: takes the &lt;title&gt; text, but
/// prefers the &lt;h1&gt; text when the title merely extends it — the h1 is
/// usually cleaner than the site-suffixed document title.
/// </summary>
/// <param name="html">Raw HTML of the page.</param>
/// <returns>The extracted title, or "" when no &lt;title&gt;/&lt;h1&gt; is found.</returns>
private static string GetTitle(string html)
{
    // [\s\S] (any char incl. newline) was garbled to [/s/S] in the scrape.
    string titleFilter = @"<title>[\s\S]*?</title>";
    string h1Filter = @"<h1.*?>.*?</h1>";
    string clearFilter = @"<.*?>";

    string title = "";
    Match match = Regex.Match(html, titleFilter, RegexOptions.IgnoreCase);
    if (match.Success)
    {
        title = Regex.Replace(match.Groups[0].Value, clearFilter, "");
    }

    // Prefer the h1 heading when the document title starts with it.
    match = Regex.Match(html, h1Filter, RegexOptions.IgnoreCase);
    if (match.Success)
    {
        string h1 = Regex.Replace(match.Groups[0].Value, clearFilter, "");
        if (!String.IsNullOrEmpty(h1) && title.StartsWith(h1))
        {
            title = h1;
        }
    }
    return title;
}

這就是所用的全部方法,還是有很多需要改進之處!大家如果有發現不足之處還請指出,謝謝!

以上就是本文的全部內容,希望對大家的學習有所幫助,也希望大家多多支持武林網。

發表評論 共有條評論
用戶名: 密碼:
驗證碼: 匿名發表
主站蜘蛛池模板: 铁岭县| 板桥市| 枞阳县| 黎川县| 禹城市| 饶河县| 临夏市| 大竹县| 普兰县| 东安县| 缙云县| 博客| 钦州市| 长子县| 固始县| 城市| 威远县| 杭锦旗| 从江县| 辽宁省| 尚志市| 泰来县| 桃园县| 桃源县| 广饶县| 黑水县| 陇西县| 温泉县| 雅江县| 延川县| 庆元县| 兴宁市| 南皮县| 桐柏县| 岐山县| 秦安县| 赫章县| 平原县| 台北县| 砀山县| 安宁市|