Previously, when scraping web pages in .NET, I parsed the HTML with regular expressions, which was tedious and time-consuming; with complex HTML it could be downright painful.
I had long heard of HtmlAgilityPack, a .NET library (package) that parses HTML using XPath, but never found the time to try it. After a quick look at its API, it turned out to be a real workhorse for HTML parsing, so I spent some time putting together an example and writing it down.
The example downloads the posts of a 博客园 (cnblogs) blog by post category, implemented in two parts: the first part collects the articles into a collection variable, and the second part iterates over that collection and writes the articles to local disk.
This is not particularly efficient, since each article could just as well be downloaded as it is collected; for now efficiency is not the concern, only getting the feature working. A brief walkthrough follows.
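For orientation, here is a minimal self-contained sketch of how HtmlAgilityPack is typically used: load an HTML string into an HtmlDocument and query it with XPath. The HTML snippet and XPath expression below are only illustrative.

using System;
using HtmlAgilityPack;

class XPathDemo
{
    static void Main()
    {
        // Load an HTML fragment into an HtmlDocument
        var doc = new HtmlDocument();
        doc.LoadHtml("<div id='list'><a href='/a'>First</a><a href='/b'>Second</a></div>");

        // Select every <a> under the div with id='list' via an XPath expression
        var nodes = doc.DocumentNode.SelectNodes("//div[@id='list']//a");
        if (nodes != null)
        {
            foreach (var node in nodes)
            {
                // InnerText is the link text, Attributes["href"] its target
                Console.WriteLine("{0} -> {1}", node.InnerText, node.Attributes["href"].Value);
            }
        }
    }
}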
Getting the post categories
Given a blog name, retrieve the corresponding post categories.
/// <summary>
/// Gets the blog's post categories.
/// </summary>
/// <param name="uname"></param>
/// <returns></returns>
private static List<BlogType> GettBlogTypeList(string uname)
{
    string url = "http://www.cnblogs.com/" + uname + "/mvc/blog/sidecolumn.aspx?blogApp=" + uname;
    string htmlStr = CommonHelper.GetRequestStr(url);
    HtmlDocument doc = new HtmlDocument();
    doc.LoadHtml(htmlStr);
    var nodes = doc.DocumentNode.SelectNodes("//div[@id='sidebar_postcategory']//a"); // post categories
    if (nodes == null || nodes.Count <= 0)
        return null;
    List<BlogType> list = new List<BlogType>();
    for (int i = 0; i < nodes.Count; i++)
    {
        var aUrl = nodes[i].Attributes["href"].Value;
        var name = nodes[i].InnerText;
        list.Add(new BlogType()
        {
            BlogTypeUrl = aUrl,
            BlogTypeName = name.Contains("(") ? name.Split('(')[0] : name,
            BlogTypeNameShow = name
        });
    }
    return list;
}

public class BlogType
{
    public string BlogTypeUrl { get; set; }
    public string BlogTypeName { get; set; }
    public string BlogTypeNameShow { get; set; }
}
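The method above relies on CommonHelper.GetRequestStr, which is not shown in this section. A minimal sketch, assuming it simply issues an HTTP GET and returns the response body as a UTF-8 string, could look like this:

using System.IO;
using System.Net;
using System.Text;

public static class CommonHelper
{
    /// <summary>
    /// Issues an HTTP GET against the given URL and returns the response body as a string.
    /// </summary>
    public static string GetRequestStr(string url)
    {
        var request = (HttpWebRequest)WebRequest.Create(url);
        request.Method = "GET";
        using (var response = (HttpWebResponse)request.GetResponse())
        using (var stream = response.GetResponseStream())
        using (var reader = new StreamReader(stream, Encoding.UTF8)) // assumes the pages are UTF-8 encoded
        {
            return reader.ReadToEnd();
        }
    }
}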
The retrieved post categories can then be inspected, for example by printing them out:
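A minimal test call might look like the sketch below. The blog name "xxx" is only a placeholder, and since GettBlogTypeList is declared private static, this code would live in the same class.

static void Main()
{
    // "xxx" is a placeholder blog name
    var types = GettBlogTypeList("xxx");
    if (types == null)
    {
        Console.WriteLine("No post categories found.");
    }
    else
    {
        foreach (var t in types)
        {
            // BlogTypeNameShow keeps the raw link text (including any parenthesized post count)
            Console.WriteLine("{0}  {1}", t.BlogTypeNameShow, t.BlogTypeUrl);
        }
    }
}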
Collecting the articles in each category
This is done in two steps: first fetch the articles with only their titles and URLs, then fetch each article's content.
/// <summary>
/// Gets the blogs for each category.
/// </summary>
/// <param name="blogTypes"></param>
/// <param name="useTime"></param>
/// <returns></returns>
public static Dictionary<BlogType, List<BlogInfo>> GetBlogsByType(List<BlogType> blogTypes, out long useTime)
{
    Stopwatch sw = new Stopwatch();
    sw.Start();
    Dictionary<BlogType, List<BlogInfo>> dic = new Dictionary<BlogType, List<BlogInfo>>();
    foreach (var blogType in blogTypes)
    {
        List<BlogInfo> list = new List<BlogInfo>();
        HtmlDocument doc = new HtmlDocument();
        doc.LoadHtml(CommonHelper.GetRequestStr(blogType.BlogTypeUrl));
        var typeNameNode = doc.DocumentNode.SelectSingleNode("//div[@class='entrylist']/h1");
        string typeName = typeNameNode.InnerText;
        var listPosttitleNodes = doc.DocumentNode.SelectNodes("//div[@class='entrylistPosttitle']/a");
        if (listPosttitleNodes != null && listPosttitleNodes.Count > 0)
        {
            for (int i = 0; i < listPosttitleNodes.Count; i++)
            {
                Console.WriteLine("Crawling article [{0}]...", listPosttitleNodes[i].InnerText);
                list.Add(new BlogInfo()
                {
                    BlogUrl = listPosttitleNodes[i].Attributes["href"].Value,
                    BlogTitle = listPosttitleNodes[i].InnerText,
                    BlogTypeName = typeName
                });
            }
        }
        dic.Add(blogType, list);
    }
    sw.Stop();
    useTime = sw.ElapsedMilliseconds;
    return dic;
}

/// <summary>
/// Gets the full details of each blog post.
/// </summary>
/// <param name="dic"></param>
/// <param name="useTime"></param>
/// <returns></returns>
public static Dictionary<BlogType, List<BlogInfo>> GetBlogDetail(Dictionary<BlogType, List<BlogInfo>> dic, out long useTime)
{
    Stopwatch sw = new Stopwatch();
    sw.Start();
    HtmlDocument doc = new HtmlDocument();
    for (int k = 0; k < dic.Keys.Count; k++)
    {
        var blogType = dic.Keys.ElementAt(k);
        var list = dic[blogType];
        for (int i = 0; i < list.Count; i++)
        {
            Console.WriteLine("Fetching content of article [{0}]...", list[i].BlogTitle);
            doc.LoadHtml(CommonHelper.GetRequestStr(list[i].BlogUrl));
            var bodyNode = doc.DocumentNode.SelectSingleNode("//div[@id='cnblogs_post_body']");
            var dateNode = doc.DocumentNode.SelectSingleNode("//span[@id='post-date']");
            var userNode = doc.DocumentNode.SelectSingleNode("//div[@class='postDesc']/a[1]");
            list[i].BlogContent = bodyNode == null ? "failed to get content" : bodyNode.InnerHtml;
            list[i].BlogPostTime = dateNode == null ? "failed to get post time" : dateNode.InnerText;
            list[i].BlogName = userNode == null ? "failed to get author" : userNode.InnerText;
        }
        dic[blogType] = list;
    }
    sw.Stop();
    useTime = sw.ElapsedMilliseconds;
    return dic;
}

public class BlogInfo
{
    public string BlogUrl { get; set; }
    public string BlogName { get; set; }
    public string BlogTitle { get; set; }
    public string BlogContent { get; set; }
    public string BlogTypeName { get; set; }
    public string BlogPostTime { get; set; }
}
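The second part mentioned at the beginning, writing the collected articles to local disk, is not included above. A minimal sketch, assuming each article is saved as an .html file under one folder per category, could look like this:

using System;
using System.Collections.Generic;
using System.IO;

public static class BlogDownloader
{
    /// <summary>
    /// Writes every collected article to disk: one folder per category, one .html file per article.
    /// </summary>
    public static void SaveToLocal(Dictionary<BlogType, List<BlogInfo>> dic, string rootDir)
    {
        foreach (var pair in dic)
        {
            // One sub-folder per post category
            string typeDir = Path.Combine(rootDir, pair.Key.BlogTypeName);
            if (!Directory.Exists(typeDir))
                Directory.CreateDirectory(typeDir);

            foreach (var blog in pair.Value)
            {
                // Strip characters that are not allowed in file names
                string fileName = blog.BlogTitle;
                foreach (var c in Path.GetInvalidFileNameChars())
                    fileName = fileName.Replace(c.ToString(), "");

                string filePath = Path.Combine(typeDir, fileName + ".html");
                File.WriteAllText(filePath, blog.BlogContent ?? "");
                Console.WriteLine("Saved [{0}] to {1}", blog.BlogTitle, filePath);
            }
        }
    }
}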