网页新闻抓取(如何实现从各大网站抓取新闻并经过格式处理显示到我们的新闻客户端呢?)

优采云 发布时间: 2021-12-02 10:14

  网页新闻抓取(如何实现从各大网站抓取新闻并经过格式处理显示到我们的新闻客户端呢?)

  如何从主要网站获取新闻并将其格式化给我们的新闻客户?

  Android 客户端抓取和解析网页的方法有两种:

  一、使用jsoup

  没仔细研究,网上也有类似的实现,可以参考相关文章(原文中的两个参考链接在转载时已丢失):

  二、使用htmlparser

  我在我的项目中使用htmlparser来抓取和解析腾讯新闻。代码如下:

<p><br /><br />public class NetUtil {<br /> public static List DATALIST = new ArrayList();<br /><br /> public static String[][] CHANNEL_URL = new String[][] {<br /> new String[]{"http://news.qq.com/world_index.shtml","http://news.qq.com"},<br /> new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},<br /> new String[]{"http://news.qq.com/society_index.shtml","http://news.qq.com"},<br /> new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},<br /> new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},<br /> new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},<br /> new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},<br /> new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},<br /> new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},<br /> new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},<br /> new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},<br /> };<br /><br /> public static int getTechNews(List techData, int cId) {<br /> int result = 0;<br /> try {<br /> NodeFilter filter = new AndFilter(new TagNameFilter("div"),<br /> new HasAttributeFilter("id", "listZone"));<br /> Parser parser = new Parser();<br /> parser.setURL(CHANNEL_URL[cId][0]);<br /> parser.setEncoding(parser.getEncoding());<br /><br /> NodeList list = parser.extractAllNodesThatMatch(filter);<br /> for (int i = 0; i < list.size(); i++) {<br /> Tag node = (Tag) list.elementAt(i);<br /> for (int j = 0; j < node.getChildren().size(); j++) {<br /> try {<br /> String textstr = node.getChildren().elementAt(j).toHtml();<br /> if (textstr.trim().length() > 0) {<br /> NodeFilter subFilter = new TagNameFilter("p");<br /> Parser subParser = new Parser();<br /> subParser.setResource(textstr);<br /> NodeList subList = subParser.extractAllNodesThatMatch(subFilter);<br /><br /> NodeFilter titleStrFilter = new AndFilter(new 
TagNameFilter("a"),<br /> new HasAttributeFilter("class", "linkto"));<br /> Parser titleStrParser = new Parser();<br /> titleStrParser.setResource(textstr);<br /> NodeList titleStrList = titleStrParser.extractAllNodesThatMatch(titleStrFilter);<br /><br /> int linkstart = titleStrList.toHtml().indexOf("href=\"");<br /> int linkend = titleStrList.toHtml().indexOf("\">");<br /> int titleend = titleStrList.toHtml().indexOf("</a>");<br /><br /> String link = CHANNEL_URL[cId][1]+titleStrList.toHtml().substring(linkstart+6, linkend);<br /> String title = titleStrList.toHtml().substring(linkend+2, titleend);<br /><br /> NewsBrief newsBrief = new NewsBrief();<br /> newsBrief.setTitle(title);<br /> newsBrief.setUrl(link);<br /> newsBrief.setSummary(subList.asString());<br /> techData.add(newsBrief);<br /> }<br /> } catch (Exception e) {<br /> e.printStackTrace();<br /> }<br /> }<br /> }<br /> } catch (Exception e) {<br /> result = 1;<br /> e.printStackTrace();<br /> }<br /> return result;<br /> }<br /><br /> public static int getTechNews2(List techData, int cId) {<br /> int result = 0;<br /> try {<br /> // 查询http://tech.qq.com/tech_yejie.htm 页面 滚动新闻的 标签 以及ID<br /> NodeFilter filter = new AndFilter(new TagNameFilter("div"),<br /> new HasAttributeFilter("id", "listZone"));<br /> Parser parser = new Parser();<br /> parser.setURL(CHANNEL_URL[cId][0]);<br /> parser.setEncoding(parser.getEncoding());<br /><br /> // 获取匹配的fileter的节点<br /> NodeList list = parser.extractAllNodesThatMatch(filter);<br /> StringBuilder NewsStr = new StringBuilder("");// 新闻表格字符串<br /> for (int i = 0; i < list.size(); i++) {<br /> Tag node = (Tag) list.elementAt(i);<br /> for (int j = 0; j < node.getChildren().size(); j++) {<br /> String textstr = node.getChildren().elementAt(j).toHtml()<br /> .trim();<br /> if (textstr.length() > 0) {<br /> int linkbegin = 0, linkend = 0, titlebegin = 0, titleend = 0;<br /> while (true) {<br /> linkbegin = textstr.indexOf("href=", titleend);// 截取链接字符串起始位置<br /><br /> // 
如果不存在 href了 也就结束了<br /> if (linkbegin < 0)<br /> break;<br /><br /> linkend = textstr.indexOf("\">", linkbegin);// 截取链接字符串结束位置<br /> String sublink = textstr.substring(linkbegin + 6,linkend);<br /> String link = CHANNEL_URL[cId][1] + sublink;<br /><br /> titlebegin = textstr.indexOf("\">", linkend);<br /> titleend = textstr.indexOf("</a>", titlebegin);<br /> String title = textstr.substring(titlebegin + 2,titleend);<br /><br /> NewsStr.append("\r\n\r\n\t<a target=\"_blank\" href=\""br / + link + "\">");<br /> NewsStr.append(title);<br /> NewsStr.append("</a>");<br /><br /> NewsBrief newsBrief = new NewsBrief();<br /> newsBrief.setTitle(title);<br /> newsBrief.setUrl(link);<br /> techData.add(newsBrief);<br /> }<br /> }<br /> }<br /> }<br /> } catch (Exception e) {<br /> result = 1;<br /> e.printStackTrace();<br /> }<br /> return result;<br /> }<br /><br /> public static int parserURL(String url,NewsBrief newsBrief) {<br /> int result = 0;<br /> try {<br /> Parser parser = new Parser(url);<br /> NodeFilter contentFilter = new AndFilter(<br /> new TagNameFilter("div"),<br /> new HasAttributeFilter("id","Cnt-Main-Article-QQ"));<br /> NodeFilter newsdateFilter = new AndFilter(<br /> new TagNameFilter("span"),<br /> new HasAttributeFilter("class",<br /> "article-time"));<br /> NodeFilter newsauthorFilter = new AndFilter(<br /> new TagNameFilter("span"),<br /> new HasAttributeFilter("class",<br /> "color-a-1"));<br /> NodeFilter imgUrlFilter = new TagNameFilter("IMG");<br /><br /> newsBrief.setContent(parserContent(contentFilter,parser));<br /> parser.reset(); // 记得每次用完parser后,要重置一次parser。要不然就得不到我们想要的内容了。<br /><br /> newsBrief.setPubDate(parserDate(newsdateFilter,parser));<br /> parser.reset();<br /><br /> newsBrief.setSource(parserAuthor(newsauthorFilter, parser));<br /> parser.reset();<br /><br /> newsBrief.setImgUrl(parserImgUrl(contentFilter,imgUrlFilter, parser));<br /><br /> } catch (Exception e) {<br /> result=1;<br /> e.printStackTrace();<br /> }<br /> return 
result;<br /> }<br /><br /> private static String parserContent(NodeFilter filter, Parser parser) {<br /> String reslut = "";<br /> try {<br /> NodeList contentList = (NodeList) parser.parse(filter);<br /> // 将DIV中的标签都 去掉只留正文<br /> reslut = contentList.asString();<br /> } catch (Exception e) {<br /> e.printStackTrace();<br /> }<br /> return reslut;<br /> }<br /><br /> private static String parserDate(NodeFilter filter, Parser parser) {<br /> String reslut = "";<br /> try {<br /> NodeList datetList = (NodeList) parser.parse(filter);<br /> // 将DIV中的标签都 去掉只留正文<br /> reslut = datetList.asString();<br /> } catch (Exception e) {<br /> e.printStackTrace();<br /> }<br /> return reslut;<br /> }<br /><br /><br /> private static String parserAuthor(NodeFilter filter, Parser parser) {<br /> String reslut = "";<br /> try {<br /> NodeList authorList = (NodeList) parser.parse(filter);<br /> // 将DIV中的标签都 去掉只留正文<br /> reslut = authorList.asString();<br /> } catch (Exception e) {<br /> e.printStackTrace();<br /> }<br /> return reslut;<br /> }<br /><br /> private static List parserImgUrl(NodeFilter bodyfilter,NodeFilter filter, Parser parser) {<br /> List reslut = new ArrayList();<br /> try {<br /> NodeList bodyList = (NodeList) parser.parse(bodyfilter);<br /> Parser imgParser = new Parser();<br /> imgParser.setResource(bodyList.toHtml());<br /> NodeList imgList = imgParser.extractAllNodesThatMatch(filter);<br /> String bodyString = imgList.toHtml();<br /><br /> //正文包含图片<br /> if (bodyString.contains("

0 个评论

要回复文章请先登录或注册


官方客服QQ群

微信人工客服

QQ人工客服


线