Java crawler for dynamic web pages: database table structure and a dynamic-proxy crawler implementation
The author's company runs a blockchain portal. Much of the site's content (information, news, videos, and so on) is obtained by crawling third-party websites, and data has to be pulled from a large number of sources. Writing a dedicated interface for every data source would be an enormous amount of work, so the author built a crawler mechanism around dynamic proxies: bringing a new data source online only requires adding a record to the database, with no code changes.
Without further ado, here is the database table structure.
DROP TABLE IF EXISTS `yiyi_crawler_website`;
CREATE TABLE `yiyi_crawler_website` (
`id` bigint(16) NOT NULL AUTO_INCREMENT COMMENT 'auto-increment ID',
`gmt_create` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
`gmt_modified` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'last modified time',
`url` varchar(255) DEFAULT NULL COMMENT 'site URL (the endpoint the crawler fetches)',
`interval` bigint(16) DEFAULT '0' COMMENT 'crawl interval in milliseconds',
`website_type` tinyint(20) DEFAULT NULL COMMENT 'site type (1 = news flash)',
`website_name` varchar(32) DEFAULT NULL COMMENT 'site name',
`source_link` varchar(255) DEFAULT NULL COMMENT 'source link',
`data_field` varchar(32) DEFAULT NULL COMMENT 'field holding the payload; if empty the whole response is used (nested levels joined with ".", e.g. "data.items" means the content list sits under items inside data)',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4;
DROP TABLE IF EXISTS `yiyi_crawler_website_content`;
CREATE TABLE `yiyi_crawler_website_content` (
`id` bigint(16) NOT NULL AUTO_INCREMENT,
`gmt_create` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
`gmt_modified` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'last modified time',
`website_id` bigint(16) DEFAULT NULL COMMENT 'website ID (yiyi_crawler_website.id)',
`content_name` varchar(16) DEFAULT NULL COMMENT 'content name',
`table_name` varchar(32) DEFAULT NULL COMMENT 'target table the data is written to',
`column_name` varchar(32) DEFAULT NULL COMMENT 'target column the data is written to',
`return_field` varchar(32) DEFAULT NULL COMMENT 'field in the crawled response that supplies this column''s value',
`field_type` tinyint(2) DEFAULT '2' COMMENT 'field type (1 = date, 2 = numeric, 0 = other)',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4;
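Before moving on to the code, it may help to see how the dotted data_field convention from the schema above is meant to work. The following is a minimal standalone sketch (the sample JSON and the class name are illustrative only, not from the original project), using the same fastjson calls as the proxy implementation further below:

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;

public class DataFieldDemo {
    public static void main(String[] args) {
        // Hypothetical response from a source whose data_field is "data.items"
        String json = "{\"code\":0,\"data\":{\"items\":[{\"title\":\"BTC news\"},{\"title\":\"ETH news\"}]}}";
        String data = json;
        // Descend one level per dotted segment; split() takes a regex, so escape the dot
        for (String field : "data.items".split("\\.")) {
            data = JSON.parseObject(data).getString(field);
        }
        JSONArray items = JSON.parseArray(data); // the content list the crawler iterates over
        System.out.println(items.size() + " items: " + items);
    }
}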
Below is the crawler's dynamic-proxy implementation, based on the cglib framework.
/**
 * Crawler task proxy interface
 *
 * @author liyi
 * @create 2018-03-17 16:58
 **/
public interface CrawlerProxy {
    /**
     * Start the crawl task
     * @param website the data-source record to crawl
     */
    void start(CrawlerWebsiteModelOut website);
}
/**
 * Crawler task class
 *
 * @author liyi
 * @create 2018-03-17 18:21
 **/
public class CrawlerTask implements CrawlerProxy {
    @Override
    public void start(CrawlerWebsiteModelOut website) {
        System.out.println("Crawler task started");
    }
}
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.TypeReference;
import com.lynn.yiyi.http.Request;
import com.lynn.yiyi.http.WebUtils;
import com.lynn.yiyi.model.out.CrawlerWebsiteModelOut;
import java.util.*;

/**
 * Crawler timer task
 *
 * @author liyi
 * @create 2018-03-17 18:35
 **/
public class CrawlerTimerTask extends TimerTask {
    private CrawlerWebsiteModelOut website = null;
    @Override
    public void run() {
        // Fetch the raw JSON from the configured endpoint
        String json = WebUtils.executeHttp(Request.options().setMethod(com.lynn.yiyi.http.Method.GET).setUrl(website.getUrl()).build()).getJsonString();
        // Walk down the dotted data_field path (e.g. "data.items");
        // split() takes a regex, so the dot must be escaped
        String[] fields = website.getDataField().split("\\.");
        String data = json;
        for (String field : fields) {
            data = JSON.parseObject(data).getString(field);
        }
        // Parse the final level into the content list
        List<Object> dataList = JSON.parseObject(data, new TypeReference<List<Object>>() {}.getType());
    }
    public void setWebsite(CrawlerWebsiteModelOut website) {
        this.website = website;
    }
    public CrawlerWebsiteModelOut getWebsite() {
        return website;
    }
}
import com.alibaba.fastjson.JSONObject;
import com.lynn.yiyi.http.Request;
import com.lynn.yiyi.http.WebUtils;
import com.lynn.yiyi.model.out.CrawlerWebsiteModelOut;
import com.lynn.yiyi.service.CrawlerService;
import com.lynn.yiyi.utils.SpringUtils;
import net.sf.cglib.proxy.Enhancer;
import net.sf.cglib.proxy.MethodInterceptor;
import net.sf.cglib.proxy.MethodProxy;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
/**
 * Crawler dynamic proxy class
 *
 * @author liyi
 * @create 2018-03-17 18:22
 **/
public class CrawlerCglibProxy implements MethodInterceptor {
    // One Timer per website id, so the same data source is never scheduled twice
    private final Map<Long, Timer> timerMap = new HashMap<>();
    private final Enhancer enhancer = new Enhancer();

    Object getProxy(Class<?> clazz) {
        enhancer.setSuperclass(clazz);
        enhancer.setCallback(this);
        return enhancer.create();
    }

    @Override
    public Object intercept(Object obj, Method method, Object[] args, MethodProxy proxy) throws Throwable {
        Object o = proxy.invokeSuper(obj, args);
        if ("start".equals(method.getName())) {
            if (args[0] instanceof CrawlerWebsiteModelOut) {
                CrawlerWebsiteModelOut website = (CrawlerWebsiteModelOut) args[0];
                if (timerMap.get(website.getId()) == null) {
                    Timer timer = new Timer();
                    timer.schedule(new TimerTask() {
                        @Override
                        public void run() {
                            String data = WebUtils.executeHttp(Request.options().setMethod(com.lynn.yiyi.http.Method.GET).setUrl(website.getUrl()).build()).getJsonString();
                            // Descend the dotted data_field path one level at a time
                            String[] fields = website.getDataField().split("\\.");
                            for (String field : fields) {
                                JSONObject object = JSONObject.parseObject(data);
                                data = object.getString(field);
                            }
                            // TODO write the crawled data to the corresponding database table
                        }
                    }, 0, website.getInterval());
                    timerMap.put(website.getId(), timer);
                }
            }
        }
        return o;
    }

    public static <T> T create(Class<T> cls) {
        CrawlerCglibProxy proxy = new CrawlerCglibProxy();
        return (T) proxy.getProxy(cls);
    }
}
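The TODO inside the timer task is where the second table, yiyi_crawler_website_content, comes into play: each record maps one field of the crawled JSON (return_field) onto a column (column_name) of a target table (table_name). A possible sketch of that step is shown below. It assumes each list element is a JSON object and uses Spring's JdbcTemplate; the CrawlerContentWriter class and its saveItems method are hypothetical, not part of the original code:

import com.alibaba.fastjson.JSONObject;
import org.springframework.jdbc.core.JdbcTemplate;
import java.util.List;
import java.util.stream.Collectors;

public class CrawlerContentWriter {
    // Hypothetical persistence step: map each crawled JSON object onto the
    // table/column pairs configured for this website
    public void saveItems(CrawlerWebsiteModelOut website, List<JSONObject> items, JdbcTemplate jdbcTemplate) {
        List<CrawlerWebsiteContentModelOut> mappings = website.getContentList();
        if (mappings.isEmpty()) {
            return;
        }
        // All mappings of one website are assumed to target the same table
        String table = mappings.get(0).getTableName();
        String columns = mappings.stream()
                .map(CrawlerWebsiteContentModelOut::getColumnName)
                .collect(Collectors.joining(", "));
        String placeholders = mappings.stream()
                .map(m -> "?")
                .collect(Collectors.joining(", "));
        String sql = "INSERT INTO " + table + " (" + columns + ") VALUES (" + placeholders + ")";
        for (JSONObject item : items) {
            // return_field names the source field in the crawled JSON; field_type
            // (date/numeric/other) could drive type conversion here, omitted for brevity
            Object[] values = mappings.stream()
                    .map(m -> item.get(m.getReturnField()))
                    .toArray();
            jdbcTemplate.update(sql, values);
        }
    }
}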
import java.util.ArrayList;
import java.util.List;

/**
 * Website crawler output model
 *
 * @author liyi
 * @create 2018-03-17 17:04
 **/
public class CrawlerWebsiteModelOut extends BaseModelOut {
    private String url;
    private Long interval;
    private Integer websiteType;
    private String websiteName;
    private String sourceLink;
    private String dataField;
    private List<CrawlerWebsiteContentModelOut> contentList = new ArrayList<>();
    public String getUrl() {
        return url;
    }
    public void setUrl(String url) {
        this.url = url;
    }
    public Long getInterval() {
        return interval;
    }
    public void setInterval(Long interval) {
        this.interval = interval;
    }
    public Integer getWebsiteType() {
        return websiteType;
    }
    public void setWebsiteType(Integer websiteType) {
        this.websiteType = websiteType;
    }
    public String getWebsiteName() {
        return websiteName;
    }
    public void setWebsiteName(String websiteName) {
        this.websiteName = websiteName;
    }
    public String getSourceLink() {
        return sourceLink;
    }
    public void setSourceLink(String sourceLink) {
        this.sourceLink = sourceLink;
    }
    public String getDataField() {
        return dataField;
    }
    public void setDataField(String dataField) {
        this.dataField = dataField;
    }
    public List<CrawlerWebsiteContentModelOut> getContentList() {
        return contentList;
    }
    public void setContentList(List<CrawlerWebsiteContentModelOut> contentList) {
        this.contentList = contentList;
    }
}
/**
 * Crawler website content output model
 */
public class CrawlerWebsiteContentModelOut extends BaseModelOut {
    private Long websiteId;
    private String contentName;
    private String tableName;
    private String columnName;
    private String returnField;
    private Integer fieldType;
    public Long getWebsiteId() {
        return websiteId;
    }
    public void setWebsiteId(Long websiteId) {
        this.websiteId = websiteId;
    }
    public String getContentName() {
        return contentName;
    }
    public void setContentName(String contentName) {
        this.contentName = contentName;
    }
    public String getTableName() {
        return tableName;
    }
    public void setTableName(String tableName) {
        this.tableName = tableName;
    }
    public String getColumnName() {
        return columnName;
    }
    public void setColumnName(String columnName) {
        this.columnName = columnName;
    }
    public String getReturnField() {
        return returnField;
    }
    public void setReturnField(String returnField) {
        this.returnField = returnField;
    }
    public Integer getFieldType() {
        return fieldType;
    }
    public void setFieldType(Integer fieldType) {
        this.fieldType = fieldType;
    }
}
import java.util.Date;

/**
 * Base output model
 *
 * @author liyi
 * @create 2018-03-17 17:02
 **/
public abstract class BaseModelOut {
    private Long id;
    private Date create;
    private Date modified;
    public Long getId() {
        return id;
    }
    public void setId(Long id) {
        this.id = id;
    }
    public Date getCreate() {
        return create;
    }
    public void setCreate(Date create) {
        this.create = create;
    }
    public Date getModified() {
        return modified;
    }
    public void setModified(Date modified) {
        this.modified = modified;
    }
}
Finally, here is the main method used for testing.
public static void main(String[] args) {
    CrawlerProxy proxy = CrawlerFactory.create();
    // website is the data-source record to crawl; it can be loaded from the database
    proxy.start(website);
}
Calling the main method starts the timers, which crawl data from each configured website at its own interval.
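CrawlerFactory itself is not shown in the article; a minimal sketch consistent with the proxy class above might look like the following (the factory body is an assumption, not original code):

/**
 * Hypothetical factory that hides the cglib proxy creation
 */
public class CrawlerFactory {
    public static CrawlerProxy create() {
        // CrawlerTask is the concrete class the Enhancer subclasses; the
        // interceptor wraps its start() method with the scheduling logic
        return CrawlerCglibProxy.create(CrawlerTask.class);
    }
}

Starting every configured data source then reduces to loading the records and calling start() once per site, e.g. for (CrawlerWebsiteModelOut website : crawlerService.listWebsites()) { proxy.start(website); }, where listWebsites() is an assumed query method on CrawlerService. Since the interceptor keys its Timer map by website id, calling start() twice for the same source is harmless.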