u采 采集(免费代理稳定性不可靠,重连同时切换代理,附代理采集下载地址)
优采云 发布时间: 2021-12-05 14:25
1、代理文件格式:(代理采集地址)
2、免费代理稳定性不可靠,使用装饰器重连,同时切换代理
# coding: utf-8
# 笔趣阁 单篇小说采集 http://www.biquge.com.tw
# 替换第一章地址,总章节数。
# ip.txt 为代理池。
import functools
import random
import sys
import traceback
import urllib2

from bs4 import BeautifulSoup
# Python 2 only: re-import sys to restore setdefaultencoding(), then make
# UTF-8 the process-wide default codec so Chinese titles/content can be
# written to the output file without explicit .encode() calls.
reload(sys)
sys.setdefaultencoding('utf-8')
# Output file shared by getContent()/main(); opened in append mode so a
# re-run continues the existing file instead of truncating it.
f = open("out.txt", "a+")
# Browser-like request headers sent with every chapter fetch so the site
# serves normal HTML to the scraper.
headers = {
"Host": "www.biquge.com.tw",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1"
}
url = "http://www.biquge.com.tw/17_17281/7647045.html" # first-chapter URL; replace to crawl another novel
page = 1301 # total number of chapters to crawl
nextHref = url # URL of the next chapter to fetch; advanced by getContent()
# Shared proxy pool; each entry is a whitespace-split line from ip.txt,
# i.e. [ip, port, scheme].
ipPool = []
def IPpool():
    """Load the proxy pool from ip.txt into the global ipPool list.

    Each non-blank line is whitespace-split (expected format
    "ip port scheme", e.g. "1.2.3.4 8080 http") and appended to ipPool.
    """
    # 'with' closes the file even if reading raises; the original manual
    # readline() loop leaked the handle on an exception.
    with open('ip.txt') as reader:
        for line in reader:
            if line.strip():  # skip blank lines
                ipPool.append(line.split())
RETRIES = 0
# Shared retry counter — a dict so the nested closure can mutate it on
# Python 2 (no `nonlocal`).  getContent() resets it to 0 after each
# successfully saved chapter.
count = {"num": RETRIES}
def conn_try_again(function):
    """Decorator: retry the wrapped function up to 5 times when it raises.

    Each retry re-enters the function, which picks a fresh random proxy
    from ipPool — so retrying doubles as proxy rotation.  After the 5th
    failure the last exception is re-raised to the caller.
    """
    @functools.wraps(function)  # preserve the wrapped function's identity
    def wrapped(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except Exception:
            print("--重试访问,当前次数 %s ,(总次数5)--" % (count['num'] + 1))
            if count['num'] < 5:
                count['num'] += 1
                return wrapped(*args, **kwargs)
            # Re-raise the original exception unchanged; the old code
            # wrapped it in a bare Exception, losing type and traceback.
            raise
    return wrapped
# Parsed BeautifulSoup document of the most recently fetched page.
bsObj = None
@conn_try_again
def getContent(url):
    """Fetch one chapter page, append title+body to out.txt, and advance
    the global nextHref to the following chapter.

    Returns True when there is no next chapter (crawl finished), else
    None.  Any network/parse exception propagates so conn_try_again can
    retry with a different random proxy.
    """
    global nextHref, page, bsObj
    # Proxy switch: True = route through a random proxy from ipPool,
    # False = always connect directly.
    proxySwitch = True
    poolLen = len(ipPool)
    if poolLen > 0:
        # Pick a random pool entry; format is [ip, port, scheme].
        i = random.randint(0, poolLen - 1)
        print(ipPool[i])
        proxy_host = ipPool[i][2] + "://" + ipPool[i][0] + ":" + ipPool[i][1]
        proxy_temp = {ipPool[i][2]: proxy_host}
        proxy_support = urllib2.ProxyHandler(proxy_temp)
    else:
        print('--代理池当前无可用代理,使用本机地址访问--')
        proxy_support = urllib2.ProxyHandler({})
    nullproxy_handler = urllib2.ProxyHandler({})  # direct-connection handler
    if proxySwitch:
        opener = urllib2.build_opener(proxy_support)
    else:
        opener = urllib2.build_opener(nullproxy_handler)
    urllib2.install_opener(opener)
    req = urllib2.Request(url, headers=headers)
    # Short timeout: free proxies are slow/dead; fail fast and let the
    # retry decorator rotate to another one.
    response = urllib2.urlopen(req, timeout=3)
    bsObj = BeautifulSoup(response, 'lxml')
    content = bsObj.find('div', id='content').get_text()
    preAndNextBar = bsObj.find('div', attrs={'class': 'bottem2'})
    title = bsObj.find('div', attrs={'class': 'bookname'}).h1.get_text()
    if ("下一章" in preAndNextBar.get_text()):
        # `next_link` rather than `next`: avoid shadowing the builtin.
        next_link = None
        for a in preAndNextBar.findAll('a'):
            if ("下一章" in a.get_text()):
                next_link = a
        if next_link is None:
            print("下一章为空")
            return True
        nextHref = "http://www.biquge.com.tw" + next_link.get('href')
        print(title)
        print(nextHref)
        f.write("#####" + '\n')
        f.write(title + '\n')
        f.write(content + '\n')
        # Chapter saved successfully: reset the shared retry counter.
        count['num'] = 0
    else:
        return True
def main():
    """Load the proxy pool, then crawl up to `page` chapters starting
    from the global nextHref, writing each to out.txt."""
    IPpool()
    global page
    try:
        for num in range(1, page):
            # getContent returns True when there is no next chapter.
            if getContent(nextHref):
                break
        print("--- end ---")
    except Exception:
        # print_exc() writes the traceback itself; the original wrapped it
        # in print(), which also printed a spurious "None".
        traceback.print_exc()
    finally:
        f.close()
if __name__ == "__main__":
    main()
附件:代理采集
下载链接: