u采 Scraping: Switching Proxies on Retry to Cope with Unstable Free Proxies (Proxy-Harvester Download Attached)

优采云 Published: 2021-12-05 14:25

  1. Proxy file format (as produced by the proxy harvester):

  
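  The original post illustrated the format with a screenshot that has not survived. Judging from how IPpool() parses the file (line.split() into ip, port, protocol), each line of ip.txt should look something like the following; the addresses here are made-up placeholders:

119.28.112.130 8080 http
47.94.57.119 3128 http
120.26.123.95 8010 https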

  2. Free proxies are not reliably stable, so the script uses a decorator to retry failed requests, switching to a different proxy on each attempt:

# coding: utf-8
# Biquge single-novel scraper: http://www.biquge.com.tw
# Replace the first-chapter URL and the total chapter count below.
# ip.txt holds the proxy pool.
import urllib2
from bs4 import BeautifulSoup
import sys
import traceback
import random

# Python 2 hack so writing Chinese text to out.txt does not raise
# UnicodeEncodeError.
reload(sys)
sys.setdefaultencoding('utf-8')

f = open("out.txt", "a+")
headers = {
    "Host": "www.biquge.com.tw",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "X-Requested-With": "XMLHttpRequest",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1"
}

url = "http://www.biquge.com.tw/17_17281/7647045.html"  # first-chapter URL
page = 1301  # total number of chapters
nextHref = url
ipPool = []

def IPpool():
    # Load the proxy pool: every non-empty line of ip.txt becomes
    # [ip, port, protocol].
    reader = open('ip.txt')
    line = reader.readline()
    while line:
        if line.strip() != '':
            ipPool.append(line.split())
        line = reader.readline()
    reader.close()

RETRIES = 0
# Shared retry counter for the decorator below.
count = {"num": RETRIES}

def conn_try_again(function):
    # Decorator: on any exception, call the wrapped function again, up to
    # 5 times in total. Because getContent picks a random proxy on every
    # call, each retry automatically switches to a different proxy.
    def wrapped(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except Exception as err:
            print("--retrying, attempt %s of 5--" % (count['num'] + 1))
            if count['num'] < 5:
                count['num'] += 1
                return wrapped(*args, **kwargs)
            else:
                raise Exception(err)
    return wrapped

bsObj = None

@conn_try_again
def getContent(url):
    global nextHref, page, bsObj
    # Proxy switch: True = go through a proxy from the pool,
    # False = always connect directly.
    proxySwitch = True
    try:
        poolLen = len(ipPool)
        if poolLen > 0:
            # Pick a random proxy; each entry is [ip, port, protocol].
            i = random.randint(0, poolLen - 1)
            print(ipPool[i])
            proxy_host = ipPool[i][2] + "://" + ipPool[i][0] + ":" + ipPool[i][1]
            proxy_temp = {ipPool[i][2]: proxy_host}
            proxy_support = urllib2.ProxyHandler(proxy_temp)
        else:
            print('--proxy pool is empty, using the local address--')
            proxy_support = urllib2.ProxyHandler({})
        nullproxy_handler = urllib2.ProxyHandler({})
        if proxySwitch:
            opener = urllib2.build_opener(proxy_support)
        else:
            opener = urllib2.build_opener(nullproxy_handler)
        urllib2.install_opener(opener)
        req = urllib2.Request(url, headers=headers)
        response = urllib2.urlopen(req, timeout=3)
        bsObj = BeautifulSoup(response, 'lxml')
    except Exception as err:
        # Re-raise so the decorator retries with a freshly chosen proxy.
        raise Exception(err)
    content = bsObj.find('div', id='content').get_text()
    preAndNextBar = bsObj.find('div', attrs={'class': 'bottem2'})
    title = bsObj.find('div', attrs={'class': 'bookname'}).h1.get_text()
    if "下一章" in preAndNextBar.get_text():  # "下一章" = "next chapter"
        nextLink = None
        for a in preAndNextBar.findAll('a'):
            if "下一章" in a.get_text():
                nextLink = a
        if nextLink is None:
            print("no next-chapter link found")
            return True
        nextHref = "http://www.biquge.com.tw" + nextLink.get('href')
        print(title)
        print(nextHref)
        f.write("#####" + '\n')
        f.write(title + '\n')
        f.write(content + '\n')
        count['num'] = 0  # reset the retry counter after a successful fetch
    else:
        return True

def main():
    IPpool()
    global page
    try:
        for num in range(1, page):
            # getContent returns True when there is no next chapter.
            if getContent(nextHref):
                break
        print("--- end ---")
    except Exception:
        traceback.print_exc()
    finally:
        f.close()

main()
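
  Note: the script above is Python 2 only (urllib2, reload(sys)). As a rough illustration of the same proxy-switching fetch on Python 3, it could be rewritten along these lines; this is an untested sketch, not part of the original attachment, and the retry decorator above carries over unchanged:

# coding: utf-8
# Hypothetical Python 3 port of the proxy-switching fetch above.
import random
import urllib.request

def load_pool(path='ip.txt'):
    # Same file format as above: "ip port protocol" per non-empty line.
    with open(path) as fp:
        return [line.split() for line in fp if line.strip()]

def fetch(url, pool, headers, timeout=3):
    # Pick a random proxy from the pool; fall back to a direct connection.
    if pool:
        ip, port, proto = random.choice(pool)
        handler = urllib.request.ProxyHandler({proto: "%s://%s:%s" % (proto, ip, port)})
    else:
        handler = urllib.request.ProxyHandler({})  # empty pool: connect directly
    opener = urllib.request.build_opener(handler)
    req = urllib.request.Request(url, headers=headers)
    return opener.open(req, timeout=timeout).read()

# usage: html = fetch(url, load_pool(), headers)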

  Attachment: proxy harvester

  Download link:
