Tutorial: Python and SEO tool scripts, source code reference for collecting 360/Sogou related search terms
Related search terms from the search engines are a keyword-expansion category that plenty of SEOers hunt down and put to use. Besides the ever-popular Baidu related-search collection there are, of course, the 360 and Sogou search engines. Once you know the method, the Python side is basically identical; the only things you really need to care about are the terms themselves and the anti-crawling limits!
And no, this is the second time this scrub has crashed and burned on 360 Search. Note: the second time. The first flop was back while collecting 360 search questions and answers. Truly a case of the scar healing and the pain being forgotten; it has been too long!!
Brute force works miracles on 360 Search. No, wait: brute force gets you CAPTCHAs.
This scrub used regex to grab the related keywords here. After consulting a pile of reference source code, regex turned out to be the more convenient and faster option!
Key source line for 360 Search related keywords:
re.findall(r'(.+?)</a>', html, re.S | re.I)
Key source line for Sogou related keywords:
re.findall(r'<p>(.+?)</a>', html, re.S | re.I)
Take these as a reference for your own study; beyond that there is honestly not much to say!
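For context on how those one-liners are meant to be used, here is a minimal sketch that fetches a 360 results page and runs a findall over it. The generic anchor pattern and the User-Agent value are assumptions of mine, not the article's; you would narrow the pattern to the related-search block after inspecting the live page source.

# A minimal sketch (my assumptions, not the original script): fetch a 360
# SERP and pull anchor texts with a generic pattern, then narrow it to the
# related-search block once you have inspected the live page source.
import re
import requests

def related_terms(keyword):
    url = 'https://www.so.com/s?q={}'.format(keyword)
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}  # any desktop UA works
    html = requests.get(url, headers=headers, timeout=10).text
    # generic form; expect noise until you anchor it to the related block
    return re.findall(r'<a[^>]*>([^<]+)</a>', html, re.S | re.I)

if __name__ == '__main__':
    print(related_terms('python'))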
Attached below is the 360 related-search-term collection source code for everyone's reference and study! PS: no code written for the other one; I have no use for it, so how would I write it?!
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
360相关搜索词挖掘脚本(多线程版)
基于python3.8
需要安装requests模块
@author:微信/huguo00289
"""
import re
import random
from queue import Queue
from threading import Thread

import requests
class Qh360Spider(Thread):

    result = {}   # dict holding the harvested keywords
    seen = set()  # keywords already crawled or already sitting in the queue

    def __init__(self, kw_queue, loop, failed):
        super(Qh360Spider, self).__init__()
        self.kw_queue = kw_queue  # keyword queue
        self.loop = loop          # how many expansion rounds to run per word
        self.failed = failed      # file recording keywords whose query failed
        self.ua_list = [
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
            'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
            'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
        ]
    def run(self):  # main execution flow of each worker thread
        while True:
            # pull a keyword and its current expansion depth off the queue
            kw, cloop = self.kw_queue.get()
            print('CurLoop:{} Checking: {}'.format(cloop, kw))
            query = 'https://www.so.com/s?q={}'.format(kw)  # build the SERP url for this keyword
            try:
                source = self.download(query, timeout=10)
                # source = self.download(query, timeout=10, user_agent=self.ua)
                if source:
                    kw_list = self.extract(source)
                    print(kw_list)
                    self.filter(cloop, kw_list)
                else:
                    # failed to fetch the page source; log the failed keyword
                    self.failed.write('{}\n'.format(kw))
            finally:
                self.kw_queue.task_done()
    def download(self, url, timeout=5, proxy=None, num_retries=5):
        """
        Generic page-source download function.
        :param url: the url to download
        :param timeout: request timeout in seconds; some sites respond slowly, so a connect timeout is needed
        :param proxy: http proxy; requests expects a dict mapping scheme to 'ip:port'
        :param num_retries: how many times to retry on failure
        :return: the HTML page source
        """
        headers = {
            # the original Cookie value did not survive reposting intact; paste your own from the browser
            'Cookie': 'PASTE_YOUR_360_COOKIE_HERE',
            'User-Agent': random.choice(self.ua_list),
        }
        try:
            # request the page; the body is decoded into html below
            resp = requests.get(url, headers=headers, proxies=proxy, timeout=timeout)
            print(resp.status_code)
        except requests.RequestException as err:
            print('Download error:', err)
            html = None  # on an exception nothing was fetched, so fall back to None
            if num_retries > 0:
                return self.download(url, timeout, proxy, num_retries - 1)
        else:
            html = resp.content.decode('utf-8')
            # print(html)
        return html
    @staticmethod
    def extract(html):
        '''
        Extract keywords.
        :param html: source of the search results page
        :return: list of related keywords pulled out of it
        '''
        return re.findall(r'(.+?)</a>', html, re.S | re.I)
    def filter(self, current_loop, kwlist):
        '''
        Keyword filtering and bookkeeping.
        :param current_loop: the current expansion depth
        :param kwlist: list of keywords just extracted
        '''
        for kw in kwlist:
            # skip keywords already crawled or queued, and stop expanding once
            # the depth limit is reached (guard reconstructed; the original
            # line was mangled in the repost)
            if kw not in Qh360Spider.seen and current_loop < self.loop:
                Qh360Spider.seen.add(kw)
                self.kw_queue.put((kw, current_loop + 1))
            Qh360Spider.result[kw] = 1  # record every harvested keyword

if __name__ == '__main__':
    # Reconstructed main block; seed file, output file, thread count and
    # expansion depth are plausible defaults, not the original values.
    kw_queue = Queue()
    with open('keywords.txt', encoding='utf-8') as kw_file:
        for word in kw_file:
            kw_queue.put((word.strip(), 1))
            Qh360Spider.seen.add(word.strip())
    failed = open('failed_keywords.txt', 'w', encoding='utf-8')
    for _ in range(5):
        spider = Qh360Spider(kw_queue, 3, failed)
        spider.daemon = True
        spider.start()
    kw_queue.join()
    failed.close()
    save = open('360_keywords.txt', 'w', encoding='utf-8')
    for kw in Qh360Spider.result:
        line = '{}\n'.format(kw)
        if len(line) > 0:
            print("got something")
            save.write(line)
            save.flush()  # flush the buffer so a mid-run crash loses less
    save.close()
    print('done, mining finished')
If you have no access to proxy IPs, this is very easy to run as-is; after all, you can brute-force it and go harvest CAPTCHAs to your heart's content. The speed is acceptable, but it gets banned by 360 Search's anti-crawling far too easily. For normal, stable operation you would need proxy IPs whose standing you can never quite be sure of, plus a cookie pool on top of that!
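For what it's worth, here is a minimal sketch of what rotating proxies and cookies around each request could look like. The proxy addresses and cookie strings are placeholders you would have to supply yourself; requests takes proxies as a dict mapping scheme to address.

# Sketch only: rotate a proxy and a cookie per request. The pool contents
# below are placeholders, not working values.
import random
import requests

PROXIES = ['1.2.3.4:8888', '5.6.7.8:8888']           # your proxy IP pool
COOKIES = ['QiHooGUID=...; Q=...', 'QiHooGUID=...']  # your cookie pool

def fetch(url, ua_list, timeout=10):
    ip = random.choice(PROXIES)
    headers = {
        'User-Agent': random.choice(ua_list),
        'Cookie': random.choice(COOKIES),
    }
    proxies = {'http': 'http://' + ip, 'https': 'http://' + ip}
    return requests.get(url, headers=headers, proxies=proxies, timeout=timeout)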
Technical article: Python and SEO, keyword query collection source code for three major SEO query tool sites!
Site keyword query mining, covering the three commonly used SEO query tool sites: Aizhan (爱站), Chinaz (站长之家) and 5118. Aizhan and Chinaz let you query up to 50 pages, 5118 up to 100. If you want a site's complete keyword ranking data you have to pay for membership, and even free queries require a registered account, otherwise you have no query permission at all!
5118
You have to fill in the site URL and the Cookie header yourself; querying requires a logged-in account!
# 5118 site keyword collection
# -*- coding: utf-8 -*-
import time
import logging

import requests
from lxml import etree

logging.basicConfig(filename='s5118.log', level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')

Cookie = ""  # paste your logged-in 5118 Cookie string here

# fetch the keywords on one result page
def get_keywords(site, page):
    url = "https://www.5118.com/seo/baidupc"
    headers = {
        "Cookie": Cookie,
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
    }
    data = {
        "isPager": "true",
        "viewtype": 2,
        "days": 90,
        "url": site,
        "orderField": "Rank",
        "orderDirection": "sc",
        "pageIndex": page,
        "catalogName": "",
        "referKeyword": "",
    }
    response = requests.post(url=url, data=data, headers=headers, timeout=10)
    print(response.status_code)
    html = response.content.decode('utf-8')
    tree = etree.HTML(html)
    keywords = tree.xpath('//td[@class="list-col justify-content "]/a[@class="w100 all_array"]/text()')
    print(keywords)
    save_txt(keywords, site)
    return keywords
# save as a csv file
def save_csv(keywords, site):
    filename = site.replace("www.", '').replace(".com", '').replace(".cn", '').replace('https://', '').replace('http://', '')
    with open(f'5118_{filename}.csv', 'a+', encoding='utf-8-sig') as f:
        for keyword in keywords:
            f.write(f'{keyword}\n')
    print("Keyword list saved!")

# save as a txt file
def save_txt(keywords, site):
    filename = site.replace("www.", '').replace(".com", '').replace(".cn", '').replace('https://', '').replace('http://', '')
    with open(f'5118_{filename}.txt', 'a+', encoding='utf-8') as f:
        for keyword in keywords:
            f.write(f'{keyword}\n')
    print("Keyword list saved!")
def main(site):
    logging.info(f"Start crawling keyword data for site {site}..")
    num = 100  # 5118 allows up to 100 pages
    keys = []
    for page in range(1, num + 1):
        print(f"Crawling page {page}..")
        logging.info(f"Crawling page {page}..")
        try:
            keywords = get_keywords(site, page)
            keys.extend(keywords)
            time.sleep(8)
        except Exception as e:
            print(f"Failed to crawl page {page} -- error: {e}")
            logging.error(f"Failed to crawl page {page} -- error: {e}")
            time.sleep(10)
    keys = set(keys)  # dedupe
    save_csv(keys, site)

if __name__ == '__main__':
    site = ""
    main(site)
Aizhan (爱站)
You have to fill in the site URL and the Cookie header yourself; querying requires a logged-in account!
# Aizhan site keyword collection
# -*- coding: utf-8 -*-
import time
import logging

import requests
from lxml import etree

logging.basicConfig(filename='aizhan.log', level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')

Cookie = ""  # paste your logged-in Aizhan Cookie string here

# fetch the keywords on one result page
def get_keywords(site, page):
    url = f"https://baidurank.aizhan.com/baidu/{site}/-1/0/{page}/position/1/"
    headers = {
        "Cookie": Cookie,
    }
    response = requests.get(url=url, headers=headers, timeout=10)
    print(response.status_code)
    html = response.content.decode('utf-8')
    tree = etree.HTML(html)
    keywords = tree.xpath('//td[@class="title"]/a[@class="gray"]/@title')
    print(keywords)
    save_txt(keywords, site)
    return keywords
# save as a csv file
def save_csv(keywords, site):
    filename = site.replace("www.", '').replace(".com", '').replace(".cn", '').replace('https://', '').replace('http://', '')
    with open(f'aizhan_{filename}.csv', 'a+', encoding='utf-8-sig') as f:
        for keyword in keywords:
            f.write(f'{keyword}\n')
    print("Keyword list saved!")

# save as a txt file
def save_txt(keywords, site):
    filename = site.replace("www.", '').replace(".com", '').replace(".cn", '').replace('https://', '').replace('http://', '')
    with open(f'aizhan_{filename}.txt', 'a+', encoding='utf-8') as f:
        for keyword in keywords:
            f.write(f'{keyword}\n')
    print("Keyword list saved!")
def main(site):
    logging.info(f"Start crawling keyword data for site {site}..")
    num = 50  # Aizhan allows up to 50 pages
    keys = []
    for page in range(1, num + 1):
        print(f"Crawling page {page}..")
        logging.info(f"Crawling page {page}..")
        try:
            keywords = get_keywords(site, page)
            keys.extend(keywords)
            time.sleep(8)
        except Exception as e:
            print(f"Failed to crawl page {page} -- error: {e}")
            logging.error(f"Failed to crawl page {page} -- error: {e}")
            time.sleep(10)
    keys = set(keys)  # dedupe
    save_csv(keys, site)

if __name__ == '__main__':
    site = ""
    main(site)
Chinaz (站长之家)
You have to fill in the site URL and the Cookie header yourself; querying requires a logged-in account!
# Chinaz (站长之家) site keyword collection
# -*- coding: utf-8 -*-
import time
import logging

import requests
from lxml import etree

logging.basicConfig(filename='chinaz.log', level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')

Cookie = ""  # paste your logged-in Chinaz Cookie string here

# fetch the keywords on one result page
def get_keywords(site, page):
    headers = {
        "Cookie": Cookie,
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
    }
    url = f"https://rank.chinaz.com/{site}-0---0-{page}"
    response = requests.get(url=url, headers=headers, timeout=8)
    print(response.status_code)
    html = response.content.decode('utf-8')
    tree = etree.HTML(html)
    keywords = tree.xpath('//ul[@class="_chinaz-rank-new5b"]/li[@class="w230 "]/a/text()')
    print(keywords)
    save_txt(keywords, site)
    return keywords
# save as a csv file
def save_csv(keywords, site):
    filename = site.replace("www.", '').replace(".com", '').replace(".cn", '').replace('https://', '').replace('http://', '')
    with open(f'chinaz_{filename}.csv', 'a+', encoding='utf-8-sig') as f:
        for keyword in keywords:
            f.write(f'{keyword}\n')
    print("Keyword list saved!")

# save as a txt file
def save_txt(keywords, site):
    filename = site.replace("www.", '').replace(".com", '').replace(".cn", '').replace('https://', '').replace('http://', '')
    with open(f'chinaz_{filename}.txt', 'a+', encoding='utf-8') as f:
        for keyword in keywords:
            f.write(f'{keyword}\n')
    print("Keyword list saved!")
def main(site):
    logging.info(f"Start crawling keyword data for site {site}..")
    num = 50  # Chinaz allows up to 50 pages
    keys = []
    for page in range(1, num + 1):
        print(f"Crawling page {page}..")
        logging.info(f"Crawling page {page}..")
        try:
            keywords = get_keywords(site, page)
            keys.extend(keywords)
            time.sleep(8)
        except Exception as e:
            print(f"Failed to crawl page {page} -- error: {e}")
            logging.error(f"Failed to crawl page {page} -- error: {e}")
            time.sleep(10)
    keys = set(keys)  # dedupe
    save_csv(keys, site)

if __name__ == '__main__':
    site = ""
    main(site)
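All three scripts share the same page loop: fetch, sleep, collect, dedupe. If you keep all of them around, one option is to factor that loop into a generic pager that takes the per-site get_keywords function; a sketch under that assumption:

# Sketch: the fetch/sleep/dedupe loop shared by all three scripts, factored
# out. Pass in any of the get_keywords functions defined above.
import time
import logging

def crawl_pages(get_keywords, site, num_pages, delay=8):
    keys = []
    for page in range(1, num_pages + 1):
        logging.info(f"Crawling page {page}..")
        try:
            keys.extend(get_keywords(site, page))
            time.sleep(delay)  # stay polite; these sites ban fast crawlers
        except Exception as e:
            logging.error(f"Page {page} failed: {e}")
            time.sleep(delay + 2)
    return set(keys)  # dedupe

Each script's main() would then reduce to something like save_csv(crawl_pages(get_keywords, site, 50), site).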
Previously recommended:
Three parsing methods for Python crawlers, bringing you 360 search ranking queries
Python and SEO tools: a whole-web search query assistant exe
A Python Baidu dropdown-suggestion keyword collection tool
Source code for a Python sitemap.xml generation tool
Calling a translation API from Python for "smart" pseudo-original content
Baidu quick-rank via Python Selenium: searching for and visiting the target site
······· End ·······
Hello everyone, I'm Ershu,
a migrant worker from an old revolutionary-base area who made it into the city,
a not-early, not-professional internet webmaster,
fond of Python, writing, reading, English,
unpopular programs, self-media, SEO...
The public account makes no money; I'm just here being a netizen.
A reader chat group is up and running; find me and add the note "chat" to join us~
Follow Ershu~ for more shared Python content, writing and reading~