1. 首页
  2. 编程语言
  3. Python
  4. Scrapy框架简单应用:爬取免费的西刺代理IP

Scrapy框架简单应用:爬取免费的西刺代理IP

上传者: 2021-05-20 18:10:43上传 .ZIP文件 14.62 KB 热度 23次

利用Scrapy框架爬取免费的西刺代理IP

内含模拟浏览器,利用代理池反反爬虫

from urllib.request import ProxyHandler, build_opener

import re

from urllib import request

# This commented-out section saved the scraped records to MongoDB instead
# of a text file; kept for reference.
'''
client = MongoClient('localhost', 27017)

db_auth = client.admin
db_auth.authenticate("root", "123")

db = client['代理']
collection = db['IP代理']
'''

# Save working proxies to a local text file, one "ip:port" per line.
# FIX: the original fused this statement onto the same line as the string
# comment above it, which is a SyntaxError.
f = open('可用代理.txt', 'w', encoding='utf-8')

class ProxyPipeline(object):
    """Scrapy item pipeline that keeps only proxies that actually respond.

    Each scraped ``ip:port`` pair is used to fetch a check URL through the
    proxy itself; pairs that answer are appended to the module-level file
    ``f`` (one per line) and printed, the rest are silently skipped.
    """

    def process_item(self, item, spider):
        """Validate every proxy carried by *item* and record the usable ones.

        :param item: item whose fields (``IP``, ``port``, ``nmd``, ``type``,
            ``addr``, ``lastime``) are parallel lists — one entry per row
            scraped from the listing page.
        :param spider: the spider that yielded the item (unused here).
        """
        # Loop-invariant request data: hoisted out of the per-proxy loop.
        head = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
        }
        url = 'http://www.xdaili.cn/monitor'

        # FIX: the original lost the '+' operators in the string
        # concatenations below during extraction ("item['IP'][i] ':' ...").
        for ip, port in zip(item['IP'], item['port']):
            proxy = ip + ':' + port
            # Route the check request through the candidate proxy.
            opener = build_opener(ProxyHandler({'http': 'http://' + proxy}))
            try:
                req = request.Request(url, headers=head)
                data = opener.open(req).read().decode('utf-8')
                if data:
                    # The MongoDB variant (commented out above the class)
                    # stored the full record; here only the address is kept.
                    f.write(proxy)
                    f.write('\n')
                    print(proxy)
            except Exception:
                # Best-effort by design: a dead or broken proxy is skipped.
                pass

getip.py

import scrapy
from proxy.items import ProxyItem
import re


class GetipSpider(scrapy.Spider):
    """Spider that scrapes free proxies from xicidaili.com.

    Starts on the ``/wn/`` and ``/wt/`` listings, emits one ``ProxyItem``
    per page whose fields are parallel lists (one entry per table row with
    class ``odd``), then follows pages 2-3 of the same listing.
    """

    name = 'getip'
    allowed_domains = ['xicidaili.com']
    start_urls = [
        'http://www.xicidaili.com/wn/',
        'http://www.xicidaili.com/wt/',
    ]

    def parse(self, response):
        """Extract proxy rows from one listing page and follow-up pages.

        :param response: listing page response.
        :yields: a ``ProxyItem`` with parallel-list fields, then one
            ``scrapy.Request`` per follow-up page (pages 2 and 3).
        """
        item = ProxyItem()
        # One column per field, taken from every <tr class="odd"> row.
        item['IP'] = response.xpath("//tr[@class = 'odd']/td[2]/text()").extract()
        item['port'] = response.xpath("//tr[@class = 'odd']/td[3]/text()").extract()
        item['nmd'] = response.xpath("//tr[@class = 'odd']/td[5]/text()").extract()
        item['type'] = response.xpath("//tr[@class = 'odd']/td[6]/text()").extract()
        item['addr'] = response.xpath("//tr[@class = 'odd']/td[4]/a/text()").extract()
        item['lastime'] = response.xpath("//tr[@class = 'odd']/td[10]/text()").extract()
        yield item

        # The last pagination link's href encodes the current section
        # (e.g. '/wn/'); recover it so follow-up requests stay in the same
        # listing. Renamed from 'type', which shadowed the builtin.
        last_href = response.xpath("//div[@class='pagination']/a[last()]/@href").extract()[0]
        section = re.findall(r'/(.*?)/', last_href, re.S)

        # Follow pages 2 and 3 (the most recently verified proxies).
        for page in range(2, 4):
            # FIX: format the URL *before* urljoin — the original applied
            # '%' to urljoin's return value, which only worked because the
            # URL is absolute and passes through urljoin unchanged.
            next_page = response.urljoin('http://www.xicidaili.com/%s/%s/' % (section[0], page))
            print(next_page)
            yield scrapy.Request(next_page, callback=self.parse)

下载地址
用户评论