Python: scraping dynamic web pages with playwright in practice
Disclaimer: web scraping must comply with the relevant laws and regulations!
I've taken a liking to a certain fund website, but most of its data is served as dynamic pages. I looked at the selenium and requests approaches and eventually settled on playwright. After a day of tinkering, I can basically pull the data I want.
I. Synchronous approach: site A POC
from playwright.sync_api import sync_playwright
import pandas as pd
#import asyncio
#import re
#from playwright.async_api import async_playwright  # async API, not used in this sync POC
import random
import time

url = "http://fund.*****.com.cn//"  # a certain fund website
proxy_to_use = {  # proxy settings; defined here but not yet passed to launch()
    'server': "localhost:8080"
}
def get_index_info(data, str_code):
    #data = {}  # the dict returned, accumulated across calls
    code_url = url + str_code + "/interduce.html#interduce"
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=False)  # proxy_to_use could be attached here via proxy=
        page = browser.new_page()
        page.goto(code_url)
        #print(page.title())
        all_items = page.query_selector_all('.g-dialog')
        #print(f"all_items : {len(all_items)}")
        for item in all_items:
            text = item.inner_text()
            text_trim = trim_multi_char(text)  # strip redundant "\n" and " "
            contents = text_trim.split("\n")
            data = trim_data_each(data, contents, str_code)
            #print(f"values: {values}")
        browser.close()
    return data
def trim_multi_char(input_string):
    # drop all spaces and collapse runs of "\n" into a single "\n"
    strs = list(input_string)
    strs_len = len(strs)
    if strs_len < 1:
        return input_string
    out_strs = []
    for i in range(strs_len):
        if i == 0:  # first character
            if strs[0] not in [" ", ""]:
                out_strs.append(strs[0])
        else:
            if (strs[i] != strs[i-1] and strs[i] not in [" ", ""]) or (strs[i] == strs[i-1] and strs[i] not in ["\n", " ", ""]):
                out_strs.append(strs[i])
    return ''.join(out_strs)
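A quick sanity check of the behavior (the input string here is made up): every space is dropped and consecutive newlines collapse into one.

s = "基金代码\n\n  510050"
print(trim_multi_char(s))  # -> "基金代码\n510050"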
def get_standard_list_codes(raw_codes):
    # left-pad raw numeric codes into the standard 6-digit string form
    codes = []
    for _code in raw_codes:
        str_code = str(_code)
        if len(str_code) < 6:
            fix_code = "0" * (6 - len(str_code)) + str_code
        else:
            fix_code = str_code
        codes.append(fix_code)
    return codes
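The padding above is equivalent to Python's built-in str.zfill; a quick check (the short code 12 is made up for illustration):

print(get_standard_list_codes([510050, 12]))    # ['510050', '000012']
print([str(c).zfill(6) for c in [510050, 12]])  # same result with str.zfill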
def get_basket_index_raw_data(data, codes):
    n = 1
    for code in codes:
        print(f" download code : {code} => number {n} of {len(codes)}")
        rand_seconds = random.randint(0, 9)  # random pause so requests are not fired back to back
        time.sleep(rand_seconds)
        data = get_index_info(data, code)
        n = n + 1
        if n % 100 == 0:  # checkpoint to disk every 100 codes
            path = "C:\\Users\\Administrator\\Desktop\\index_fund\\" + str(n) + ".csv"
            df_temp = pd.DataFrame(data)
            df_temp.to_csv(path, encoding="gbk")
    return data
## raw_data format: [{code: "xxx"}, {code: "xxx"}]
def trim_data(raw_data):
    # batch variant of trim_data_each: flattens a list of {code: raw_text} dicts
    data = {}
    for dict_code in raw_data:  # one entry per fund
        for code in dict_code.keys():
            raw = dict_code[code]
            raw_trim = trim_multi_char(raw)  # strip redundant "\n" and " ", keep a single "\n"
            contents = raw_trim.split("\n")
            assert (len(contents) % 2 == 0), f"code: {code} items are not symmetric key->value pairs, please check!"
            for i in range(len(contents)):
                if i % 2 == 0 and i < len(contents) - 1:
                    _key = contents[i]
                    if _key == "":
                        continue
                    value = contents[i+1]
                    if "--" in value:  # "--" on the page means no data
                        value = ""
                    if _key in data.keys():
                        data[_key].append(value)
                    else:
                        data[_key] = [value]
    return data
def trim_data_each(data, contents, code):  # process a single record
    #raw_trim = trim_multi_char(raw_each)  # strip redundant "\n" and " ", keep a single "\n"
    #contents = raw_trim.split("\n")
    assert (len(contents) % 2 == 0), f"code: {code} items are not symmetric key->value pairs, please check!"
    for i in range(len(contents)):
        if i % 2 == 0 and i < len(contents) - 1:  # contents alternate key, value, key, value, ...
            _key = contents[i]
            if _key == "":
                continue
            value = contents[i+1]
            if "--" in value:  # "--" on the page means no data
                value = ""
            if _key in data.keys():
                data[_key].append(value)
            else:
                data[_key] = [value]
    return data
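To make the key/value pairing concrete, here is one made-up record going in and coming out:

sample = ["基金代码", "510050", "基金类型", "ETF", "最新规模", "--"]
print(trim_data_each({}, sample, "510050"))
# -> {'基金代码': ['510050'], '基金类型': ['ETF'], '最新规模': ['']}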
data = {}
index_fund_path = r"C:\Users\Administrator\Desktop\index_funds.csv"
funds_df = pd.read_csv(index_fund_path, encoding="gbk")  # dataframe of fund codes
raw_codes = list(funds_df.code)
#raw_codes = [510050]  # single-code test
codes = get_standard_list_codes(raw_codes)
data = get_basket_index_raw_data(data, codes)
df_last = pd.DataFrame(data)
df_last.to_csv(r"C:\Users\Administrator\Desktop\index_fund\index_last.csv", encoding="gbk")
Sample of the output CSV: on the whole, it worked.
II. Synchronous approach: site B POC
url = "http://fundf10.*****.com/****_168204.html" #B网站
def get_index_info():
data = {} #return data
code_url = url
with sync_playwright() as p:
browser = p.chromium.launch( headless=False) ## 代理
page = browser.new_page()
page.goto(code_url)
key_items = page.query_selector_all("table[class='info w790'] tr>th")#info w790
value_items = page.query_selector_all("table[class='info w790'] tr > td")#info w790
_keys =[]
_values =[]
for item in key_items:
text = item.inner_text()
_keys.append(text)
for _item in value_items:
_text = _item.inner_text()
_values.append(_text)
data = dict(list(zip(_keys,_values)))
browser.close()
return data
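Calling it is straightforward; the one-row DataFrame step is my own optional addition for saving the result:

info = get_index_info()
print(info)  # produces the dict shown below
df = pd.DataFrame([info])  # one fund -> one row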
Output:
{'基金全称': '中融中证煤炭指数型证券投资基金(LOF)', '基金简称': '中融中证煤炭指数(LOF)', '基金代码': '168204(主代码)', '基金类型': '指数型-股票', '发行日期': '2015年05月27日', '成立日期/规模': '2015年06月25日 / 2.115亿份', '资产规模': '11.21亿元(截止至:2021年09月30日)', '份额规模': '6.6585亿份(截止至:2021年09月30日)', '基金管理人': '中融基金', '基金托管人': '海通证券', '基金经理人': '赵菲', '成立来分红': '每份累计0.00元(0次)', '管理费率': '1.00%(每年)', '托管费率': '0.22%(每年)', '销售服务费率': '—(每年)', '最高认购费率': '1.00%(前端)', '最高申购费率': '0.00%(前端)', '最高赎回费率': '1.50%(前端)', '业绩比较基准': '95%×中证煤炭指数收益率+5%×同期银行活期存款利率(税后)', '跟踪标的': '中证煤炭指数'}
III. Review of the difficulties
Judging from the POCs on sites A and B, from a purely technical standpoint a playwright-based scraper is feasible.
1. Installing playwright
Installation, especially on Windows, is quite likely to involve some fiddling; the standard steps are shown below.
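For reference, these are the two standard installation commands (the second downloads the browser binaries, which is the step that most often fails on restricted networks):

pip install playwright
python -m playwright install chromium  # download the Chromium build that playwright drives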
2. Finding the nodes you need
For example:
all_items = page.query_selector_all('.g-dialog')
This step can take repeated trial and error. It is worth getting more familiar with this method and with CSS selectors in general (the HTML DOM querySelectorAll() method); a few selector patterns are shown below.
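A few selector patterns that cover most cases; the first two come from the POCs above, the third is a made-up illustration:

all_items = page.query_selector_all('.g-dialog')                   # by class
ths = page.query_selector_all("table[class='info w790'] tr > th")  # attribute match plus child combinator
links = page.query_selector_all("div#main a[href*='fund']")        # by id, then attribute substring match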
IV. Follow-up improvements
1. Async
The async version isn't working yet: playwright install hasn't succeeded on this machine, so the browsers were installed by downloading a few browser files by hand, and there is probably still a version mismatch. Installing playwright caused quite a few headaches and cost several hours. A sketch of the async variant follows.
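For when the installation issue is sorted out, here is a minimal async sketch of the site-A fetch using playwright's async API (untested here, since async isn't running in my environment yet):

import asyncio
from playwright.async_api import async_playwright

async def get_index_info_async(data, str_code):
    code_url = url + str_code + "/interduce.html#interduce"
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=False)
        page = await browser.new_page()
        await page.goto(code_url)
        all_items = await page.query_selector_all('.g-dialog')
        for item in all_items:
            text = await item.inner_text()
            contents = trim_multi_char(text).split("\n")  # reuse the sync helpers
            data = trim_data_each(data, contents, str_code)
        await browser.close()
    return data

# asyncio.run(get_index_info_async({}, "510050"))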
2. IP proxy pool
So far I haven't found a ready-made proxy-pool solution like those for the requests library; I'm still looking into it. Until a proxy pool is in place I can only run small tests, for fear of getting my IP banned. A minimal per-launch rotation is sketched below.
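playwright does accept a proxy per browser launch, so a simple rotation can be hand-rolled; a minimal sketch, assuming you already have a list of working proxy addresses (the addresses here are placeholders):

proxies = ["http://111.111.111.111:8080", "http://222.222.222.222:8080"]  # placeholders

def fetch_with_random_proxy(target_url):
    proxy = {"server": random.choice(proxies)}  # pick a different proxy per launch
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=False, proxy=proxy)
        page = browser.new_page()
        page.goto(target_url)
        html = page.content()
        browser.close()
    return html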
Of course, this is all just for learning and won't be used for any other purpose.