Python使用Selenium模块模拟浏览器抓取斗鱼直播间信息示例

 2023-09-23 阅读 34 评论 0

摘要:本文实例讲述了Python使用Selenium模块模拟浏览器抓取斗鱼直播间信息,分享给大家供大家参考,具体如下。

本文实例讲述了Python使用Selenium模块模拟浏览器抓取斗鱼直播间信息。分享给大家供大家参考,具体如下:

import time

from multiprocessing import Pool
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup
from pymongo import MongoClient
from pymongo.errors import PyMongoError

# MongoDB connection settings
MONGO_HOST = "localhost"
MONGO_DATABASE = "douyu"
MONGO_TABLE = "zhibo"

client = MongoClient(host=MONGO_HOST)
db = client[MONGO_DATABASE]

# PhantomJS command-line options
# see http://phantomjs.org/api/command-line.html
SERVICE_ARGS = ['--disk-cache=true', '--load-images=false']

# driver = webdriver.Chrome()  # headed browser alternative
# NOTE(review): PhantomJS support was removed in Selenium 4 — on modern
# Selenium switch to headless Chrome/Firefox instead.
driver = webdriver.PhantomJS(service_args=SERVICE_ARGS)  # headless browser

delay = 10  # explicit-wait timeout, in seconds
wait = WebDriverWait(driver, delay)
driver.maximize_window()

def get_total_pages():
    """Open the Douyu directory, scrape page 1, and return the total page count.

    Side effects: parses the rooms on page 1 and writes them to MongoDB.

    :return: total number of pager pages, as an int.
    """
    url = 'https://www.douyu.com/directory/all'
    driver.get(url)
    # The pager item right after the "..." dot shows the last page number.
    pages = int(driver.find_element_by_css_selector(
        '.shark-pager-dot + .shark-pager-item').text)
    print("正在获取第1页数据")
    room_list = get_rooms_by_beautifulsoup()
    save_to_monogodb(room_list)
    return pages

# 根据页码获取指定页数据,并将其保存到数据库中

def parse_page(page_num, retry_limit=5):
    """Fetch one directory page by number and save its rooms to MongoDB.

    Navigates by typing the page number into the pager's jump box, clicking
    the "go" button, and waiting until the pager highlights the requested
    page (i.e. the content has actually switched) before parsing.

    :param page_num: 1-based page number to fetch.
    :param retry_limit: remaining retries after a timeout; prevents the
        original unbounded recursion when a page keeps failing.
    """
    print("正在获取第%d页数据" % page_num)
    try:
        page_num_box = wait.until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, "input.jumptxt")))
        go_btn = wait.until(EC.element_to_be_clickable(
            (By.CSS_SELECTOR, 'a.shark-pager-submit')))
        page_num_box.clear()
        page_num_box.send_keys(page_num)
        go_btn.click()
        # By.CLASS_NAME cannot be used here: compound class names
        # ("shark-pager-item current") are not permitted by Selenium.
        wait.until(
            EC.text_to_be_present_in_element(
                (By.CSS_SELECTOR,
                 '.shark-pager-item.current'),
                str(page_num)))
        room_list = get_rooms_by_beautifulsoup()
        save_to_monogodb(room_list)
    except TimeoutException:
        print("请求第%d页失败" % page_num)
        if retry_limit > 0:
            print("尝试重新获取第%d页" % page_num)
            return parse_page(page_num, retry_limit - 1)

# 通过bs4解析数据

def get_rooms_by_beautifulsoup():
    """Parse the current page with BeautifulSoup.

    Waits for the live-room list to be present, then yields one dict per
    room containing its name, viewer count, tag, and host name.
    """
    wait.until(EC.presence_of_element_located(
        (By.CSS_SELECTOR, "ul#live-list-contentbox > li")))
    soup = BeautifulSoup(driver.page_source, 'lxml')
    for item in soup.select('ul#live-list-contentbox > li'):
        title = item.find(
            'h3', attrs={'class': 'ellipsis'}).get_text(strip=True)
        viewers = item.find('span', class_='dy-num fr').text
        category = item.find('span', class_='tag ellipsis').text
        host = item.find('span', class_='dy-name ellipsis fl').text
        yield {
            'room_name': title,
            'view_count': viewers,
            'tag': category,
            'hostname': host,
        }

def save_to_monogodb(room_list):
    """Insert each room dict from *room_list* into the MongoDB collection.

    A failed insert is logged per document and does not abort the rest of
    the batch.

    :param room_list: iterable of room dicts (as yielded by
        get_rooms_by_beautifulsoup).
    """
    for room in room_list:
        try:
            # insert_one replaces the deprecated Collection.insert,
            # which was removed in PyMongo 4.
            db[MONGO_TABLE].insert_one(room)
            print("mongodb插入数据成功:", room)
        except PyMongoError as e:
            print("mongodb插入数据失败:", room, e)

if __name__ == '__main__':
    total_pages = 0  # pre-bind: finally must not raise NameError if get_total_pages() fails
    try:
        total_pages = get_total_pages()
        # Page 1 is already scraped inside get_total_pages().
        for page_num in range(2, total_pages + 1):
            parse_page(page_num)
    except Exception as e:
        print("出错了", e)
    finally:  # make sure the browser always shuts down
        print("共有%d页" % total_pages)
        # quit() terminates the whole browser process; close() would only
        # close the current window and leak the PhantomJS process.
        driver.quit()

更多关于Python相关内容可查看本站相关专题。

希望本文所述对大家Python程序设计有所帮助。

希望与广大网友互动!

点此进行留言吧!

版权声明:本站所有资料均为网友推荐收集整理而来,仅供学习和研究交流使用。

原文链接:https://hbdhgg.com/4/86979.html

发表评论:

本站为非赢利网站,部分文章来源或改编自互联网及其他公众平台,主要目的在于分享信息,版权归原作者所有,内容仅供读者参考,如有侵权请联系我们删除!

Copyright © 2022 匯編語言學習筆記 Inc. 保留所有权利。

底部版权信息