Python爬虫实战:使用Requests和BeautifulSoup抓取视频
import requests
import os
import json
# from moviepy.editor import VideoFileClip, AudioFileClip
from bs4 import BeautifulSoup
# Data scraping: hit the site's AJAX endpoint for the video metadata JSON,
# then download the video file it points to.
base_url = 'http://www.zkk78.com/index.php/user/ajax_ulog/?ac=set&mid=1&id=4721&sid=1&nid=1&type=4'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.55',
'Referer': 'http://www.zkk78.com/dongmanplay/1660-1-1.html',
'Cookie': 'first_h_kp=1689927842258; count_h_kp=1; first_m_kp=1689927842260; count_m_kp=1; __51cke__=; Hm_lvt_38ea8ed97fbe7c334fcc1878c579e5e0=1689927815; Hm_lvt_c11e70df18184f7263176ce90c8a9cc3=1689927817; PHPSESSID=2pnhv3rla9asc5i0l248i8dl83; user_id=20620; user_name=2143552649; group_id=2; group_name=%E9%BB%98%E8%AE%A4%E4%BC%9A%E5%91%98; user_check=b93515cf840072f8df045b0842e31513; user_portrait=%2Fstatic%2Fimages%2Ftouxiang.png; XLA_CI=43a5b7a031caf14cbacec3c121269bcf; first_h_kp=1689930340869; count_h_kp=1; first_m_kp=1689930340870; count_m_kp=1; history=%5B%7B%22name%22%3A%22%E4%BD%A0%E7%9A%84%E5%90%8D%E5%AD%97%22%2C%22pic%22%3A%22https%3A%2F%2Fcdn.yinghuazy.xyz%2Fupload%2Fvod%2F20210219-1%2F5e3b65ef9e6c582f09784d027cfb1923.jpg%22%2C%22link%22%3A%22%2Fdongmanplay%2F1660-1-1.html%22%2C%22part%22%3A%22%E5%85%A8%E9%9B%86%E6%97%A5%E8%AF%AD%22%7D%2C%7B%22name%22%3A%22%E5%9B%9E%E5%A4%8D%E6%9C%AF%E5%A3%AB%E7%9A%84%E9%87%8D%E6%9D%A5%E4%BA%BA%E7%94%9F%22%2C%22pic%22%3A%22https%3A%2F%2Fpic.rmb.bdstatic.com%2Fbjh%2F377f268028bb09e80035191cd9f15c58.jpeg%22%2C%22link%22%3A%22%2Fdongmanplay%2F4721-1-1.html%22%2C%22part%22%3A%22%E7%AC%AC01%E9%9B%86%22%7D%5D; Hm_lpvt_c11e70df18184f7263176ce90c8a9cc3=1689930351; Hm_lpvt_38ea8ed97fbe7c334fcc1878c579e5e0=1689930351; __tins__21589017=%7B%22sid%22%3A%201689930340967%2C%20%22vd%22%3A%206%2C%20%22expires%22%3A%201689932161945%7D; __51laig__=15',
}

# First request: fetch the metadata JSON for the video.
response = requests.get(base_url, headers=headers)
print(response.status_code)
data = response.text
print(data)

# Parse the JSON string into a Python dict.
json_data = json.loads(data)
print(json_data)
json_list = [json_data['info']]  # wrap the 'info' dict in a list for iteration
print(json_list)

# Fix: the output directory must exist before open() below, otherwise
# writing raises FileNotFoundError. (This also puts the imported `os` to use.)
save_dir = r'./视频'
os.makedirs(save_dir, exist_ok=True)

for item in json_list:
    print(item)  # inspect the item dict's keys and values
    if 'ulog_play_url' in item:
        # Use the record id ('ulog_rid', an int) as the filename.
        video_title = str(item['ulog_rid']) + '.mp4'
        video_url = item['ulog_play_url']
        print(video_title, video_url)
        print('正在下载:', video_title)
        # Second request: download the raw video bytes.
        video_data = requests.get(video_url, headers=headers).content
        with open(os.path.join(save_dir, video_title), 'wb') as f:
            f.write(video_data)
        print('下载完成\n')
    else:
        print('没有找到ulog_play_url键')
代码解析:
- 导入必要的库:
requests: 用于发送HTTP请求获取网页内容;os: 用于处理文件和目录;json: 用于解析JSON数据;bs4 (BeautifulSoup): 用于解析HTML页面(本例代码中导入但未实际使用)
- 设置目标URL和请求头:
base_url: 目标网页的URL;headers: 包含User-Agent, Referer, Cookie等信息的请求头,用于模拟浏览器访问
- 发送HTTP请求并获取数据:
  - 使用requests.get()方法发送GET请求到目标URL
  - 检查响应状态码response.status_code是否为200,确保请求成功
  - 使用response.text获取响应的文本内容
- 解析JSON数据:
  - 使用json.loads()方法将JSON格式的字符串转换为Python字典
  - 从字典中提取关键信息,例如视频链接
- 下载视频:
- 构建视频文件名
- 发送另一个HTTP请求下载视频内容
- 将视频内容写入本地文件
注意事项:
- 爬取网页之前请先阅读网站的robots.txt文件,遵守网站的爬虫规则。
- 网站结构可能会发生变化,导致代码失效,需要根据实际情况修改代码。
- 请勿将爬取到的视频用于商业用途,以免侵犯版权。
希望这篇博客能够帮助你学习使用Python爬虫抓取视频。
原文地址: https://www.cveoy.top/t/topic/fRfp 著作权归作者所有。请勿转载和采集!