Python 爬虫抓取视频并保存 - 解决方案及代码优化
本文将展示使用 Python 爬虫抓取视频并保存的完整代码,同时针对代码中出现的错误进行分析和解决,并对代码进行优化,使代码更易于理解和维护。
代码示例
"""Download a video from the site's AJAX playback-log endpoint.

Flow: one GET to the endpoint returns JSON whose 'info' entry carries the
video id ('ulog_rid', an int) and a direct file URL ('play_url'); a second
GET streams the file to ./视频/<id>.mp4.
"""
import requests
import os
import json
from moviepy.editor import VideoFileClip, AudioFileClip  # NOTE(review): unused in this script — confirm before removing
from bs4 import BeautifulSoup  # NOTE(review): unused in this script — confirm before removing

# --- Data fetching ---
base_url = 'http://www.zkk78.com/index.php/user/ajax_ulog/?ac=set&mid=1&id=4721&sid=1&nid=1&type=4'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.55',
    'Referer': 'http://www.zkk78.com/dongmanplay/1660-1-1.html',
    # NOTE(review): session cookie expires — refresh it before each run.
    'Cookie': 'first_h_kp=1689927842258; count_h_kp=1; first_m_kp=1689927842260; count_m_kp=1; __51cke__=; Hm_lvt_38ea8ed97fbe7c334fcc1878c579e5e0=1689927815; Hm_lvt_c11e70df18184f7263176ce90c8a9cc3=1689927817; PHPSESSID=2pnhv3rla9asc5i0l248i8dl83; user_id=20620; user_name=2143552649; group_id=2; group_name=%E9%BB%98%E8%AE%A4%E4%BC%9A%E5%91%98; user_check=b93515cf840072f8df045b0842e31513; user_portrait=%2Fstatic%2Fimages%2Ftouxiang.png; XLA_CI=43a5b7a031caf14cbacec3c121269bcf; first_h_kp=1689930340869; count_h_kp=1; first_m_kp=1689930340870; count_m_kp=1; history=%5B%7B%22name%22%3A%22%E4%BD%A0%E7%9A%84%E5%90%8D%E5%AD%97%22%2C%22pic%22%3A%22https%3A%2F%2Fcdn.yinghuazy.xyz%2Fupload%2Fvod%2F20210219-1%2F5e3b65ef9e6c582f09784d027cfb1923.jpg%22%2C%22link%22%3A%22%2Fdongmanplay%2F1660-1-1.html%22%2C%22part%22%3A%22%E5%85%A8%E9%9B%86%E6%97%A5%E8%AF%AD%22%7D%2C%7B%22name%22%3A%22%E5%9B%9E%E5%A4%8D%E6%9C%AF%E5%A3%AB%E7%9A%84%E9%87%8D%E6%9D%A5%E4%BA%BA%E7%94%9F%22%2C%22pic%22%3A%22https%3A%2F%2Fpic.rmb.bdstatic.com%2Fbjh%2F377f268028bb09e80035191cd9f15c58.jpeg%22%2C%22link%22%3A%22%2Fdongmanplay%2F4721-1-1.html%22%2C%22part%22%3A%22%E7%AC%AC01%E9%9B%86%22%7D%5D; Hm_lpvt_c11e70df18184f7263176ce90c8a9cc3=1689930351; Hm_lpvt_38ea8ed97fbe7c334fcc1878c579e5e0=1689930351; __tins__21589017=%7B%22sid%22%3A%201689930340967%2C%20%22vd%22%3A%206%2C%20%22expires%22%3A%201689932161945%7D; __51laig__=15',
}

# First request: fetch the JSON payload describing the video.
response = requests.get(base_url, headers=headers, timeout=30)
response.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
print(response.status_code)
data = response.text
print(data)

# Parse the JSON text into Python objects.
json_data = json.loads(data)
print(json_data)
json_list = [json_data['info']]  # wrap the single info dict in a list for uniform iteration
print(json_list)

os.makedirs('视频', exist_ok=True)  # ensure the output directory exists before writing

for item in json_list:
    print(item)  # inspect the item's keys and values
    # 'ulog_rid' is an int — convert to str before concatenating the extension,
    # otherwise: TypeError: unsupported operand type(s) for +: 'int' and 'str'.
    video_title = str(item['ulog_rid']) + '.mp4'
    video_url = item['play_url']
    print(video_title, video_url)
    print('正在下载:', video_title)
    # Second request: download the raw video bytes.
    video_data = requests.get(video_url, headers=headers, timeout=300).content
    with open(os.path.join('视频', video_title), 'wb') as f:
        f.write(video_data)
    print('下载完成\n')
错误分析及解决
代码中出现的错误是 TypeError: unsupported operand type(s) for +: 'int' and 'str'
这个错误表示在以下代码中,将字典中的'ulog_rid'键的值与字符串'.mp4'进行拼接时,'ulog_rid'的值是一个整数类型,无法直接与字符串进行拼接。
video_title = item['ulog_rid'] + '.mp4' # 出错的原始代码:整数与字符串直接拼接
解决方法是将'ulog_rid'的值转换为字符串类型后再进行拼接,修改后的代码如下:
video_title = str(item['ulog_rid']) + '.mp4' # 将'ulog_rid'的值转换为字符串类型
代码优化
- 提取请求头和URL到函数中,使代码更简洁。
- 使用try-except语句处理请求失败的情况。
- 使用logging模块记录日志信息。
优化后的代码
import requests
import os
import json
import logging
from moviepy.editor import VideoFileClip, AudioFileClip
from bs4 import BeautifulSoup
# Configure the root logger: append to download.log at INFO level, one timestamped line per record.
logging.basicConfig(filename='download.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# 定义请求头和URL函数
def get_headers_and_url():
    """Build the HTTP request headers and the AJAX endpoint URL.

    Returns:
        tuple: (headers dict for requests.get, endpoint URL string).
    """
    endpoint = 'http://www.zkk78.com/index.php/user/ajax_ulog/?ac=set&mid=1&id=4721&sid=1&nid=1&type=4'
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.55',
        'Referer': 'http://www.zkk78.com/dongmanplay/1660-1-1.html',
        'Cookie': 'first_h_kp=1689927842258; count_h_kp=1; first_m_kp=1689927842260; count_m_kp=1; __51cke__=; Hm_lvt_38ea8ed97fbe7c334fcc1878c579e5e0=1689927815; Hm_lvt_c11e70df18184f7263176ce90c8a9cc3=1689927817; PHPSESSID=2pnhv3rla9asc5i0l248i8dl83; user_id=20620; user_name=2143552649; group_id=2; group_name=%E9%BB%98%E8%AE%A4%E4%BC%9A%E5%91%98; user_check=b93515cf840072f8df045b0842e31513; user_portrait=%2Fstatic%2Fimages%2Ftouxiang.png; XLA_CI=43a5b7a031caf14cbacec3c121269bcf; first_h_kp=1689930340869; count_h_kp=1; first_m_kp=1689930340870; count_m_kp=1; history=%5B%7B%22name%22%3A%22%E4%BD%A0%E7%9A%84%E5%90%8D%E5%AD%97%22%2C%22pic%22%3A%22https%3A%2F%2Fcdn.yinghuazy.xyz%2Fupload%2Fvod%2F20210219-1%2F5e3b65ef9e6c582f09784d027cfb1923.jpg%22%2C%22link%22%3A%22%2Fdongmanplay%2F1660-1-1.html%22%2C%22part%22%3A%22%E5%85%A8%E9%9B%86%E6%97%A5%E8%AF%AD%22%7D%2C%7B%22name%22%3A%22%E5%9B%9E%E5%A4%8D%E6%9C%AF%E5%A3%AB%E7%9A%84%E9%87%8D%E6%9D%A5%E4%BA%BA%E7%94%9F%22%2C%22pic%22%3A%22https%3A%2F%2Fpic.rmb.bdstatic.com%2Fbjh%2F377f268028bb09e80035191cd9f15c58.jpeg%22%2C%22link%22%3A%22%2Fdongmanplay%2F4721-1-1.html%22%2C%22part%22%3A%22%E7%AC%AC01%E9%9B%86%22%7D%5D; Hm_lpvt_c11e70df18184f7263176ce90c8a9cc3=1689930351; Hm_lpvt_38ea8ed97fbe7c334fcc1878c579e5e0=1689930351; __tins__21589017=%7B%22sid%22%3A%201689930340967%2C%20%22vd%22%3A%206%2C%20%22expires%22%3A%201689932161945%7D; __51laig__=15',
    }
    return request_headers, endpoint
# 主函数
def main():
    """Fetch the video-info JSON from the AJAX endpoint and download each video to ./视频/.

    All progress is written to the logger configured at module level; any
    failure is caught at this top-level boundary and logged as an error.
    """
    headers, base_url = get_headers_and_url()
    try:
        # First request: JSON payload describing the video.
        response = requests.get(base_url, headers=headers, timeout=30)
        response.raise_for_status()  # surface HTTP errors instead of parsing an error page
        logging.info(f'请求状态码: {response.status_code}')
        data = response.text
        logging.info(f'响应数据: {data}')
        # Parse the JSON text into Python objects.
        json_data = json.loads(data)
        logging.info(f'JSON 数据: {json_data}')
        json_list = [json_data['info']]  # wrap the single info dict in a list for uniform iteration
        logging.info(f'JSON 列表: {json_list}')
        os.makedirs('视频', exist_ok=True)  # ensure the output directory exists before writing
        for item in json_list:
            logging.info(f'当前条目: {item}')  # log the item's keys and values
            # 'ulog_rid' is an int — convert to str before concatenating the extension.
            video_title = str(item['ulog_rid']) + '.mp4'
            video_url = item['play_url']
            logging.info(f'视频标题: {video_title}, 视频URL: {video_url}')
            logging.info(f'正在下载: {video_title}')
            # Second request: download the raw video bytes.
            video_data = requests.get(video_url, headers=headers, timeout=300).content
            with open(os.path.join('视频', video_title), 'wb') as f:
                f.write(video_data)
            logging.info(f'下载完成: {video_title}')
    except Exception as e:
        logging.error(f'发生错误: {e}')
# Script entry point: run main() only when executed directly, not when imported.
if __name__ == '__main__':
    main()
注意事项
- 请确保在运行代码之前已经创建了一个名为"视频"的文件夹,用于保存下载的视频文件。
- 代码中的Cookie值是动态变化的,请根据实际情况进行修改。
- 使用logging模块记录日志信息可以方便地查看代码运行情况,并排查问题。
希望这篇文章对您有所帮助。
原文地址: https://www.cveoy.top/t/topic/fReJ 著作权归作者所有。请勿转载和采集!