以下是一个使用Python爬虫爬取英雄联盟所有皮肤图片的示例代码:

import requests
from bs4 import BeautifulSoup
import os

# 创建目录
# Create a directory (and any missing parents) if it does not already exist.
def create_directory(path):
    """Ensure *path* exists as a directory.

    Uses ``exist_ok=True`` so the check-then-create race of the original
    ``os.path.exists`` + ``os.makedirs`` pair cannot raise FileExistsError
    when two processes create the same directory concurrently.
    """
    os.makedirs(path, exist_ok=True)

# 下载图片
# Download a single image to disk.
def download_image(url, path):
    """Stream the resource at *url* into the file *path*.

    Fixes vs. the original:
    - ``timeout`` added: ``requests.get`` without a timeout can block forever
      on a stalled server.
    - ``with`` on the response: guarantees the connection is released even
      when the status is not 200 (the original leaked it in that case).
    Only writes the file when the server answers 200; other statuses are
    silently skipped, matching the original best-effort behavior.
    """
    with requests.get(url, stream=True, timeout=30) as response:
        if response.status_code == 200:
            with open(path, 'wb') as file:
                # 1 KiB chunks keep memory flat for arbitrarily large images.
                for chunk in response.iter_content(1024):
                    file.write(chunk)

# 获取皮肤图片链接
# Collect skin image URLs from a hero detail page.
def get_skin_images(url):
    """Return the list of skin image URLs found on the page at *url*.

    Parses the page with BeautifulSoup and extracts the ``data-img``
    attribute of every ``<img class="skin-pic">`` element. A ``timeout``
    is supplied so a stalled server cannot hang the crawl indefinitely.

    Raises KeyError if a matched ``img`` tag lacks ``data-img`` (same as
    the original behavior).
    """
    response = requests.get(url, timeout=30)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Comprehension replaces the manual append loop; behavior is identical.
    return [image['data-img']
            for image in soup.find_all('img', class_='skin-pic')]

# 爬取皮肤图片
# Crawl skin images for a set of heroes and save them under per-hero folders.
def crawl_skin_images(hero_ids=(1, 2, 3, 4, 5), save_directory='skins'):
    """Download all skin images for each hero id into *save_directory*.

    Backward-compatible generalization of the original: the previously
    hard-coded hero id list and output directory are now keyword
    parameters whose defaults reproduce the original behavior exactly.

    Args:
        hero_ids: iterable of numeric hero ids to crawl (default: 1-5).
        save_directory: root output directory; one subdirectory per hero
            is created inside it.
    """
    base_url = 'https://lol.qq.com/data/info-defail.shtml?id={}'
    create_directory(save_directory)

    for hero_id in hero_ids:
        url = base_url.format(hero_id)
        skin_images = get_skin_images(url)
        hero_directory = os.path.join(save_directory, str(hero_id))
        create_directory(hero_directory)

        for i, image_url in enumerate(skin_images):
            # Files are named <hero_id>_<index>.jpg inside the hero folder.
            image_name = '{}_{}.jpg'.format(hero_id, i)
            image_path = os.path.join(hero_directory, image_name)
            download_image(image_url, image_path)

            print('已下载图片:{}'.format(image_path))

crawl_skin_images()

请注意,这只是一个示例代码,你可能需要根据具体的需求进行适当的修改。另外,爬取网站的数据可能涉及到版权和法律问题,请确保你有合法的权限和授权来进行该操作。


原文地址: https://www.cveoy.top/t/topic/hOsx 著作权归作者所有。请勿转载和采集!

免费AI点我,无需注册和登录