"""
|
|
Script Name:
|
|
Description: 从 https://www.iafd.com 上获取信息。利用cloudscraper绕过cloudflare
|
|
detail_fetch.py 从本地已经保存的列表数据,逐个拉取详情,并输出到文件。
|
|
list_fetch_astro.py 按照星座拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
|
|
list_fetch_birth.py 按照生日拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
|
|
list_fetch_ethnic.py 按照人种拉取数据,获得演员的信息列表。数据量大,但详细字段很多无效的
|
|
list_merge.py 上面三个列表的数据,取交集,得到整体数据。
|
|
iafd_scrape.py 借助 https://github.com/stashapp/CommunityScrapers 实现的脚本,可以输入演员的 iafd链接,获取兼容 stashapp 格式的数据。(作用不大,因为国籍、照片等字段不匹配)
|
|
|
|
html_format.py 负责读取已经保存的html目录, 提取信息,格式化输出。
|
|
data_merge.py 负责合并数据,它把从 iafd, javhd, thelordofporn 以及搭建 stashapp, 从上面更新到的演员数据(需导出)进行合并;
|
|
stashdb_merge.py 负责把从stashapp中导出的单个演员的json文件, 批量合并并输出; 通常我们需要把stashapp中导出的批量文件压缩并传输到data/tmp目录,解压后合并
|
|
从而获取到一份完整的数据列表。
|
|
|
|
Author: [Your Name]
|
|
Created Date: YYYY-MM-DD
|
|
Last Modified: YYYY-MM-DD
|
|
Version: 1.0
|
|
|
|
Modification History:
|
|
- YYYY-MM-DD [Your Name]:
|
|
- YYYY-MM-DD [Your Name]:
|
|
- YYYY-MM-DD [Your Name]:
|
|
"""

import cloudscraper
import json
import time
import csv
from bs4 import BeautifulSoup
import logging

import config

config.setup_logging()

# Base URL and the variable ethnicity parameter
host_url = "https://www.iafd.com"
base_url = f"{host_url}/lookupethnic.rme/ethnic="
ethnic_list = ['caucasian', 'black', 'asian', 'latin', 'native american', 'middle eastern',
               'mediteranean', 'indian', 'polynesian', 'multi-ethnic', 'ethnic', 'romani',
               'eurasian', 'north african', 'south asian']

# Set up headers and the cloudscraper session
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
scraper = cloudscraper.create_scraper()
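
# Note: scraper (from cloudscraper.create_scraper()) is a requests-compatible
# session that transparently solves Cloudflare's anti-bot challenge, which is
# how the "get past Cloudflare" part of the docstring is implemented.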

# Output directory for results
res_dir = './result'

# Accumulated ethnic_map records, one dict per performer found
ethnic_map = []
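# Each record appended by parse_page() below has this flat shape (values are
# illustrative; the real href is host_url plus the performer's relative link):
#   {'ethnic': 'asian', 'person': 'Performer Name', 'href': 'https://www.iafd.com/...'}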


# Make the network request and return the raw HTML (None on failure)
def fetch_page(url):
    try:
        response = scraper.get(url, headers=headers)
        response.raise_for_status()
        return response.text
    except Exception as e:
        logging.error(f"Failed to fetch {url}: {e}")
        return None


# Parse the HTML content and extract the data we need
def parse_page(html, ethnic):
    # Manually patch up some non-standard tags before parsing
    html = html.replace('<br>', '').replace('<a ', '<a target="_blank" ')
    soup = BeautifulSoup(html, 'lxml')  # use the lxml parser
    #soup = BeautifulSoup(html, 'html.parser')

    rows = soup.find_all('div', class_='row headshotrow')
    flag = False
    list_cnt = 0

    for row in rows:
        for col in row.find_all('div', class_='col-lg-2 col-md-3 col-sm-4 col-xs-6'):
            link_tag = col.find('a')
            img_tag = col.find('div', class_='pictag')
            flag = True

            if link_tag and img_tag:
                href = host_url + link_tag['href']
                person = img_tag.text.strip()

                # Store the record in ethnic_map
                ethnic_map.append({
                    'ethnic': ethnic,
                    'person': person,
                    'href': href
                })
                list_cnt = list_cnt + 1

    if flag:
        logging.info(f"get {list_cnt} persons from this page. total persons: {len(ethnic_map)}")
        return soup
    else:
        return None


# Handle pagination
def handle_pagination(soup, ethnic):
    next_page = soup.find('a', rel='next')

    if next_page:
        next_url = host_url + next_page['href']
        logging.info(f"Found next page: {next_url}")
        return next_url
    else:
        logging.info(f"All pages fetched for {ethnic}.")
        return None
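
# Pagination relies on the listing page exposing an <a rel="next"> element with
# a site-relative href; when no such link is found, the current ethnicity is
# treated as fully fetched and the caller moves on to the next one.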


# Handle ethnicity names that contain spaces
def format_ethnic(ethnic):
    return ethnic.replace(' ', '+')
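
# e.g. format_ethnic('native american') -> 'native+american'; this is how the
# script encodes multi-word ethnicity values when building the lookupethnic.rme URL.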


# Main loop: process each ethnicity in turn
def process_ethnic_data():
    all_person = len(ethnic_map)  # should be 0 at this point
    all_pages = 0

    for ethnic in ethnic_list:
        url = base_url + format_ethnic(ethnic)
        next_url = url
        cursor = int(all_person / 100)
        pages = 0
        logging.info(f"--------Fetching data for {ethnic}, url {url} ...")

        while next_url:
            html = fetch_page(next_url)
            if html:
                soup = parse_page(html, ethnic)
                if soup:
                    next_url = handle_pagination(soup, ethnic)
                    pages = pages + 1
                else:
                    logging.info(f"wrong html content. retrying {next_url} ...")
                # Track progress and save intermediate results every ~100 records
                if len(ethnic_map) / 100 > cursor:
                    cursor = int(len(ethnic_map) / 100)
                    save_data()
                time.sleep(2)  # throttle the request rate
            else:
                logging.info(f"Retrying {next_url} ...")
                time.sleep(5)  # wait before retrying

        # Per-ethnicity statistics
        ethnic_person = len(ethnic_map) - all_person
        all_person = len(ethnic_map)
        all_pages = all_pages + pages
        logging.info(f"--------Fetching data for {ethnic} end. total pages: {pages}, total persons: {ethnic_person}, all persons fetched: {all_person}")

    # Final statistics
    logging.info(f"--------Fetching all data end. total ethnic: {len(ethnic_list)}, total pages: {all_pages}, total persons: {all_person}")


# Save the results to files
def save_data():
    with open(f'{res_dir}/ethnic.json', 'w', encoding='utf-8') as json_file:
        json.dump(ethnic_map, json_file, indent=4, ensure_ascii=False)

    with open(f'{res_dir}/ethnic.csv', 'w', newline='', encoding='utf-8') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=['ethnic', 'person', 'href'])
        writer.writeheader()
        writer.writerows(ethnic_map)
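
# save_data() overwrites both snapshots on every call:
#   ./result/ethnic.json - ethnic_map dumped as a JSON array
#   ./result/ethnic.csv  - the same records as CSV with columns ethnic, person, href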


# Run the main logic
if __name__ == '__main__':
    process_ethnic_data()
    save_data()
    logging.info("Data fetching and saving completed.")