"""
Script Name:
Description: 从 https://www.iafd.com 上获取信息。利用cloudscraper绕过cloudflare
detail_fetch.py 从本地已经保存的列表数据,逐个拉取详情,并输出到文件。
list_fetch_astro.py 按照星座拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
list_fetch_birth.py 按照生日拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
list_fetch_ethnic.py 按照人种拉取数据,获得演员的信息列表。数据量大,但详细字段很多无效的
list_merge.py 上面三个列表的数据,取交集,得到整体数据。
iafd_scrape.py 借助 https://github.com/stashapp/CommunityScrapers 实现的脚本,可以输入演员的 iafd链接,获取兼容 stashapp 格式的数据。(作用不大,因为国籍、照片等字段不匹配)
html_format.py 负责读取已经保存的html目录, 提取信息,格式化输出。
data_merge.py 负责合并数据,它把从 iafd, javhd, thelordofporn 以及搭建 stashapp, 从上面更新到的演员数据(需导出)进行合并;
stashdb_merge.py 负责把从stashapp中导出的单个演员的json文件, 批量合并并输出; 通常我们需要把stashapp中导出的批量文件压缩并传输到data/tmp目录,解压后合并
从而获取到一份完整的数据列表。
Author: [Your Name]
Created Date: YYYY-MM-DD
Last Modified: YYYY-MM-DD
Version: 1.0
Modification History:
- YYYY-MM-DD [Your Name]:
- YYYY-MM-DD [Your Name]:
- YYYY-MM-DD [Your Name]:
"""
import os
import json
import csv

from bs4 import BeautifulSoup

INPUT_DIR = "html"
OUTPUT_JSON = "./result/iafd_meta.json"
OUTPUT_CSV = "./result/iafd_meta.csv"
BASE_URL = "https://www.iafd.com"
def parse_html_file(filepath):
    """Parse a single HTML file and extract the required fields.

    A sketch of the markup this parser expects follows the function.
    """
    person_list = []
    filename = os.path.basename(filepath)
    filename = os.path.splitext(filename)[0]
    with open(filepath, "r", encoding="utf-8") as file:
        soup = BeautifulSoup(file, "html.parser")
    astro_div = soup.find("div", id="astro")
    if not astro_div:
        print(f"Warning: no 'astro' div found in {filename}")
        return []
    birth_date = None
    for elem in astro_div.find_all(recursive=False):
        # An <h3 class="astroday"> sets the birth date for the entries that follow it.
        if elem.name == "h3" and "astroday" in elem.get("class", []):
            birth_date = elem.get_text(strip=True)
        # Each <div class="perficon"> holds one performer link.
        elif elem.name == "div" and "perficon" in elem.get("class", []):
            a_tag = elem.find("a")
            if a_tag:
                href = BASE_URL + a_tag["href"]
                name = a_tag.find("span", class_="perfname")
                if name:
                    person_list.append({
                        "astrology": filename,
                        "birth_date": birth_date,
                        "person": name.get_text(strip=True),
                        "href": href,
                    })
    return person_list
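
# A sketch of the markup parse_html_file() assumes, inferred from the selectors
# above rather than copied from a live IAFD page (the exact attributes and href
# shape are assumptions):
#
#   <div id="astro">
#     <h3 class="astroday">January 1</h3>
#     <div class="perficon">
#       <a href="/person.rme/id=..."><span class="perfname">Jane Doe</span></a>
#     </div>
#     ...
#   </div>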
def main():
    all_persons = []
    for filename in os.listdir(INPUT_DIR):
        if filename.endswith(".html"):
            filepath = os.path.join(INPUT_DIR, filename)
            print(f"Parsing {filename} ...")
            all_persons.extend(parse_html_file(filepath))
    # Make sure the output directory exists before writing.
    os.makedirs(os.path.dirname(OUTPUT_JSON), exist_ok=True)
    # Save JSON
    with open(OUTPUT_JSON, "w", encoding="utf-8") as json_file:
        json.dump(all_persons, json_file, indent=4, ensure_ascii=False)
    # Save CSV
    with open(OUTPUT_CSV, "w", newline="", encoding="utf-8") as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=["astrology", "birth_date", "person", "href"])
        writer.writeheader()
        writer.writerows(all_persons)
    print(f"Data extracted and saved to {OUTPUT_JSON} and {OUTPUT_CSV}")
if __name__ == "__main__":
    main()
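
# A minimal sketch of the batch merge that stashdb_merge.py performs on the
# per-performer JSON exports (hypothetical; the paths and output field handling
# are assumptions, not copied from that script):
#
#   import glob, json
#
#   merged = []
#   for path in glob.glob("data/tmp/*.json"):
#       with open(path, encoding="utf-8") as f:
#           merged.append(json.load(f))
#   with open("result/stashdb_merged.json", "w", encoding="utf-8") as f:
#       json.dump(merged, f, indent=4, ensure_ascii=False)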