add some scripts.
99
scripts/iafd/list_merge.py
Normal file
@@ -0,0 +1,99 @@
"""
|
||||
Script Name:
|
||||
Description: 从 https://www.iafd.com 上获取信息。利用cloudscraper绕过cloudflare
|
||||
detail_fetch.py 从本地已经保存的列表数据,逐个拉取详情,并输出到文件。
|
||||
list_fetch_astro.py 按照星座拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
|
||||
list_fetch_birth.py 按照生日拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
|
||||
list_fetch_ethnic.py 按照人种拉取数据,获得演员的信息列表。数据量大,但详细字段很多无效的
|
||||
list_merge.py 上面三个列表的数据,取交集,得到整体数据。
|
||||
iafd_scrape.py 借助 https://github.com/stashapp/CommunityScrapers 实现的脚本,可以输入演员的 iafd链接,获取兼容 stashapp 格式的数据。(作用不大,因为国籍、照片等字段不匹配)
|
||||
|
||||
html_format.py 负责读取已经保存的html目录, 提取信息,格式化输出。
|
||||
data_merge.py 负责合并数据,它把从 iafd, javhd, thelordofporn 以及搭建 stashapp, 从上面更新到的演员数据(需导出)进行合并;
|
||||
stashdb_merge.py 负责把从stashapp中导出的单个演员的json文件, 批量合并并输出; 通常我们需要把stashapp中导出的批量文件压缩并传输到data/tmp目录,解压后合并
|
||||
从而获取到一份完整的数据列表。
|
||||
|
||||
Author: [Your Name]
|
||||
Created Date: YYYY-MM-DD
|
||||
Last Modified: YYYY-MM-DD
|
||||
Version: 1.0
|
||||
|
||||
Modification History:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
"""
|
||||
|
||||
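# A minimal sketch of the input this script consumes (inferred from the code
# below; not documented upstream): each of result/birth.json, result/astro.json
# and result/ethnic.json is a JSON array of objects carrying at least an
# "href" and a "person" key, e.g.
#
#   [
#       {"href": "<IAFD performer URL>", "person": "<performer name>"},
#       ...
#   ]
#
# Extra keys are ignored; entries missing an href or person are dropped.
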
import json
import csv
import os
from collections import defaultdict

# Read a JSON file and return its contents
def read_json(file_path):
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        print(f"File {file_path} not found.")
        return []
    except json.JSONDecodeError:
        print(f"Failed to parse {file_path}.")
        return []

# Process the data: deduplicate and merge the person field
def process_data(files):
    href_map = defaultdict(list)

    # Read and process each file
    for file in files:
        data = read_json(file['path'])
        for entry in data:
            href = entry.get('href')
            person = entry.get('person')
            # Skip entries missing a person so '|'.join() below never sees None
            if href and person:
                href_map[href].append(person)

    # Merge the persons sharing the same href, joined with "|"
    result = []
    for href, persons in href_map.items():
        person = '|'.join(sorted(set(persons)))  # deduplicate; sort for stable output
        result.append({'href': href, 'person': person})

    return result

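# Worked example (illustrative data, not from the real lists): if birth.json
# contains [{"href": "a", "person": "Jane Doe"}] and astro.json contains
# [{"href": "a", "person": "Jane D."}, {"href": "b", "person": "Jane D."}],
# then process_data() returns
#   [{'href': 'a', 'person': 'Jane D.|Jane Doe'},
#    {'href': 'b', 'person': 'Jane D.'}]
# i.e. a union keyed on href, with name variants joined by "|".
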
# Save the result to a JSON file
def save_to_json(data, output_file):
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)

# Save the result to a CSV file
def save_to_csv(data, output_file):
    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=['href', 'person'])
        writer.writeheader()
        writer.writerows(data)

# Main entry point: process the data and save the results
def main():
    # The list files to merge
    files = [
        {'path': 'result/birth.json', 'name': 'birth'},
        {'path': 'result/astro.json', 'name': 'astro'},
        {'path': 'result/ethnic.json', 'name': 'ethnic'}
    ]

    # Process the data
    processed_data = process_data(files)

    # Make sure the result directory exists
    os.makedirs('result', exist_ok=True)

    # Write the results to JSON and CSV files
    save_to_json(processed_data, 'result/merged.json')
    save_to_csv(processed_data, 'result/merged.csv')

    print("Processing finished; results saved to result/merged.json and result/merged.csv")

if __name__ == "__main__":
    main()
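
# Usage sketch (assumption: run from the script's own directory, since the
# result/ paths above are relative):
#   cd scripts/iafd && python list_merge.py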