modify scripts
This commit is contained in:
236
iafd/tools/data_merge.py
Normal file
236
iafd/tools/data_merge.py
Normal file
@ -0,0 +1,236 @@
|
||||
"""
|
||||
Script Name:
|
||||
Description: 从 https://www.iafd.com 上获取信息。利用cloudscraper绕过cloudflare
|
||||
detail_fetch.py 从本地已经保存的列表数据,逐个拉取详情,并输出到文件。
|
||||
list_fetch_astro.py 按照星座拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
|
||||
list_fetch_birth.py 按照生日拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
|
||||
list_fetch_ethnic.py 按照人种拉取数据,获得演员的信息列表。数据量大,但详细字段很多无效的
|
||||
list_merge.py 上面三个列表的数据,取交集,得到整体数据。
|
||||
iafd_scrape.py 借助 https://github.com/stashapp/CommunityScrapers 实现的脚本,可以输入演员的 iafd链接,获取兼容 stashapp 格式的数据。(作用不大,因为国籍、照片等字段不匹配)
|
||||
|
||||
html_format.py 负责读取已经保存的html目录, 提取信息,格式化输出。
|
||||
data_merge.py 负责合并数据,它把从 iafd, javhd, thelordofporn 以及搭建 stashapp, 从上面更新到的演员数据(需导出)进行合并;
|
||||
stashdb_merge.py 负责把从stashapp中导出的单个演员的json文件, 批量合并并输出; 通常我们需要把stashapp中导出的批量文件压缩并传输到data/tmp目录,解压后合并
|
||||
从而获取到一份完整的数据列表。
|
||||
|
||||
Author: [Your Name]
|
||||
Created Date: YYYY-MM-DD
|
||||
Last Modified: YYYY-MM-DD
|
||||
Version: 1.0
|
||||
|
||||
Modification History:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import csv
|
||||
import logging
|
||||
|
||||
# Logging configuration for progress/diagnostic output.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

# Input directory and output file paths.
input_dir = 'data'
# Merged result in JSON form.
output_json_file = f'{input_dir}/iafd_merge.json'
# Merged result in CSV form (written tab-delimited below).
output_csv_file = f'{input_dir}/iafd_merge.csv'
# Sorted, comma-joined list of every person name seen in any source.
output_person_txt = f'{input_dir}/all_person.txt'
|
||||
|
||||
def _load_json(filename):
    """Load one JSON source file from `input_dir`.

    Returns the parsed data, or an empty list when the file is missing or
    unreadable (the script then simply treats that source as empty).
    """
    path = os.path.join(input_dir, filename)
    try:
        with open(path, 'r', encoding='utf-8') as file:
            data = json.load(file)
        logger.info(f"Loaded {filename}")
        return data
    except Exception as e:
        # Best-effort: a broken source must not abort the whole merge.
        logger.error(f"Error loading {filename}: {e}")
        return []


# The four data sources; each falls back to [] on load failure.
iafd_data = _load_json('iafd_meta.json')
stashdb_data = _load_json('stashdb.json')
javhd_data = _load_json('javhd_meta.json')
lordporn_data = _load_json('thelordofporn_meta.json')
|
||||
|
||||
# De-duplicated union of performer names across all four sources.
# Each source stores the name under a different key.
all_meta_data = set()
all_meta_data.update(entry['person'] for entry in iafd_data)
all_meta_data.update(entry['name'] for entry in stashdb_data)
all_meta_data.update(entry['ja_name'] for entry in javhd_data)
all_meta_data.update(entry['pornstar'] for entry in lordporn_data)
|
||||
|
||||
def _index_by(records, key):
    """Map each record by its `key` field, keeping the FIRST occurrence.

    Mirrors the semantics of `next(item for item in records if ...)` so a
    duplicate name still resolves to the same record as a linear scan would,
    but lookups become O(1) instead of O(len(records)) per person.
    """
    index = {}
    for item in records:
        index.setdefault(item.get(key), item)
    return index


# Build the per-source name indexes once, outside the per-person loop.
_iafd_by_name = _index_by(iafd_data, 'person')
_stashdb_by_name = _index_by(stashdb_data, 'name')
_javhd_by_name = _index_by(javhd_data, 'ja_name')
_lordporn_by_name = _index_by(lordporn_data, 'pornstar')

# Accumulates one merged record per unique person name.
merged_data = []

for person in all_meta_data:
    # Record starts with the name; presence flags and per-source fields follow.
    merged_entry = {
        'person': person
    }

    # Per-source field templates — all empty until a match fills them in,
    # so every output row carries the full column set.
    stashdb_entry = {
        'stashdb_gender': '',
        'stashdb_birthdate': '',
        'stashdb_ethnicity': '',
        'stashdb_country': '',
        'stashdb_height': '',
        'stashdb_measurements': '',
        'stashdb_fake_tits': '',
        'stashdb_career_length': '',
        'stashdb_aliases': ''
    }
    javhd_entry = {
        'javhd_rank': '',
        'javhd_height': '',
        'javhd_weight': '',
        'javhd_breast_size': '',
        'javhd_breast_factor': '',
        'javhd_birth_date': '',
        'javhd_ethnicity': ''
    }
    lordporn_entry = {
        'lordporn_rating': '',
        'lordporn_rank': '',
        'lordporn_career_start': '',
        'lordporn_measurements': '',
        'lordporn_born': '',
        'lordporn_height': '',
        'lordporn_weight': ''
    }

    # Presence flag for the iafd source (no detail fields are copied from it).
    iafd_match = _iafd_by_name.get(person)
    in_iafd = 'Y' if iafd_match else 'N'

    # 1. stashdb: flag presence and copy its detail fields on a match.
    in_stashdb = 'N'
    stashdb_match = _stashdb_by_name.get(person)
    if stashdb_match:
        in_stashdb = 'Y'
        stashdb_entry.update({
            'stashdb_gender': stashdb_match.get('gender', ''),
            'stashdb_birthdate': stashdb_match.get('birthdate', ''),
            'stashdb_ethnicity': stashdb_match.get('ethnicity', ''),
            'stashdb_country': stashdb_match.get('country', ''),
            'stashdb_height': stashdb_match.get('height', ''),
            'stashdb_measurements': stashdb_match.get('measurements', ''),
            'stashdb_fake_tits': stashdb_match.get('fake_tits', ''),
            'stashdb_career_length': stashdb_match.get('career_length', ''),
            'stashdb_aliases': stashdb_match.get('aliases', '')
        })

    # 2. javhd: note the space-separated source keys ('breast size', ...).
    in_javhd = 'N'
    javhd_match = _javhd_by_name.get(person)
    if javhd_match:
        in_javhd = 'Y'
        javhd_entry.update({
            'javhd_rank': javhd_match.get('rank', ''),
            'javhd_height': javhd_match.get('height', ''),
            'javhd_weight': javhd_match.get('weight', ''),
            'javhd_breast_size': javhd_match.get('breast size', ''),
            'javhd_breast_factor': javhd_match.get('breast factor', ''),
            'javhd_birth_date': javhd_match.get('birth date', ''),
            'javhd_ethnicity': javhd_match.get('ethnicity', '')
        })

    # 3. thelordofporn.
    in_lordporn = 'N'
    lordporn_match = _lordporn_by_name.get(person)
    if lordporn_match:
        in_lordporn = 'Y'
        lordporn_entry.update({
            'lordporn_rating': lordporn_match.get('rating', ''),
            'lordporn_rank': lordporn_match.get('rank', ''),
            'lordporn_career_start': lordporn_match.get('career_start', ''),
            'lordporn_measurements': lordporn_match.get('measurements', ''),
            'lordporn_born': lordporn_match.get('born', ''),
            'lordporn_height': lordporn_match.get('height', ''),
            'lordporn_weight': lordporn_match.get('weight', '')
        })

    # Presence flags always appear in the output, then the source fields.
    merged_entry.update({
        'in_iafd': in_iafd,
        'in_stashdb': in_stashdb,
        'in_javhd': in_javhd,
        'in_lordporn': in_lordporn
    })
    merged_entry.update(stashdb_entry)
    merged_entry.update(javhd_entry)
    merged_entry.update(lordporn_entry)

    merged_data.append(merged_entry)
|
||||
|
||||
# Write the merged records as JSON.
try:
    with open(output_json_file, 'w', encoding='utf-8') as json_file:
        json.dump(merged_data, json_file, ensure_ascii=False, indent=4)
    logger.info(f"Data successfully written to {output_json_file}")
except Exception as e:
    logger.error(f"Error writing {output_json_file}: {e}")

# Write the merged records as a tab-delimited table (despite the .csv name).
try:
    if merged_data:
        with open(output_csv_file, 'w', newline='', encoding='utf-8') as csv_file:
            writer = csv.DictWriter(csv_file, fieldnames=merged_data[0].keys(), delimiter='\t')
            writer.writeheader()
            writer.writerows(merged_data)
        logger.info(f"Data successfully written to {output_csv_file}")
    else:
        # Guard: merged_data[0].keys() would raise IndexError on an empty merge.
        logger.warning(f"No merged data; skipping {output_csv_file}")
except Exception as e:
    logger.error(f"Error writing {output_csv_file}: {e}")


# Write the sorted, comma-joined list of all names to all_person.txt.
try:
    all_meta_data_list = sorted(all_meta_data)
    all_meta_data_str = ','.join(all_meta_data_list)
    with open(output_person_txt, 'w', encoding='utf-8') as txt_file:
        txt_file.write(all_meta_data_str)
    # Use the actual path in the message (the old f-string had no placeholder).
    logger.info(f"all_meta_data successfully written to {output_person_txt}")
except Exception as e:
    logger.error(f"Error writing {output_person_txt}: {e}")
|
||||
163
iafd/tools/iafd_scrape.py
Normal file
163
iafd/tools/iafd_scrape.py
Normal file
@ -0,0 +1,163 @@
|
||||
"""
|
||||
Script Name:
|
||||
Description: 从 https://www.iafd.com 上获取信息。利用cloudscraper绕过cloudflare
|
||||
detail_fetch.py 从本地已经保存的列表数据,逐个拉取详情,并输出到文件。
|
||||
list_fetch_astro.py 按照星座拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
|
||||
list_fetch_birth.py 按照生日拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
|
||||
list_fetch_ethnic.py 按照人种拉取数据,获得演员的信息列表。数据量大,但详细字段很多无效的
|
||||
list_merge.py 上面三个列表的数据,取交集,得到整体数据。
|
||||
iafd_scrape.py 借助 https://github.com/stashapp/CommunityScrapers 实现的脚本,可以输入演员的 iafd链接,获取兼容 stashapp 格式的数据。(作用不大,因为国籍、照片等字段不匹配)
|
||||
|
||||
html_format.py 负责读取已经保存的html目录, 提取信息,格式化输出。
|
||||
data_merge.py 负责合并数据,它把从 iafd, javhd, thelordofporn 以及搭建 stashapp, 从上面更新到的演员数据(需导出)进行合并;
|
||||
stashdb_merge.py 负责把从stashapp中导出的单个演员的json文件, 批量合并并输出; 通常我们需要把stashapp中导出的批量文件压缩并传输到data/tmp目录,解压后合并
|
||||
从而获取到一份完整的数据列表。
|
||||
|
||||
Author: [Your Name]
|
||||
Created Date: YYYY-MM-DD
|
||||
Last Modified: YYYY-MM-DD
|
||||
Version: 1.0
|
||||
|
||||
Modification History:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
import logging
|
||||
from typing import List
|
||||
|
||||
# Logging configuration.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

# Local checkout of stashapp CommunityScrapers (provides the IAFD scraper module).
scrapers_dir = "/root/gitlabs/stashapp_CommunityScrapers/scrapers"
# Performer list to scrape (person + href entries).
meta_file = "./data/iafd_meta.json"
# Progress cursor: one "person,href" line per successfully scraped entry.
cursor_file = "./data/iafd_cursor.txt"
# Per-performer JSON output directory (inside the scrapers checkout).
output_dir = f"{scrapers_dir}/iafd_meta"

# Retry count and interval for the scraper command.
MAX_RETRIES = 10
RETRY_DELAY = 5  # seconds between retries


# Create the output directory.
os.makedirs(output_dir, exist_ok=True)
|
||||
|
||||
|
||||
def read_processed_hrefs() -> set:
    """Return the set of hrefs already recorded in the cursor file.

    Each cursor line has the form "person,href"; lines without a comma
    are ignored. A missing cursor file yields an empty set.
    """
    if not os.path.exists(cursor_file):
        return set()
    with open(cursor_file, "r", encoding="utf-8") as fh:
        return {raw.strip().split(",")[1] for raw in fh if "," in raw}
|
||||
|
||||
|
||||
def execute_scraper_command(href: str, idv: str) -> bool:
    """Run the CommunityScrapers IAFD scraper for one performer.

    Writes the scraper's stdout to ``{output_dir}/{idv}.json``. Retries up
    to MAX_RETRIES times, sleeping RETRY_DELAY seconds between attempts.
    Returns True on success, False when every attempt fails.
    """
    import shlex  # local import: only used here, keeps module imports unchanged

    # href/idv originate from scraped data; quote everything interpolated
    # into a shell=True string to close the shell-injection hole.
    command = (
        f"cd {shlex.quote(scrapers_dir)}; "
        f"python3 -m IAFD.IAFD performer {shlex.quote(href)} "
        f"> {shlex.quote(f'{output_dir}/{idv}.json')}"
    )
    for attempt in range(1, MAX_RETRIES + 1):
        try:
            logger.info(f"执行命令: {command}")
            subprocess.run(command, shell=True, check=True)
            return True
        except subprocess.CalledProcessError as e:
            logger.error(f"执行命令失败: {e}. 重试 {attempt}/{MAX_RETRIES}...")
            # No pointless delay after the final failed attempt.
            if attempt < MAX_RETRIES:
                time.sleep(RETRY_DELAY)
    logger.error(f"命令执行失败,已尝试 {MAX_RETRIES} 次: {command}")
    return False
|
||||
|
||||
|
||||
def validate_json_file(idv: str) -> bool:
    """Validate the scraper output ``{output_dir}/{idv}.json``.

    The file must exist, parse as JSON, and contain a 'name' field.
    On any failure the (possibly partial) file is deleted and False is
    returned; otherwise True.
    """
    output_file = f"{output_dir}/{idv}.json"
    try:
        with open(output_file, "r", encoding="utf-8") as f:
            content = f.read().strip()
        json_data = json.loads(content)
        if "name" not in json_data:
            raise ValueError("缺少 'name' 字段")
        return True
    # OSError covers a missing/unreadable file, which previously crashed
    # the script instead of being treated as an invalid result.
    except (json.JSONDecodeError, ValueError, OSError) as e:
        logger.error(f"解析失败,删除无效文件: {output_file}. 错误: {e}")
        try:
            os.remove(output_file)
        except OSError:
            # File absent or already removed — nothing to clean up.
            pass
        return False
|
||||
|
||||
|
||||
def process_iafd_meta(data: List[dict], processed_hrefs: set) -> None:
    """Scrape every entry of iafd_meta.json not yet recorded in the cursor.

    For each entry: derive the performer id from the href's ``id=`` query
    parameter, run the scraper, validate the output JSON, and append a
    "person,href" line to the cursor file. Entries with missing fields, an
    unparsable href, a failed scrape, or invalid output are skipped.
    """
    for entry in data:
        person = entry.get("person")
        href = entry.get("href")

        if not person or not href:
            logger.warning(f"跳过无效数据: {entry}")
            continue

        # Extract the id following "id=". Note: str.split(...)[-1] never
        # raises, so the old `except IndexError` branch was unreachable —
        # an explicit check is needed to actually skip malformed hrefs.
        if "id=" not in href:
            logger.error(f"无法解析 ID: {href}")
            continue
        idv = href.split("id=")[-1]

        # Skip hrefs already processed in a previous run.
        if href in processed_hrefs:
            logger.info(f"已处理,跳过: {person}, {href}")
            continue

        # Scrape; give up on this entry after all retries fail.
        if not execute_scraper_command(href, idv):
            continue

        # Drop entries whose output JSON is invalid.
        if not validate_json_file(idv):
            continue

        # Record success so reruns skip this href.
        with open(cursor_file, "a", encoding="utf-8") as f:
            f.write(f"{person},{href}\n")

        logger.info(f"成功处理: {person} - {href}")
|
||||
|
||||
|
||||
def main():
    """Entry point: load iafd_meta.json and scrape all unprocessed entries."""
    # hrefs already handled in previous runs.
    processed_hrefs = read_processed_hrefs()

    try:
        with open(meta_file, "r", encoding="utf-8") as f:
            data = json.load(f)
    # OSError added: a missing/unreadable meta file previously crashed
    # instead of being logged.
    except (OSError, json.JSONDecodeError) as e:
        logger.error(f"读取 iafd_meta.json 错误: {e}")
        return

    process_iafd_meta(data, processed_hrefs)


if __name__ == "__main__":
    main()
|
||||
90
iafd/tools/stashdb_merge.py
Normal file
90
iafd/tools/stashdb_merge.py
Normal file
@ -0,0 +1,90 @@
|
||||
"""
|
||||
Script Name:
|
||||
Description: 从 https://www.iafd.com 上获取信息。利用cloudscraper绕过cloudflare
|
||||
detail_fetch.py 从本地已经保存的列表数据,逐个拉取详情,并输出到文件。
|
||||
list_fetch_astro.py 按照星座拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
|
||||
list_fetch_birth.py 按照生日拉取数据,获得演员的信息列表。数据量适中,各详细字段较全
|
||||
list_fetch_ethnic.py 按照人种拉取数据,获得演员的信息列表。数据量大,但详细字段很多无效的
|
||||
list_merge.py 上面三个列表的数据,取交集,得到整体数据。
|
||||
iafd_scrape.py 借助 https://github.com/stashapp/CommunityScrapers 实现的脚本,可以输入演员的 iafd链接,获取兼容 stashapp 格式的数据。(作用不大,因为国籍、照片等字段不匹配)
|
||||
|
||||
html_format.py 负责读取已经保存的html目录, 提取信息,格式化输出。
|
||||
data_merge.py 负责合并数据,它把从 iafd, javhd, thelordofporn 以及搭建 stashapp, 从上面更新到的演员数据(需导出)进行合并;
|
||||
stashdb_merge.py 负责把从stashapp中导出的单个演员的json文件, 批量合并并输出; 通常我们需要把stashapp中导出的批量文件压缩并传输到data/tmp目录,解压后合并
|
||||
从而获取到一份完整的数据列表。
|
||||
|
||||
Author: [Your Name]
|
||||
Created Date: YYYY-MM-DD
|
||||
Last Modified: YYYY-MM-DD
|
||||
Version: 1.0
|
||||
|
||||
Modification History:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
- YYYY-MM-DD [Your Name]:
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import csv
|
||||
import logging
|
||||
|
||||
# Logging configuration.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

# Input and output locations.
input_dir = 'data/tmp'  # directory holding the unpacked per-performer JSON exports
output_json_file = 'stashdb.json'
output_csv_file = 'stashdb.csv'

# Accumulates one record per exported performer file.
data_list = []
|
||||
|
||||
# Walk the export directory and collect one record per performer JSON file.
for filename in os.listdir(input_dir):
    if not filename.endswith('.json'):
        continue
    file_path = os.path.join(input_dir, filename)

    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            data = json.load(file)

        # Keep only the fields the downstream merge consumes.
        person = {
            'name': data.get('name'),
            'gender': data.get('gender'),
            'birthdate': data.get('birthdate'),
            'ethnicity': data.get('ethnicity'),
            'country': data.get('country'),
            'height': data.get('height'),
            'measurements': data.get('measurements'),
            'fake_tits': data.get('fake_tits'),
            'career_length': data.get('career_length'),
            # `aliases` may be missing OR explicitly null; `or []` avoids the
            # TypeError that .get('aliases', []) raises on a null value.
            'aliases': ', '.join(data.get('aliases') or [])
        }

        data_list.append(person)
        # Include the file name in the message (the placeholder was lost).
        logger.info(f"Processed file: {filename}")

    except Exception as e:
        # One broken export must not abort the whole batch.
        logger.error(f"Error processing file {filename}: {e}")
|
||||
|
||||
# Write the collected records as JSON.
try:
    with open(output_json_file, 'w', encoding='utf-8') as json_file:
        json.dump(data_list, json_file, ensure_ascii=False, indent=4)
    logger.info(f"Data successfully written to {output_json_file}")
except Exception as e:
    logger.error(f"Error writing JSON file: {e}")

# Write the collected records as CSV.
try:
    if data_list:
        with open(output_csv_file, 'w', newline='', encoding='utf-8') as csv_file:
            writer = csv.DictWriter(csv_file, fieldnames=data_list[0].keys())
            writer.writeheader()
            writer.writerows(data_list)
        logger.info(f"Data successfully written to {output_csv_file}")
    else:
        # Guard: data_list[0].keys() would raise IndexError with no records.
        logger.warning(f"No records collected; skipping {output_csv_file}")
except Exception as e:
    logger.error(f"Error writing CSV file: {e}")
|
||||
Reference in New Issue
Block a user