modify scripts
101
iafd/merge/auto_tag.py
Normal file
@@ -0,0 +1,101 @@
import sqlite3
import json
import logging

# Configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

# Database connection
DB_PATH = 'your_database.db'  # Database path; change to the actual path

# Predefined tags, kept in one place so they are easy to modify
TAG_LIST = ['vixen', 'blacked', 'tushy', 'x-art']


# Preload tag IDs
def get_all_tag_ids():
    try:
        with sqlite3.connect(DB_PATH) as conn:
            cursor = conn.cursor()
            cursor.execute("SELECT id, name FROM tags WHERE name IN ({})".format(', '.join(['?'] * len(TAG_LIST))), TAG_LIST)
            tags = cursor.fetchall()
            # Map tag name -> tag_id
            return {tag_name.lower(): tag_id for tag_id, tag_name in tags}
    except Exception as e:
        logger.error(f"Error fetching tag IDs: {e}")
        return {}


# Batch-resolve performer_id for a list of performer names
def get_performers_ids(performer_names):
    try:
        with sqlite3.connect(DB_PATH) as conn:
            cursor = conn.cursor()
            query = "SELECT id, name FROM performers WHERE LOWER(name) IN ({})".format(
                ','.join(['?'] * len(performer_names))
            )
            cursor.execute(query, [name.lower() for name in performer_names])
            performers = cursor.fetchall()
            return {performer_name.lower(): performer_id for performer_id, performer_name in performers}
    except Exception as e:
        logger.error(f"Error fetching performer IDs: {e}")
        return {}


# Insert a row into the performers_tags table
def insert_performer_tag(performer_id, tag_id):
    try:
        with sqlite3.connect(DB_PATH) as conn:
            cursor = conn.cursor()
            # Check whether performers_tags already contains this pair
            cursor.execute("SELECT 1 FROM performers_tags WHERE performer_id = ? AND tag_id = ?", (performer_id, tag_id))
            if not cursor.fetchone():
                cursor.execute("INSERT INTO performers_tags (performer_id, tag_id) VALUES (?, ?)", (performer_id, tag_id))
                conn.commit()
                logger.info(f"Inserted performer_id {performer_id} and tag_id {tag_id} into performers_tags.")
            else:
                logger.info(f"Entry for performer_id {performer_id} and tag_id {tag_id} already exists in performers_tags.")
    except Exception as e:
        logger.error(f"Error inserting into performers_tags: {e}")


# Process the detail.json file
def process_detail_json(detail_file):
    try:
        with open(detail_file, 'r', encoding='utf-8') as f:
            data = json.load(f)

        # Fetch the IDs of all predefined tags
        tag_ids = get_all_tag_ids()

        # Collect the performers.name values to look up (skip entries without a person)
        performer_names = [entry.get('person') for entry in data if entry.get('person')]

        # Batch-query performers.id
        performer_ids = get_performers_ids(performer_names)

        for entry in data:
            person = entry.get('person')
            if not person:
                continue  # Skip entries without a performer name
            vixen_cnt = entry.get('vixen_cnt', 0)
            blacked_cnt = entry.get('blacked_cnt', 0)
            tushy_cnt = entry.get('tushy_cnt', 0)
            x_art_cnt = entry.get('x_art_cnt', 0)

            # Resolve performer_id
            performer_id = performer_ids.get(person.lower())
            if not performer_id:
                continue  # Skip this entry if no performer_id was found

            # Handle each tag (vixen, blacked, tushy, x-art)
            for tag_name, count in zip(TAG_LIST, [vixen_cnt, blacked_cnt, tushy_cnt, x_art_cnt]):
                if count > 0:
                    tag_id = tag_ids.get(tag_name)
                    if tag_id:
                        insert_performer_tag(performer_id, tag_id)
    except Exception as e:
        logger.error(f"Error processing {detail_file}: {e}")


# Main entry point
def main():
    detail_file = 'detail.json'  # Input file path; replace with the actual path
    process_detail_json(detail_file)


if __name__ == "__main__":
    main()
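The tagging script above only reads a handful of fields from each detail.json entry. As a sanity check, a tiny input file can be generated with a sketch like the one below; the field names mirror what process_detail_json() expects, while the performer name and counts are purely illustrative.

# Illustrative only: write a minimal detail.json for a dry run of auto_tag.py.
# Field names mirror process_detail_json(); the name and counts are made up.
import json

sample = [
    {
        "person": "Example Performer",  # must match performers.name (case-insensitive) to be tagged
        "vixen_cnt": 2,
        "blacked_cnt": 0,
        "tushy_cnt": 1,
        "x_art_cnt": 0
    }
]

with open("detail.json", "w", encoding="utf-8") as f:
    json.dump(sample, f, ensure_ascii=False, indent=4)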
72
iafd/merge/json2csv.py
Normal file
@@ -0,0 +1,72 @@
import json
import csv


# Read the detail_birth.json file
def read_json(file_path):
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        print(f"File {file_path} not found.")
        return []
    except json.JSONDecodeError:
        print(f"Failed to parse {file_path}.")
        return []


# Write the data to a CSV file
def write_to_csv(data, output_file):
    with open(output_file, 'w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=[
            'person', 'href', 'performer_aka', 'birthday', 'astrology', 'birthplace', 'gender',
            'years_active', 'ethnicity', 'nationality', 'hair_colors', 'eye_color', 'height',
            'weight', 'measurements', 'tattoos', 'piercings'
        ])
        writer.writeheader()
        for entry in data:
            # Ensure performer_aka is always a list
            performer_aka = entry.get('performer_aka', [])

            # Normalize None or non-list values to a list
            if performer_aka is None:
                performer_aka = []
            elif not isinstance(performer_aka, list):
                performer_aka = [performer_aka]

            # Write one row per entry
            writer.writerow({
                'person': entry.get('person', ''),
                'href': entry.get('href', ''),
                'performer_aka': performer_aka,
                'birthday': entry.get('birthday', ''),
                'astrology': entry.get('astrology', ''),
                'birthplace': entry.get('birthplace', ''),
                'gender': entry.get('gender', ''),
                'years_active': entry.get('years_active', ''),
                'ethnicity': entry.get('ethnicity', ''),
                'nationality': entry.get('nationality', ''),
                'hair_colors': entry.get('hair_colors', ''),
                'eye_color': entry.get('eye_color', ''),
                'height': entry.get('height', ''),
                'weight': entry.get('weight', ''),
                'measurements': entry.get('measurements', ''),
                'tattoos': entry.get('tattoos', ''),
                'piercings': entry.get('piercings', '')
            })


# Main function that performs the conversion
def main():
    # Input JSON file path
    input_json_file = 'detail_birth.json'
    # Output CSV file path
    output_csv_file = 'detail_birth.csv'

    # Read the JSON file
    data = read_json(input_json_file)

    # Write the data to the CSV file
    write_to_csv(data, output_csv_file)

    print(f"Data saved to {output_csv_file}")


if __name__ == "__main__":
    main()
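Note that write_to_csv() passes performer_aka through as a Python list, so the CSV cell ends up holding the list's str() form (e.g. "['Alias One', 'Alias Two']"). A quick end-to-end check of the conversion is sketched below; only the field names come from the script, the values are illustrative, and any missing columns fall back to '' via entry.get(..., '').

# Illustrative only: build a one-record detail_birth.json, then run json2csv.py.
import json

sample = [
    {
        "person": "Example Performer",
        "href": "https://example.com/profile",
        "performer_aka": ["Alias One", "Alias Two"],
        "birthday": "January 1, 1990",
        "gender": "f"
    }
]

with open("detail_birth.json", "w", encoding="utf-8") as f:
    json.dump(sample, f, ensure_ascii=False, indent=4)

# Then: python json2csv.py  ->  detail_birth.csv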
120
iafd/merge/url_match.py
Normal file
@@ -0,0 +1,120 @@
import json
import logging
import time

import cloudscraper
from requests.exceptions import RequestException

# Configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

test_flag = True


# Read stashdb.json
def read_json(file_path):
    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            return json.load(file)
    except FileNotFoundError:
        logger.error(f"File {file_path} not found.")
        return []
    except json.JSONDecodeError:
        logger.error(f"Error decoding JSON from {file_path}.")
        return []


# Request a URL and return the final URL after following redirects
def fetch_real_url_2(url, scraper):
    try:
        response = scraper.get(url, allow_redirects=True)
        if response.status_code == 200:
            return response.url  # The final URL after redirects
        else:
            logger.warning(f"Failed to fetch {url}, Status code: {response.status_code}")
            return None
    except RequestException as e:
        logger.error(f"Error fetching {url}: {e}")
        return None


def fetch_real_url(url, scraper):
    try:
        # Request the URL without following redirects
        response = scraper.get(url, allow_redirects=False)

        # If the response is a 301/302 redirect, read the Location header
        if response.status_code == 301 or response.status_code == 302:
            redirect_url = response.headers.get("Location")
            if redirect_url:
                logger.info(f"Redirected to: {redirect_url}")
                return redirect_url
            else:
                logger.warning(f"Redirect response received, but no Location header found for {url}")
                return None
        else:
            logger.warning(f"Failed to fetch {url}, Status code: {response.status_code}")
            return None
    except RequestException as e:
        logger.error(f"Error fetching {url}: {e}")
        return None


# Process each URL
def process_urls(data, scraper):
    loop = 0
    global test_flag

    for entry in data:
        iafd_urls = entry.get('iafd_urls', [])
        real_urls = []

        for url in iafd_urls:
            if 'perfid=' in url:
                # Redirect-style link: request it and record the redirected URL
                real_url = fetch_real_url(url, scraper)
                if real_url:
                    real_urls.append(real_url)
                # During testing, only process a small batch
                loop = loop + 1
                if test_flag and loop > 10:
                    return data
            elif 'person.rme/id=' in url:
                # Non-perfid link: keep it as-is
                real_urls.append(url)
            else:
                # Unrecognized link: keep it as-is and log a warning
                real_urls.append(url)
                logger.warning(f"Unknown url format: {url}")

        # Update the iafd_real_url field
        entry['iafd_real_url'] = real_urls

    return data


# Save the processed result to result.json
def save_to_json(data, output_file):
    try:
        with open(output_file, 'w', encoding='utf-8') as file:
            json.dump(data, file, ensure_ascii=False, indent=4)
        logger.info(f"Data saved to {output_file}")
    except Exception as e:
        logger.error(f"Error saving to {output_file}: {e}")


# Main entry point
def main():
    # Input and output files
    input_file = 'stashdb.json'
    output_file = 'result.json'

    # Create a cloudscraper session
    scraper = cloudscraper.create_scraper()

    # Read the data from stashdb.json
    data = read_json(input_file)

    # Process each URL and resolve redirects
    processed_data = process_urls(data, scraper)

    # Save the result to result.json
    save_to_json(processed_data, output_file)


if __name__ == "__main__":
    main()
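process_urls() expects each stashdb.json entry to carry an iafd_urls list and writes the resolved links back under iafd_real_url; cloudscraper is used presumably so the requests get past Cloudflare-protected pages. A minimal, purely illustrative input file for a test run (leaving test_flag at True so only a small batch of redirects is resolved) could be generated like this:

# Illustrative only: create a tiny stashdb.json for a test run of url_match.py.
# The URLs are placeholders; only the 'perfid=' and 'person.rme/id=' substrings
# determine which branch of process_urls() handles them.
import json

sample = [
    {
        "name": "Example Performer",  # extra fields are carried through unchanged
        "iafd_urls": [
            "https://www.iafd.com/person.rme/perfid=example",  # 'perfid=' branch: redirect is resolved
            "https://www.iafd.com/person.rme/id=example"       # 'person.rme/id=' branch: kept as-is
        ]
    }
]

with open("stashdb.json", "w", encoding="utf-8") as f:
    json.dump(sample, f, ensure_ascii=False, indent=4)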