add some scripts.

scripts/iafd/distributors_list_fetch.py (new file, 192 lines)

@@ -0,0 +1,192 @@
"""
Script Name: distributors_list_fetch.py
Description: Fetches data from https://www.iafd.com, using cloudscraper to bypass Cloudflare.

detail_fetch.py      pulls details one by one from the locally saved list data and writes them to a file.
list_fetch_astro.py  fetches performer lists by zodiac sign. Moderate volume; detail fields are fairly complete.
list_fetch_birth.py  fetches performer lists by birthday. Moderate volume; detail fields are fairly complete.
list_fetch_ethnic.py fetches performer lists by ethnicity. Large volume, but many detail fields are invalid.
list_merge.py        intersects the three lists above to produce the combined dataset (see the sketch below).
iafd_scrape.py       built on https://github.com/stashapp/CommunityScrapers; given a performer's IAFD link, it returns data in a stashapp-compatible format. (Of limited use, since fields such as nationality and photos do not match.)

html_format.py       reads the saved HTML directory, extracts the information, and writes formatted output.
data_merge.py        merges performer data from iafd, javhd, thelordofporn, and a self-hosted stashapp instance (updated there, then exported).
stashdb_merge.py     batch-merges the individual performer JSON files exported from stashapp and writes the result; typically the exported batch is compressed, transferred to the data/tmp directory, and decompressed there before merging,
yielding one complete data list.

Author: [Your Name]
Created Date: YYYY-MM-DD
Last Modified: YYYY-MM-DD
Version: 1.0

Modification History:
- YYYY-MM-DD [Your Name]:
- YYYY-MM-DD [Your Name]:
- YYYY-MM-DD [Your Name]:
"""

import cloudscraper
import json
import time
import csv
import os  # needed so save_data() can ensure the output directory exists
from bs4 import BeautifulSoup
import logging
import config

config.setup_logging()

# Define the base URL and variable parameters
host_url = "https://www.iafd.com"
base_url = f"{host_url}/distrib.rme/distrib="
dist_list_url = f'{host_url}/distrib.asp'  # page listing all distributors

# Seed map of distributor id -> distributor name; process_list_page() can
# repopulate it with the full list scraped from dist_list_url.
distr_map = {
    6812 : 'nubilefilms.com',
    8563 : 'teenmegaworld network',
    6779 : 'x-art.com',
    7133 : 'tushy.com',
    6496 : 'blacked.com',
    7758 : 'vixen.com',
    6791 : 'teamskeet.com',
    12454: 'vip4k.com',
    13541: 'wow network',
    9702 : 'cum4k.com',
    6778 : 'tiny4k.com',
    12667: 'anal4k.com',
    7419 : 'exotic4k.com',
    13594: 'facials4k.com',
    13633: 'mom4k.com',
    12335: 'slim4k.com',
    16709: 'strippers4k.com',
}
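
# Each id above is fetched via base_url + str(id); e.g. 6812 resolves to
# https://www.iafd.com/distrib.rme/distrib=6812 (see process_main_data below).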

# Set up the request headers and the cloudscraper session
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
scraper = cloudscraper.create_scraper()

# Output directory and accumulated results
res_dir = './result'
all_data = []

# Fetch a page over the network and return its HTML, or None on failure
def fetch_page(url):
    try:
        response = scraper.get(url, headers=headers)
        response.raise_for_status()
        return response.text
    except Exception as e:
        logging.error(f"Failed to fetch {url}: {e}")
        return None

# Parse the HTML content and extract the rows we need
def parse_page(html, name):
    soup = BeautifulSoup(html, "html.parser")
    table = soup.find("table", id="distable")

    if not table:
        logging.warning(f"Warning: No 'distable' table found in {name}")
        return None

    # Drop the thead; only the body rows need parsing
    thead = table.find('thead')
    if thead:
        thead.decompose()

    # Only the tbody remains now
    tbody = table.find('tbody')
    rows = tbody.find_all('tr') if tbody else []

    global all_data
    for row in rows:
        cols = row.find_all('td')
        if len(cols) >= 5:
            title = cols[0].text.strip()
            label = cols[1].text.strip()
            year = cols[2].text.strip()
            rev = cols[3].text.strip()
            a_href = cols[0].find('a')
            href = host_url + a_href['href'] if a_href else ''

            all_data.append({
                'distributors': name,
                'title': title,
                'label': label,
                'year': year,
                'rev': rev,
                'href': href
            })
    return soup

# Pagination handling: distributor listings are served on a single page,
# so there is never a next URL to follow
def handle_pagination(soup, name):
    return None
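
# Hypothetical sketch only: if a listing did paginate, handle_pagination could
# return the absolute URL of a "Next" link along these lines (the actual IAFD
# markup may differ):
#
#   next_link = soup.find('a', string='Next')
#   return host_url + next_link['href'] if next_link else None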

# Fetch the distributor list page and fill distr_map from its <select> options
def process_list_page():
    global distr_map

    logging.info(f"Fetching data for {dist_list_url} ...")
    select_element = None
    while True:
        html = fetch_page(dist_list_url)
        if html:
            soup = BeautifulSoup(html, "html.parser")
            select_element = soup.find('select', {'name': 'Distrib'})
            if select_element:
                break
        logging.info(f"wrong html content. retrying {dist_list_url} ...")
        time.sleep(5)  # back off before retrying

    if not select_element:
        return None

    options = select_element.find_all('option')
    for option in options:
        value = option.get('value')  # the value attribute holds the distributor id
        text = option.text.strip()   # the option text holds the distributor name
        distr_map[int(value)] = text
    logging.info(f'fetch {dist_list_url} succ. total distributors: {len(distr_map)}')
    return True

# Main logic: loop over every distributor in distr_map
def process_main_data():
    for dis_key, dis_name in distr_map.items():
        url = base_url + str(dis_key)
        next_url = url
        logging.info(f"Fetching data for {dis_name}, url {url} ...")

        while next_url:
            html = fetch_page(next_url)
            if html:
                soup = parse_page(html, dis_name)
                if soup:
                    next_url = handle_pagination(soup, dis_name)
                else:
                    logging.info(f"wrong html content. retrying {next_url} ...")
                # Save results periodically
                save_data()
                time.sleep(2)  # throttle the request rate
            else:
                logging.info(f"Retrying {next_url} ...")
                time.sleep(5)  # wait before retrying

# Write the accumulated results to JSON and CSV files
def save_data():
    os.makedirs(res_dir, exist_ok=True)  # ensure the output directory exists
    with open(f'{res_dir}/distributors.json', 'w', encoding='utf-8') as json_file:
        json.dump(all_data, json_file, indent=4, ensure_ascii=False)

    with open(f'{res_dir}/distributors.csv', 'w', newline='', encoding='utf-8') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=['distributors', 'title', 'label', 'year', 'rev', 'href'])
        writer.writeheader()
        writer.writerows(all_data)
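
# save_data() rewrites both files in full on every call; since
# process_main_data() invokes it after each page, an interrupted run still
# leaves usable partial output on disk.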

# Run the main logic
if __name__ == '__main__':
    #process_list_page()
    process_main_data()
    save_data()
    logging.info("Data fetching and saving completed.")