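"""Extract performer listings from locally saved IAFD astrology pages.

Each file in INPUT_DIR is assumed to be a page saved from
https://www.iafd.com whose div#astro block lists performers grouped by
birth date; the file name (without extension) is recorded as the
'astrology' field. Results are written to both JSON and CSV.
"""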
import os
import json
import csv

from bs4 import BeautifulSoup


INPUT_DIR = "html"
OUTPUT_JSON = "./result/iafd_meta.json"
OUTPUT_CSV = "./result/iafd_meta.csv"
BASE_URL = "https://www.iafd.com"


def parse_html_file(filepath):
    """Parse a single HTML file and extract the needed information."""
    person_list = []
    # Use the file name (without extension) as the 'astrology' label.
    filename = os.path.basename(filepath)
    filename = os.path.splitext(filename)[0]

    with open(filepath, "r", encoding="utf-8") as file:
        soup = BeautifulSoup(file, "html.parser")
    astro_div = soup.find("div", id="astro")

    if not astro_div:
        print(f"Warning: No 'astro' div found in {filename}")
        return []

    birth_date = None
    for elem in astro_div.find_all(recursive=False):
        # An <h3 class="astroday"> heading carries a birth date; the
        # <div class="perficon"> entries that follow belong to that date.
        if elem.name == "h3" and "astroday" in elem.get("class", []):
            birth_date = elem.get_text(strip=True)
        elif elem.name == "div" and "perficon" in elem.get("class", []):
            a_tag = elem.find("a")
            if a_tag:
                href = BASE_URL + a_tag["href"]
                name = a_tag.find("span", class_="perfname")
                if name:
                    person_list.append({
                        "astrology": filename,
                        "birth_date": birth_date,
                        "person": name.get_text(strip=True),
                        "href": href
                    })
    return person_list


def main():
    all_persons = []
    for filename in os.listdir(INPUT_DIR):
        if filename.endswith(".html"):
            filepath = os.path.join(INPUT_DIR, filename)
            print(f"Parsing {filename} ...")
            all_persons.extend(parse_html_file(filepath))

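    # Assumes ./result may not already exist; create it so the writes
    # below do not fail with FileNotFoundError.
    os.makedirs(os.path.dirname(OUTPUT_JSON), exist_ok=True)
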
    # Save JSON
    with open(OUTPUT_JSON, "w", encoding="utf-8") as json_file:
        json.dump(all_persons, json_file, indent=4, ensure_ascii=False)

    # Save CSV
    with open(OUTPUT_CSV, "w", newline="", encoding="utf-8") as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=["astrology", "birth_date", "person", "href"])
        writer.writeheader()
        writer.writerows(all_persons)

    print(f"Data extracted and saved to {OUTPUT_JSON} and {OUTPUT_CSV}")


if __name__ == "__main__":
    main()