add some scripts.
@@ -104,7 +104,16 @@ def fetch_and_parse_page(url, scraper):
     x_art_cnt = 0
     table = soup.find('table', id='personal')
     if table:
-        rows = table.find_all('tr', class_='we')
+        # Find the thead and skip it
+        thead = table.find('thead')
+        if thead:
+            thead.decompose()  # Drop the thead section; it does not need to be parsed
+
+        # Only the tbody section is left now
+        tbody = table.find('tbody')
+        rows = tbody.find_all('tr') if tbody else []
+
+        # rows = table.find_all('tr', class_='we')
         for row in rows:
             cols = row.find_all('td')
             if len(cols) >= 6:
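
A minimal, self-contained sketch of the parsing pattern this hunk introduces: remove the thead so only data rows remain, then iterate the tbody rows. The inline HTML and the six-column layout are assumptions for illustration only; in the real function the soup object comes from the page fetched elsewhere in fetch_and_parse_page.

    from bs4 import BeautifulSoup

    # Hypothetical page fragment, shaped like the table the scraper expects.
    html = """
    <table id="personal">
      <thead><tr><th>c1</th><th>c2</th><th>c3</th><th>c4</th><th>c5</th><th>c6</th></tr></thead>
      <tbody>
        <tr><td>a</td><td>b</td><td>c</td><td>d</td><td>e</td><td>f</td></tr>
      </tbody>
    </table>
    """

    soup = BeautifulSoup(html, 'html.parser')
    table = soup.find('table', id='personal')
    if table:
        thead = table.find('thead')
        if thead:
            thead.decompose()  # drop the header block; it carries no data rows
        tbody = table.find('tbody')
        rows = tbody.find_all('tr') if tbody else []
        for row in rows:
            cols = row.find_all('td')
            if len(cols) >= 6:
                # Hypothetical handling: print the cell text for each data row.
                print([c.get_text(strip=True) for c in cols])

Decomposing the thead before selecting rows avoids relying on a row class such as 'we', which is why the old rows = table.find_all('tr', class_='we') line is left commented out in the new version.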