📌 Project Goal
This project automatically crawls topics (the main post and all replies) from a chosen board of the Bitcointalk forum and extracts structured information such as title, author, posting time, user rank, activity, and Merit, for further analysis or downstream use.
The project is intended for research and learning purposes only.
Core environment:
Python 3.9, Crawl4AI==0.6.3, beautifulsoup4==4.12.3
Crawler framework: crawl4ai (asynchronous crawling + browser simulation)
HTML parsing: BeautifulSoup (bs4)
Async progress display: tqdm.asyncio
Storage: JSON files
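Before running the full crawler, a minimal sanity check can confirm that crawl4ai and its headless browser are working. The snippet below is only a sketch and uses nothing beyond the AsyncWebCrawler / arun calls that the crawler code itself relies on; the file name smoke_test.py and the target URL are arbitrary examples.

# smoke_test.py -- a minimal sketch to verify the crawl4ai installation
# (file name and target URL are arbitrary; only APIs used by the crawler below are called)
import asyncio
from crawl4ai import AsyncWebCrawler

async def smoke_test():
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://bitcointalk.org/")
        print("success:", result.success, "| html length:", len(result.html or ""))

if __name__ == "__main__":
    asyncio.run(smoke_test())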
Two Python files, bitcointalk_crawler.py and main.py, are all that is needed to run the project.
The complete code is given below.
bitcointalk_crawler.py:
import os
from bs4 import BeautifulSoup
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, JsonCssExtractionStrategy, BrowserConfig, CacheMode
import json
import re
from pathlib import Path
from tqdm.asyncio import tqdm as async_tqdm
# save path
SAVE_DIR = "../bitcointalk/"
os.makedirs(SAVE_DIR, exist_ok=True)

# url = "https://bitcointalk.org/index.php?board=77"  # Bitcoin > Bitcoin Discussion > Press
# board = "Bitcoin Discussion_Press"
# board_url = "https://bitcointalk.org/index.php?board=74"  # Bitcoin > Bitcoin Discussion > Legal
# board = "Bitcoin Discussion_Legal"
board_url = "https://bitcointalk.org/index.php?board=6"  # URL of the Bitcoin > Development & Technical Discussion sub-board
board = "Bitcoin Development_Technical_Discussion"  # keyword used in the output file names
bitcointalk_page = 346  # number of board pages to crawl per sub-board
## Only the three parameters above need to be changed for normal use
# board_url = "https://bitcointalk.org/index.php?board=8"  # Economy > Trading Discussion
# board = " Economy Trading Discussion"

URL_path = SAVE_DIR + board + "_bitcointalk_urls.json"
DONE_URLS_FILE = SAVE_DIR + board + "bitcointalk_done_urls.json"
RESULTS_FILE = SAVE_DIR + board + "bitcointalk_results.json"
# JavaScript: click "next page" and wait for the list to refresh
js_click_next_and_wait = """
(async () => {
    const getTopicTitles = () => {
        return Array.from(document.querySelectorAll('.tborder a'))
            .map(a => a.textContent.trim()).join('||');
    };
    const initialTitles = getTopicTitles();
    // NOTE: the link text below was garbled in the original source ('?');
    // it is presumably the "next page" arrow label of the forum theme
    const nextButton = Array.from(document.querySelectorAll('#bodyarea #toppages .prevnext a.navPages'))
        .find(a => a.textContent.trim() === '?');
    if (nextButton) nextButton.click();
    while (true) {
        await new Promise(resolve => setTimeout(resolve, 200));
        const currentTitles = getTopicTitles();
        if (currentTitles !== initialTitles) break;
    }
})();
"""

# schema: extraction rules for the topic list
schema = {
    "name": "BitcointalkList",
    "baseSelector": "#bodyarea .tborder a[href*='topic=']",
    "fields": [
        {"name": "title", "selector": "a", "type": "text", "transform": "strip"},
        {"name": "url", "selector": "a", "type": "attr:href"},
    ],
}

# crawl topic URLs page by page
async def crawl_bitcointalk_dynamic_list(board_url, max_pages: int = 3):
    print("Start collecting topic URLs")
    browser_config = BrowserConfig(headless=True, java_script_enabled=True)
    async with AsyncWebCrawler(config=browser_config) as crawler:
        all_urls = []
        # url = "https://bitcointalk.org/index.php?board=77"  Bitcoin Discussion > Press
        # url = "https://bitcointalk.org/index.php?board=74"  Bitcoin Discussion > Legal
        session_id = "bitcointalk_session"
        for page in range(max_pages):
            offset = page * 40  # each board page lists 40 topics
            page_url = f"{board_url}.{offset}"
            urls = []
            config = CrawlerRunConfig(
                cache_mode=CacheMode.BYPASS,
                css_selector="#bodyarea .tborder .windowbg a[href*='topic=']",
                extraction_strategy=JsonCssExtractionStrategy(schema),
                # js_code=js_click_next_and_wait if page > 0 else None,
                # js_only=page > 0,
                session_id=session_id,
            )
            result = await crawler.arun(url=page_url, config=config)
            # print("Page result:", result.markdown)
            # print("Page result:", result)
            if result.success:
                html_content = result.html  # raw HTML string
                urls = re.findall(
                    r'href="(https://bitcointalk\.org/index\.php\?topic=\d+\.0)"',
                    html_content,
                )
                for url in urls:
                    all_urls.append(url)
            else:
                print(f"⚠️ Failed to fetch page {page + 1}")
            print(f"✅ {len(all_urls)} URLs collected so far")
        with open(URL_path, "w", encoding="utf-8") as f:
            json.dump(all_urls, f, ensure_ascii=False, indent=2)
        print(f"Finished crawling, {len(all_urls)} URLs in total")
        # the URL for the next page is built from the offset above
        return all_urls


# Visit each topic page and scrape the main post and all replies
async def crawl_bitcointalk_post_detail_with_replies(url: str) -> dict:
    """Given a Bitcointalk topic URL, return structured data for the main post and its replies."""
    result_data = {
        "url": url,
        "time": "unknown",
        "title": "",
        "content": "",
        "read_count": -1,
        "author": "unknown",
        "rank": "unknown",
        "activity": 0,
        "merit": 0,
        "replies_count": 0,
        "replies": [],
    }
    schema = {
        "name": "Bitcointalk Thread",
        "baseSelector": ".bordercolor .msgcl1",  # one element per post (main post + replies)
        "fields": [
            {"name": "author", "selector": ".poster_info > b > a", "type": "text"},
            {"name": "author_inf", "selector": ".poster_info .smalltext", "type": "text"},
            # original devtools path for reference:
            # quickModForm > table.bordercolor > tbody > tr:nth-child(1) > td > table > tbody > tr > td > table > tbody > tr:nth-child(1) > td.td_headerandpost
            {"name": "time", "selector": ".td_headerandpost .smalltext", "type": "text"},
            {"name": "content", "selector": ".td_headerandpost .post", "type": "text"},
        ],
    }
    browser_config = BrowserConfig(headless=True, java_script_enabled=True)
    crawler_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS,
        extraction_strategy=JsonCssExtractionStrategy(schema),
        delay_before_return_html=1,
        magic=True,
        simulate_user=True,
        override_navigator=True,
    )
    try:
        # start the crawler
        async with AsyncWebCrawler(config=browser_config) as crawler:
            result = await crawler.arun(
                url=url,
                config=crawler_config,
                # js_code=js_click_next_and_wait if page > 0 else None,
                # js_only=page > 0,
            )
            # print(result)
            soup = BeautifulSoup(result.cleaned_html, "html.parser")
            # print(soup)

            # Topic title: find the <td> that contains the title and the read count
            title = "unknown"   # defaults in case the title cannot be located or matched
            read_count = -1
            topic_td = soup.find("td", string=re.compile(r"Topic:\s*\s*.+?\(Read\s+\d+\s+times\)"))
            # print(topic_td)
            if topic_td:
                text = topic_td.get_text(strip=True)
                # match the title and the read count
                match = re.search(r"Topic:\s*(.+?)\s*\(Read\s+(\d+)\s+times\)", text)
                if match:
                    title = match.group(1)            # topic title
                    read_count = int(match.group(2))  # read count
                    # print("Title:", title)
                    # print("Read count:", read_count)
                else:
                    print("❌ Could not match the title and read count")

            # save the results
            result_data["title"] = title
            result_data["read_count"] = read_count

            raw_posts = json.loads(result.extracted_content)
            # print(raw_posts)
            print(f"✅ Extracted {len(raw_posts)} posts")

            posts = []
            main_content = raw_posts[0].get("content", "")
            for i, raw_post in enumerate(raw_posts):
                post = {}
                author_inf = raw_post.get("author_inf", "")
                rank_match = re.search(
                    r"^(Sr\. Member|Hero Member|Legendary|Full Member|Member|Newbie)", author_inf
                )
                activity_match = re.search(r"Activity:\s*(\d+)", author_inf)
                merit_match = re.search(r"Merit:\s*(\d+)", author_inf)
                post["author"] = raw_post.get("author", "")
                post["rank"] = rank_match.group(1) if rank_match else "unknown"
                post["activity"] = int(activity_match.group(1)) if activity_match else 0
                post["merit"] = int(merit_match.group(1)) if merit_match else 0
                post["time"] = raw_post.get("time", "unknown")
                # if a reply quotes the main post, strip the quoted main-post text
                if i > 0 and main_content in raw_post.get("content", ""):
                    cleaned_text = raw_post.get("content", "").replace(main_content, "").strip()
                    post["content"] = cleaned_text
                else:
                    post["content"] = raw_post.get("content", "")
                # print(f"Author: {post['author']}, time: {post['time']}, rank: {post['rank']}, "
                #       f"activity: {post['activity']}, merit: {post['merit']}, content: {post['content'][:50]}...")
                posts.append(post)

            # merge the main post and the replies
            if raw_posts:
                main_post = posts[0]
                result_data.update({
                    "author": main_post["author"],
                    "time": main_post["time"],
                    "rank": main_post["rank"],
                    "activity": main_post["activity"],
                    "merit": main_post["merit"],
                    "content": main_post["content"],
                    "replies_count": len(posts) - 1,  # number of replies
                    "replies": posts[1:],
                })
            # print(result_data)
            return result_data
    except Exception as e:
        print(f"❌ Fetch failed: {e}")
        return result_data


async def load_urls(URL_path, board_url, pages):
    if os.path.exists(URL_path):
        print(f"✅ URL file already exists, skipping the URL crawl: {URL_path}")
        with open(URL_path, "r", encoding="utf-8") as f:
            ALL_URLS = json.load(f)
    else:
        # collect the topic URLs and save them to file
        ALL_URLS = await crawl_bitcointalk_dynamic_list(board_url, max_pages=pages)
    return ALL_URLS


def load_done_urls():
    if Path(DONE_URLS_FILE).exists():
        with open(DONE_URLS_FILE, "r", encoding="utf-8") as f:
            return set(json.load(f))
    return set()


def save_done_urls(done_urls: set):
    with open(DONE_URLS_FILE, "w", encoding="utf-8") as f:
        json.dump(list(done_urls), f, ensure_ascii=False, indent=2)


def append_post(post: dict):
    if not Path(RESULTS_FILE).exists():
        with open(RESULTS_FILE, "w", encoding="utf-8") as f:
            json.dump([post], f, ensure_ascii=False, indent=2)
    else:
        with open(RESULTS_FILE, "r+", encoding="utf-8") as f:
            data = json.load(f)
            data.append(post)
            f.seek(0)
            json.dump(data, f, ensure_ascii=False, indent=2)
            f.truncate()


async def crawl_bitcointalk_by_keywords(pages=bitcointalk_page, board_url=board_url):
    ALL_URLS = await load_urls(URL_path, board_url, pages)
    all_done_urls = load_done_urls()
    new_done_urls = set()
    print("🔍 Bitcointalk - urls - start")
    for URL in async_tqdm(ALL_URLS, desc="📡 crawling asynchronously"):
        if URL in all_done_urls:
            print(f"⏩ Already done, skipping: {URL}")
            continue
        try:
            print(f"📥 Fetching content: {URL}")
            final_post = await crawl_bitcointalk_post_detail_with_replies(URL)
            # save the post immediately
            append_post(final_post)
            new_done_urls.add(URL)
            # save the progress immediately
            save_done_urls(all_done_urls.union(new_done_urls))
            print(f"✅ Saved: {URL}")
        except Exception as e:
            print(f"❌ Error, skipping: {URL} - {e}")
            continue
    print("🎉 All keyword crawling finished")
main.py:
import asyncio
from bitcointalk_crawler import crawl_bitcointalk_by_keywords


async def main():
    keywords = ["bitcoin", "crypto"]

    # Crawl Bitcointalk
    print("Start crawling Bitcointalk...")
    await crawl_bitcointalk_by_keywords()

    # # Crawl Twitter
    # print("Start crawling Twitter...")
    # await crawl_twitter_by_keywords(keywords)

    # # Crawl Reddit
    # print("Start crawling Reddit...")
    # reddit_data = await crawl_reddit_by_keywords(keywords, pages)
    # save_data("Reddit", reddit_data)


if __name__ == "__main__":
    asyncio.run(main())
Output files:
URL_path | JSON file storing the list of all topic URLs
DONE_URLS_FILE | URLs that have already been crawled, used to avoid duplicate fetching
RESULTS_FILE | result file storing the structured post content
🔁 Crawl Workflow Overview
Step 1: collect the topic list URLs
Function: crawl_bitcointalk_dynamic_list(board_url, max_pages)
Visit the URL of the specified board (for example, the Development & Technical Discussion board).
Simulate paging to collect topic links from the first max_pages pages, extracting topic titles and URLs via CSS selectors.
Filter the topic links further with a regular expression.
Save the results to the file specified by URL_path (see the paging sketch below).
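The paging itself relies on Bitcointalk's offset-based board URLs: every board page lists 40 topics, so page N is reached at board_url.<N*40>, exactly as the code builds page_url. A condensed illustration:

# sketch of the paging scheme used in crawl_bitcointalk_dynamic_list
board_url = "https://bitcointalk.org/index.php?board=6"
page_urls = [f"{board_url}.{page * 40}" for page in range(3)]
# -> ['https://bitcointalk.org/index.php?board=6.0',
#     'https://bitcointalk.org/index.php?board=6.40',
#     'https://bitcointalk.org/index.php?board=6.80']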
🔗 Example result:
[ "https://bitcointalk.org/index.php?topic=123456.0", "https://bitcointalk.org/index.php?topic=234567.0" ]
Step 2: extract the main post and reply details
Function: crawl_bitcointalk_post_detail_with_replies(url)
For each topic URL:
Open the page with the crawler and wait for it to finish loading.
Extract the raw HTML and parse the title and read count with BeautifulSoup.
Use the JSON CSS extraction strategy to extract, for every post (floor) on the page:
author
author info (rank, Merit, Activity)
posting time
post body
Treat the first extracted post as the main post and the rest as replies.
Package the main post and all replies into a structured dictionary (see the example after this list).
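For one thread, that dictionary has the shape below. The keys match result_data in the code; all values here are made up for illustration.

{
  "url": "https://bitcointalk.org/index.php?topic=123456.0",
  "time": "January 01, 2024, 10:00:00 AM",
  "title": "Example topic title",
  "content": "Main post text ...",
  "read_count": 1024,
  "author": "example_user",
  "rank": "Hero Member",
  "activity": 1200,
  "merit": 500,
  "replies_count": 2,
  "replies": [
    {"author": "another_user", "rank": "Member", "activity": 80, "merit": 5,
     "time": "January 01, 2024, 11:00:00 AM", "content": "First reply ..."},
    {"author": "third_user", "rank": "Newbie", "activity": 3, "merit": 0,
     "time": "January 01, 2024, 12:30:00 PM", "content": "Second reply ..."}
  ]
}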
📌 Key Regex Parsing Logic
Extract the title and read count:
match = re.search(r"Topic:\s*(.+?)\s*\(Read\s+(\d+)\s+times\)", text)
Extract the user rank / activity / Merit:
rank_match = re.search(r"^(Sr\. Member|Hero Member|Legendary|Full Member|Member|Newbie)", author_inf)
activity_match = re.search(r"Activity:\s*(\d+)", author_inf)
merit_match = re.search(r"Merit:\s*(\d+)", author_inf)
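A quick way to sanity-check these patterns; the sample strings below are illustrative, shaped like the text Bitcointalk renders:

import re

# sample header text as parsed out of the topic <td> (illustrative)
header = "Topic: Example topic title (Read 1024 times)"
m = re.search(r"Topic:\s*(.+?)\s*\(Read\s+(\d+)\s+times\)", header)
print(m.group(1), int(m.group(2)))   # -> Example topic title 1024

# sample author_inf text from .poster_info .smalltext (illustrative)
author_inf = "Hero Member Activity: 1200 Merit: 500"
rank = re.search(r"^(Sr\. Member|Hero Member|Legendary|Full Member|Member|Newbie)", author_inf)
activity = re.search(r"Activity:\s*(\d+)", author_inf)
merit = re.search(r"Merit:\s*(\d+)", author_inf)
print(rank.group(1), int(activity.group(1)), int(merit.group(1)))  # -> Hero Member 1200 500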
✅ Success and Failure Handling
On success, append_post() and save_done_urls() write the post and the progress to disk immediately.
If a request or parse fails, the error is printed and the crawler moves on to the next URL, so a single failure never interrupts the whole run.
The logs use clear status markers (✅, ⚠️, ❌, etc.).
📚 Example Data Output
The results are stored in RESULTS_FILE as a JSON array; each element is the structured data of one complete thread, ready for downstream NLP, classification, sentiment analysis, and similar processing.
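For example, a downstream script can load the result file directly. The path below is RESULTS_FILE as composed from SAVE_DIR and board in the configuration above; adjust it to your own settings.

import json
from collections import Counter

# RESULTS_FILE as built by the crawler config above
RESULTS_FILE = "../bitcointalk/Bitcoin Development_Technical_Discussionbitcointalk_results.json"

with open(RESULTS_FILE, encoding="utf-8") as f:
    threads = json.load(f)

print(len(threads), "threads loaded")
# e.g. which poster ranks most often start threads
print(Counter(t["rank"] for t in threads).most_common(5))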