import requests
from bs4 import BeautifulSoup
import time
import random
import re
import logging
from urllib.parse import urlparse
import os  # os is used for file operations (removing stale output files)

# Configure logging
logging.basicConfig(filename="Records.log", level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

def is_valid_url(url):
    # Validate that the URL has an http/https scheme and a host
    parsed = urlparse(url)
    return bool(parsed.scheme in ["http", "https"] and parsed.netloc)
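
# Illustrative examples (not part of the original script) of what is_valid_url accepts:
#   is_valid_url("https://example.com")  -> True
#   is_valid_url("example.com")          -> False (no scheme)
#   is_valid_url("ftp://example.com")    -> False (scheme is not http/https)
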
def sanitize_filename(filename):
    # Replace any character that is not a word character or whitespace with "_"
    return re.sub(r"[^\w\s]", "_", filename)
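
# Illustrative example (not part of the original script): dots, slashes and other
# punctuation are rewritten to underscores, e.g.
#   sanitize_filename("example.com_index") -> "example_com_index"
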
def pachong(url):
    # Crawl all links found on the page
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"
    }

    try:
        response = requests.get(url, headers=headers, timeout=10)  # timeout so a stalled server cannot hang the loop
        response.raise_for_status()  # Raise if the request failed
        soup = BeautifulSoup(response.text, "html.parser")

        # Extract all links
        links = set()  # Use a set to deduplicate
        for a_tag in soup.find_all("a", href=True):
            link = a_tag["href"]
            links.add(link)  # Add to the set

        logging.info(f"Crawled {len(links)} links from {url}")
        print(f"Crawled {len(links)} links from {url}")
        return links

    except requests.RequestException as e:
        logging.error(f"Could not fetch {url}: {e}")
        print(f"Could not fetch {url}: {e}")
        return set()  # Return an empty set so callers always receive the same type
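
# Optional sketch (an assumption, not part of the original pachong logic): the hrefs
# collected above may be relative ("/about", "page.html"). If absolute URLs are
# needed, urllib.parse.urljoin can resolve them against the page URL:
#
#   from urllib.parse import urljoin
#   absolute_links = {urljoin(url, link) for link in links}
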
def main():
    # Main entry point
    urls = input("Enter website URLs, separated by commas if there is more than one: ").strip().split(",")

    for url in urls:
        url = url.strip()  # Remove surrounding whitespace
        if not is_valid_url(url):
            logging.warning(f"Invalid URL: {url}, skipped")
            print(f"Invalid URL: {url}, skipped")
            continue

        print(f"Crawling {url}...")
        logging.info(f"Crawling {url}...")
        links = pachong(url)

        if links:
            # Create a separate output file for each site; sanitize the name first,
            # then append the extension so ".txt" is not mangled by the sanitizer
            filename = sanitize_filename(url.replace("https://", "").replace("http://", "").replace("www.", "").replace("/", "_")) + ".txt"

            # If the file already exists, remove the old one first
            if os.path.exists(filename):
                logging.info(f"Found existing file {filename}; it will be replaced with the new results.")
                print(f"Found existing file {filename}; it will be replaced with the new results")
                os.remove(filename)

            # Write the newly crawled links
            with open(filename, "w", encoding="utf-8") as file:
                for link in links:
                    file.write(link + "\n")

            logging.info(f"Links saved to {filename}")
            print(f"Links saved to {filename}")

        time.sleep(random.uniform(1, 3))  # Random delay to avoid putting too much load on the servers

    print("Links for every site have been saved to their own file")
    logging.info("Links for every site have been saved to their own file")
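
# Illustrative run (assumed input, not from the original): entering
# "https://www.example.com/blog/" would produce the output file
# "example_com_blog_.txt", with one crawled link per line.
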
if __name__ == "__main__":
    main()