import requests
import re
import time

# Index page listing every chapter of the novel, and the site root used to
# turn the relative chapter hrefs into absolute URLs.
BOOK_URL = 'http://www.yuxuange.com/html/31192/index.html'
BASE_URL = 'http://www.yuxuange.com/'

# Pretend to be a desktop browser so the site serves the crawler normally.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.50',
}

# Relative chapter link inside each <dd> entry of the index page.
# NOTE(review): made the group non-greedy so each <dd> yields its own match
# (the original greedy (.*) would swallow across entries on one line).  The
# pattern still looks odd — there is no '>' closing the <a> tag before
# '</a>' — confirm against the site's actual markup.
CHAPTER_HREF_REGX = r"<dd><a href='(.*?)'</a></dd>"
# One paragraph of chapter text: an '&  ' indent prefix up to the <br />.
CONTENT_REGX = r'&  (.*?)<br />'
# Chapter title inside the article header div.
TITLE_REGX = r'<div class="atitle">(.*?)</div>'

# Output file named after the novel; chapters are appended as they arrive.
SAVE_PATH = '都市至尊仙帝.txt'


def fetch(url):
    """GET *url* with the browser headers and return the decoded HTML.

    The site intermittently answers with a 503 error page; keep retrying
    every 3 seconds until a real page comes back (same retry loop the
    original applied to chapter pages, now used for the index page too).
    """
    while True:
        response = requests.get(url, headers=HEADERS)
        response.encoding = 'utf-8'
        if '503 Service Temporarily Unavailable' not in response.text:
            return response.text
        print('漏数据了,3 秒之后继续爬')
        time.sleep(3)


def build_chapter_urls(index_html):
    """Extract relative chapter hrefs from *index_html* and join each onto
    the site root, preserving the on-page (reading) order."""
    return [BASE_URL + href for href in re.findall(CHAPTER_HREF_REGX, index_html)]


def write_chapter(chapter_html, out_file):
    """Write one chapter (title banner plus paragraphs) to *out_file*.

    Returns the chapter title.  Raises IndexError when no title is found,
    matching the original script's title[0] behaviour.
    """
    title = re.findall(TITLE_REGX, chapter_html)[0]
    out_file.write('--------' + title + '--------' + '\n')
    for paragraph in re.findall(CONTENT_REGX, chapter_html):
        out_file.write(paragraph + '\n')
    return title


def main():
    """Scrape every chapter of the book into SAVE_PATH, one pass only.

    (The original file contained the whole script pasted twice, which would
    have downloaded and appended the entire book twice.)
    """
    chapter_urls = build_chapter_urls(fetch(BOOK_URL))
    # 'a+' append mode as in the original, so an interrupted run can be
    # resumed without truncating what was already saved.
    with open(SAVE_PATH, 'a+', encoding="utf-8") as f:
        for count, url in enumerate(chapter_urls, start=1):
            title = write_chapter(fetch(url), f)
            print('第{}章 标题 : {} 爬取完毕!'.format(count, title))


if __name__ == '__main__':
    main()