Update zhihu.py

This commit is contained in:
guanjihuan 2021-11-17 11:44:18 +08:00
parent a17e276459
commit 5c7d70614a

View File

@ -3,7 +3,6 @@ This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/17937
"""
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
@ -14,16 +13,7 @@ month = datetime.datetime.now().month
day = datetime.datetime.now().day
# 获取链接
try: match_href = []
with open('link_list.txt', 'r', encoding='UTF-8') as f: # 如果文件存在
link_list = f.read().split('\n') # 历史已经访问过的链接(数组类型)
except:
with open('link_list.txt', 'w', encoding='UTF-8') as f: # 如果文件不存在
link_list = []
f = open('link_list.txt', 'a', encoding='UTF-8') # 打开文件(补充)
f.write('\nLink list obtained on '+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+':\n')
match_href = [] # 在本次运行中满足条件的链接
# 由于没有模拟登录知乎,因此只能爬取到最新的两篇专栏博文
authors = ["https://www.zhihu.com/people/g3508/posts",  # Guan
           ]
@ -37,8 +27,7 @@ for i0 in range(len(authors)):
if re.search('//zhuanlan.zhihu.com/p/', href):  # 文章的链接
    if re.search('https:', href)==None:  # 如果链接不是完整的,那么补充完整
        href = 'https:'+ href
if href not in match_href and href not in link_list and re.search('\?', href)==None: # 链接不重复 match_href.append(href)
match_href.append(href)
# 对链接进行排序并写入文件
numbers = []
match_href_new = []
@ -46,18 +35,12 @@ for href in match_href:
    numbers.append(int(href[29:]))
numbers.sort(reverse = True)
for n in numbers:
f.write('https://zhuanlan.zhihu.com/p/'+str(n)+'\n')
    match_href_new.append('https://zhuanlan.zhihu.com/p/'+str(n))
f.close()
# 获取内容并写入文件 # 获取内容并写入文件
try: f = open('zhihu.html', 'w', encoding='UTF-8')
f_before = open('zhihu.txt', 'r', encoding='UTF-8') f.write('<meta charset="utf-8">')
data = f_before.read() f.write('<p>'+str(year)+'.'+str(month).rjust(2,'0')+'.'+str(day).rjust(2,'0')+' 已更新</p>')
f = open('zhihu.txt', 'w', encoding='UTF-8')
except:
f = open('zhihu.txt', 'w', encoding='UTF-8')
for href in match_href_new:
    try:
        html = urlopen(href).read().decode('utf-8')  # 打开文章链接
@ -72,21 +55,6 @@ for href in match_href_new:
        f.write(str(author.get_text()+'&nbsp;&nbsp;'))
        post_time = soup.find("div", {"class" : "ContentItem-time"})
        f.write(str(post_time.get_text())+'</p>')
    except:
        pass
try:
f.write(data)
f_before.close()
except:
pass
f.close()
# 制作HTML
f_html = open('zhihu.html', 'w', encoding='UTF-8')
f_html.write('<meta charset="utf-8">')
f = open('zhihu.txt', 'r', encoding='UTF-8')
data = f.read()
f_html.write(data)
f.close()
f_html.close()