"""Scrape the newest research-article links from Nature Physics and write
them into a local HTML page (nature_physics.html).

NOTE(review): this script was recovered from a garbled diff; the literal
HTML fragments passed to f.write() were stripped during extraction and are
reconstructed minimally below -- confirm against the upstream script.
"""
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
import datetime


# Snapshot "now" once so year/month/day cannot straddle midnight
# (the original called datetime.datetime.now() three separate times).
now = datetime.datetime.now()
date_label = str(now.year) + '.' + str(now.month).rjust(2, '0') + '.' + str(now.day).rjust(2, '0')

start_link = "https://www.nature.com/nphys/research-articles"
html = urlopen(start_link).read().decode('utf-8')  # fetch the listing page
soup = BeautifulSoup(html, features='lxml')
all_article = soup.find_all('article', {"class": "u-full-height c-card c-card--flush"})

match_href = []  # article links already written (deduplication)
# 'with' guarantees the file handle is closed even if parsing raises
# (the original used open()/f.close() with no protection).
with open('nature_physics.html', 'w', encoding='UTF-8') as f:
    f.write('<meta charset="UTF-8">')
    f.write('<p>' + date_label + ' 已更新</p>')
    for article in all_article:
        for a_tag in article.find_all('a', href=True):  # every hyperlink tag
            href = a_tag['href']
            if re.search('/articles/', href) is None:
                continue  # not an article link
            if re.search('https://www.nature.com', href) is None:
                href = 'https://www.nature.com' + href  # absolutize relative links
            # Skip duplicates and links carrying a query string.  The raw
            # string r'\?' fixes the invalid escape sequence '\?' used by
            # the original (a SyntaxWarning on modern Python).
            if href in match_href or re.search(r'\?', href) is not None:
                continue
            match_href.append(href)
            f.write('<li><a target="_blank" href="' + href + '">')
            f.write(a_tag.get_text())
            f.write('</a>&nbsp;&nbsp;')
            # publication date shown next to the title
            date = article.find('time', {"class": "c-meta__item c-meta__item--block-at-lg"}).get_text()
            f.write(date + '</li>')

"""Scrape the latest article links from the APS Physics magazine front page
and write them into a local HTML page (physics_magazine.html).

NOTE(review): this script was recovered from a garbled diff; the literal
HTML fragments passed to f.write() were stripped during extraction and are
reconstructed minimally below -- confirm against the upstream script.
"""
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
import datetime


# Snapshot "now" once so year/month/day cannot straddle midnight
# (the original called datetime.datetime.now() three separate times).
now = datetime.datetime.now()
date_label = str(now.year) + '.' + str(now.month).rjust(2, '0') + '.' + str(now.day).rjust(2, '0')

start_link = "https://physics.aps.org/"
html = urlopen(start_link).read().decode('utf-8')  # fetch the front page
soup = BeautifulSoup(html, features='lxml')
all_articles = soup.find_all('div', {"class": "feed-item-details"})

match_href = []  # article links already written (deduplication)
# 'with' guarantees the file handle is closed even if parsing raises
# (the original used open()/f.close() with no protection).
with open('physics_magazine.html', 'w', encoding='UTF-8') as f:
    f.write('<meta charset="UTF-8">')
    f.write('<p>' + date_label + ' 已更新</p>')
    for article in all_articles:
        for a_tag in article.find_all('a', href=True):  # every hyperlink tag
            href = a_tag['href']
            if re.search('/articles/', href) is None:
                continue  # not an article link
            if re.search('https://physics.aps.org', href) is None:
                href = 'https://physics.aps.org' + href  # absolutize relative links
            if href in match_href:
                continue  # already written
            match_href.append(href)
            f.write('<li><a target="_blank" href="' + href + '">')
            f.write(a_tag.get_text())
            f.write('</a>&nbsp;&nbsp;')
            # publication date shown next to the title
            date = article.find('time', {"class": "feed-item-date"}).get_text()
            f.write(date + '</li>')

"""Scrape the most recent article links from Physical Review B and write
them into a local HTML page (prb.html).

NOTE(review): this script was recovered from a garbled diff; the literal
HTML fragments passed to f.write() were stripped during extraction and are
reconstructed minimally below -- confirm against the upstream script.
"""
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
import datetime


# Snapshot "now" once so year/month/day cannot straddle midnight
# (the original called datetime.datetime.now() three separate times).
now = datetime.datetime.now()
date_label = str(now.year) + '.' + str(now.month).rjust(2, '0') + '.' + str(now.day).rjust(2, '0')

# First page of recent PRB articles.  To also fetch page 2, add
# "https://journals.aps.org/prb/recent?page=2" (the original kept this as
# a dead single-iteration loop with a commented-out elif branch).
start_link = "https://journals.aps.org/prb/recent"
html = urlopen(start_link).read().decode('utf-8')  # fetch the listing page
soup = BeautifulSoup(html, features='lxml')
all_article = soup.find_all('div', {"class": "article panel article-result"})

match_href = []  # article links already written (deduplication)
# 'with' guarantees the file handle is closed even if parsing raises
# (the original used open()/f.close() with no protection).
with open('prb.html', 'w', encoding='UTF-8') as f:
    f.write('<meta charset="UTF-8">')
    f.write('<p>' + date_label + ' 已更新</p>')
    for article in all_article:
        for a_tag in article.find_all('a', href=True):  # every hyperlink tag
            href = a_tag['href']
            if re.search('/abstract/', href) is None:
                continue  # not an article abstract link
            if re.search('https://journals.aps.org', href) is None:
                href = 'https://journals.aps.org' + href  # absolutize relative links
            # Skip duplicates and links carrying a query string.  The raw
            # string r'\?' fixes the invalid escape sequence '\?' used by
            # the original (a SyntaxWarning on modern Python).
            if href in match_href or re.search(r'\?', href) is not None:
                continue
            match_href.append(href)
            f.write('<li><a target="_blank" href="' + href + '">')
            f.write(a_tag.get_text())
            f.write('</a>&nbsp;&nbsp;')
            # The pub-info line ends with "– Published <date>"; slicing off
            # the first 12 characters keeps only the date text.
            info = article.find('h6', {"class": "pub-info"}).get_text()
            f.write(re.findall('– Published .*', info, re.S)[0][12:] + '</li>')

"""Scrape the most recent article links from Physical Review Letters and
write them into a local HTML page (prl.html).

NOTE(review): this script was recovered from a garbled diff; the literal
HTML fragments passed to f.write() were stripped during extraction and are
reconstructed minimally below -- confirm against the upstream script.
"""
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
import datetime


# Snapshot "now" once so year/month/day cannot straddle midnight
# (the original called datetime.datetime.now() three separate times).
now = datetime.datetime.now()
date_label = str(now.year) + '.' + str(now.month).rjust(2, '0') + '.' + str(now.day).rjust(2, '0')

# First page of recent PRL articles.  To also fetch page 2, add
# "https://journals.aps.org/prl/recent?page=2" (the original kept this as
# a dead single-iteration loop with a commented-out elif branch).
start_link = "https://journals.aps.org/prl/recent"
html = urlopen(start_link).read().decode('utf-8')  # fetch the listing page
soup = BeautifulSoup(html, features='lxml')
all_article = soup.find_all('div', {"class": "article panel article-result"})

match_href = []  # article links already written (deduplication)
# 'with' guarantees the file handle is closed even if parsing raises
# (the original used open()/f.close() with no protection).
with open('prl.html', 'w', encoding='UTF-8') as f:
    f.write('<meta charset="UTF-8">')
    f.write('<p>' + date_label + ' 已更新</p>')
    for article in all_article:
        for a_tag in article.find_all('a', href=True):  # every hyperlink tag
            href = a_tag['href']
            if re.search('/abstract/', href) is None:
                continue  # not an article abstract link
            if re.search('https://journals.aps.org', href) is None:
                href = 'https://journals.aps.org' + href  # absolutize relative links
            # Skip duplicates and links carrying a query string.  The raw
            # string r'\?' fixes the invalid escape sequence '\?' used by
            # the original (a SyntaxWarning on modern Python).
            if href in match_href or re.search(r'\?', href) is not None:
                continue
            match_href.append(href)
            f.write('<li><a target="_blank" href="' + href + '">')
            f.write(a_tag.get_text())
            f.write('</a>&nbsp;&nbsp;')
            # The pub-info line ends with "– Published <date>"; slicing off
            # the first 12 characters keeps only the date text.  (Pattern
            # preserved from the original: no space after "Published".)
            info = article.find('h6', {"class": "pub-info"}).get_text()
            f.write(re.findall('– Published.*', info, re.S)[0][12:] + '</li>')