This repository has been archived by the owner on Jun 15, 2022. It is now read-only.

Commit

Update news.yandex.ru
PaveTranquil committed Sep 29, 2019
1 parent 2e62899 commit 50131e9
Showing 1 changed file with 6 additions and 6 deletions.
12 changes: 6 additions & 6 deletions news.py
@@ -8,12 +8,12 @@
 def refresh_gadgets():
     global headers_gadgets, links_gadgets, descs_gadgets, originals_gadgets
     headers_gadgets, links_gadgets, descs_gadgets, originals_gadgets = [], [], [], []
-    site = requests.get('https://m.news.yandex.ru/gadgets.html')
+    site = requests.get('https://news.yandex.ru/gadgets.html')
     soup = bs(site.text, 'html.parser')
     code_headers = soup.find(class_='page-content').find_all(class_='link link_theme_black i-bem')
 
     for i in range(0, 8):
-        links_gadgets.append('https://m.news.yandex.ru' + code_headers[i].get('href'))
+        links_gadgets.append('https://yandex.ru' + code_headers[i].get('href'))
         headers_gadgets.append(code_headers[i].contents[0])
 
     for i in range(0, 8):
@@ -26,12 +26,12 @@ def refresh_gadgets():
 def refresh_internet():
     global headers_internet, links_internet, descs_internet, originals_internet
     headers_internet, links_internet, descs_internet, originals_internet = [], [], [], []
-    site = requests.get('https://m.news.yandex.ru/internet.html')
+    site = requests.get('https://news.yandex.ru/internet.html')
     soup = bs(site.text, 'html.parser')
     code_headers = soup.find(class_='page-content').find_all(class_='link link_theme_black i-bem')
 
     for i in range(0, 8):
-        links_internet.append('https://m.news.yandex.ru' + code_headers[i].get('href'))
+        links_internet.append('https://yandex.ru' + code_headers[i].get('href'))
         headers_internet.append(code_headers[i].contents[0])
 
     for i in range(0, 8):
@@ -44,12 +44,12 @@ def refresh_internet():
 def refresh_games():
     global headers_games, links_games, descs_games, originals_games
     headers_games, links_games, descs_games, originals_games = [], [], [], []
-    site = requests.get('https://m.news.yandex.ru/games.html')
+    site = requests.get('https://news.yandex.ru/games.html')
     soup = bs(site.text, 'html.parser')
     code_headers = soup.find(class_='page-content').find_all(class_='link link_theme_black i-bem')
 
     for i in range(0, 8):
-        links_games.append('https://m.news.yandex.ru' + code_headers[i].get('href'))
+        links_games.append('https://yandex.ru' + code_headers[i].get('href'))
         headers_games.append(code_headers[i].contents[0])
 
     for i in range(0, 8):
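For context, a minimal, self-contained sketch of refresh_gadgets() as it reads after this commit. The imports and the module-level lists are assumptions, since they sit outside the visible hunks; the alias bs is taken from the diff and presumed to be BeautifulSoup from bs4. The second loop, which fills descs_gadgets and originals_gadgets, is cut off at the hunk boundary and is not reproduced here.

    # Sketch of refresh_gadgets() after this commit.
    # Imports and module-level lists are assumed; the original news.py defines them elsewhere.
    import requests
    from bs4 import BeautifulSoup as bs

    headers_gadgets, links_gadgets, descs_gadgets, originals_gadgets = [], [], [], []

    def refresh_gadgets():
        global headers_gadgets, links_gadgets, descs_gadgets, originals_gadgets
        headers_gadgets, links_gadgets, descs_gadgets, originals_gadgets = [], [], [], []
        # Fetch the desktop news page (this commit replaces the m.news.yandex.ru URLs).
        site = requests.get('https://news.yandex.ru/gadgets.html')
        soup = bs(site.text, 'html.parser')
        code_headers = soup.find(class_='page-content').find_all(class_='link link_theme_black i-bem')

        # First eight stories: build absolute links and collect headline text.
        for i in range(0, 8):
            links_gadgets.append('https://yandex.ru' + code_headers[i].get('href'))
            headers_gadgets.append(code_headers[i].contents[0])

Calling refresh_gadgets() re-fetches https://news.yandex.ru/gadgets.html and repopulates the headline and link lists for the first eight stories; refresh_internet() and refresh_games() follow the same pattern against their respective pages.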
