Merged
Changes from all commits
19 commits
abc60bc
Changed VkAudio to match changes on the VK side
qwertyadrian Feb 13, 2020
0e30675
Added handling for the case where data_audio[13] has 6 elements,…
qwertyadrian Feb 17, 2020
7b9bbe4
Calls to the scrap_data functions from the VkAudio class now pass the argument VkA…
qwertyadrian Feb 17, 2020
5ca54c2
Added a key to the track info dictionary containing a list of link…
qwertyadrian Feb 17, 2020
3f3a52a
Fixed TypeError
qwertyadrian Feb 17, 2020
62d2514
Disabled sending playlists from a post, since it is not needed.
qwertyadrian Feb 17, 2020
be2f78f
Increased TRACKS_PER_USER_PAGE to 100, since with a value of…
qwertyadrian Mar 9, 2020
775e9ec
Class methods (except get_albums_iter, get_audio_by_id and get_post_audi…
qwertyadrian Jul 5, 2020
523b105
Fixed incorrect behavior of the VkAudio get_iter method when…
qwertyadrian Jul 5, 2020
3957e58
Added a method for getting friends' updates.
qwertyadrian Jul 6, 2020
6ac9002
first commit lol
dashedman Jul 6, 2020
47a8250
Got a bruised backside
dashedman Jul 7, 2020
2cbff3d
Merge branch 'audio_patch' into master
dashedman Jul 7, 2020
cef0c7d
Fixed how popular and new tracks are fetched
dashedman Jul 8, 2020
57bebc6
Merge branch 'master' of https://github.com/dashedman/vk_api
dashedman Jul 8, 2020
da65def
forgot to hit ctrl+s
dashedman Jul 8, 2020
509fac5
fix infinite loop in audio search
dashedman May 3, 2021
493207e
Merge branch 'master' into master
dashedman May 3, 2021
b5d582d
Clean audio.py
python273 May 3, 2021
94 changes: 33 additions & 61 deletions vk_api/audio.py
@@ -128,6 +128,8 @@ def get_iter(self, owner_id=None, album_id=None, access_hash=None):
ids = scrap_ids(
response['data'][0]['list']
)
if not ids:
break

tracks = scrap_tracks(
ids,
@@ -136,9 +138,6 @@ def get_iter(self, owner_id=None, album_id=None, access_hash=None):
convert_m3u8_links=self.convert_m3u8_links
)

if not tracks:
break

for i in tracks:
yield i
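
For context on the get_iter hunks above: the emptiness check moves from the scraped tracks to the scraped ids, so an empty playlist page ends iteration before any per-track reload requests are made. A minimal, self-contained sketch of that loop shape follows; fetch_page_ids and fetch_tracks are hypothetical stand-ins for the VK request plus scrap_ids/scrap_tracks, not the library's API.

# Illustrative only: fetch_page_ids / fetch_tracks are hypothetical callables
# standing in for the VK page request + scrap_ids and scrap_tracks.
def iter_tracks(fetch_page_ids, fetch_tracks):
    offset = 0
    while True:
        ids = fetch_page_ids(offset)   # one scraped page of audio ids
        if not ids:                    # empty page: stop before issuing
            break                      # any per-track reload requests
        for track in fetch_tracks(ids):
            yield track
        offset += len(ids)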

@@ -290,6 +289,8 @@ def search_iter(self, q, offset=0):
ids = scrap_ids(
json_response['payload'][1][1]['playlist']['list']
)
if not ids:
break

if offset_left + len(ids) >= offset:
if offset_left < offset:
@@ -302,9 +303,6 @@ def search_iter(self, q, offset=0):
http=self._vk.http
)

if not tracks:
break

for track in tracks:
yield track

@@ -343,6 +341,8 @@ def get_updates_iter(self):
ids = scrap_ids(
[i[0] for i in updates if i]
)
if not ids:
break

tracks = scrap_tracks(
ids,
@@ -351,9 +351,6 @@ def get_updates_iter(self):
http=self._vk.http
)

if not tracks:
break

for track in tracks:
yield track

@@ -381,29 +378,21 @@ def get_popular_iter(self, offset=0):
'https://vk.com/audio',
data={
'block': 'chart',
'section': 'explore'
'section': 'recoms'
}
)
json_response = json.loads(scrap_json(response.text))

ids = scrap_ids(
json_response['sectionData']['explore']['playlist']['list']
json_response['sectionData']['recoms']['playlist']['list']
)

if offset:
tracks = scrap_tracks(
ids[offset:],
self.user_id,
convert_m3u8_links=self.convert_m3u8_links,
http=self._vk.http
)
else:
tracks = scrap_tracks(
ids,
self.user_id,
convert_m3u8_links=self.convert_m3u8_links,
http=self._vk.http
)
tracks = scrap_tracks(
ids[offset:] if offset else ids,
self.user_id,
convert_m3u8_links=self.convert_m3u8_links,
http=self._vk.http
)

for track in tracks:
yield track
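
The get_popular_iter hunk above makes two changes: the catalog request now asks for the 'recoms' section instead of 'explore' (and reads sectionData['recoms'] from the response), and the two near-duplicate scrap_tracks calls collapse into one call with a conditional slice. A rough sketch of the request side, assuming a plain requests.Session in place of the authorized self._vk.http session; cookies, authentication, and any other form fields the real method sends are omitted, so this will not return usable data by itself.

# Sketch only: an authorized VK session is required for real data.
import requests

session = requests.Session()
response = session.post(
    'https://vk.com/audio',
    data={'block': 'chart', 'section': 'recoms'},  # previously 'section': 'explore'
)
# ids are then taken from sectionData['recoms']['playlist']['list'],
# and the offset is applied once: ids[offset:] if offset else ids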
@@ -420,30 +409,22 @@ def get_news_iter(self, offset=0):
'https://vk.com/audio',
data={
'block': 'new_songs',
'section': 'explore'
'section': 'recoms'
}
)
json_response = json.loads(scrap_json(response.text))

ids = scrap_ids(
json_response['sectionData']['explore']['playlist']['list']
json_response['sectionData']['recoms']['playlist']['list']
)

if offset_left + len(ids) >= offset:
if offset_left >= offset:
tracks = scrap_tracks(
ids,
self.user_id,
convert_m3u8_links=self.convert_m3u8_links,
http=self._vk.http
)
else:
tracks = scrap_tracks(
ids[offset - offset_left:],
self.user_id,
convert_m3u8_links=self.convert_m3u8_links,
http=self._vk.http
)
tracks = scrap_tracks(
ids if offset_left >= offset else ids[offset - offset_left:],
self.user_id,
convert_m3u8_links=self.convert_m3u8_links,
http=self._vk.http
)

for track in tracks:
yield track
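
The offset handling in the get_news_iter hunk above is easier to see with concrete numbers; a small worked sketch of the slice performed by the new single scrap_tracks call (all values here are made up).

# Hypothetical numbers illustrating the offset slice in get_news_iter.
offset = 30                 # caller wants to skip the first 30 tracks
offset_left = 20            # tracks already covered by previous pages
ids = list(range(25))       # ids scraped from the current page

page_ids = ids if offset_left >= offset else ids[offset - offset_left:]
assert page_ids == list(range(10, 25))   # the first 30 - 20 = 10 ids are skipped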
@@ -456,8 +437,8 @@ def get_news_iter(self, offset=0):
data={
'al': 1,
'act': 'load_catalog_section',
'section_id': json_response['sectionData']['explore']['sectionId'],
'start_from': json_response['sectionData']['explore']['nextFrom']
'section_id': json_response['sectionData']['recoms']['sectionId'],
'start_from': json_response['sectionData']['recoms']['nextFrom']
}
)

@@ -466,25 +447,16 @@
ids = scrap_ids(
json_response['payload'][1][1]['playlist']['list']
)
if not ids:
break

if offset_left + len(ids) >= offset:
if offset_left >= offset:
tracks = scrap_tracks(
ids,
self.user_id,
convert_m3u8_links=self.convert_m3u8_links,
http=self._vk.http
)
else:
tracks = scrap_tracks(
ids[offset - offset_left:],
self.user_id,
convert_m3u8_links=self.convert_m3u8_links,
http=self._vk.http
)

if not tracks:
break
tracks = scrap_tracks(
ids if offset_left >= offset else ids[offset - offset_left:],
self.user_id,
convert_m3u8_links=self.convert_m3u8_links,
http=self._vk.http
)

for track in tracks:
yield track
@@ -561,7 +533,7 @@ def scrap_ids(audio_data):


def scrap_json(html_page):
""" Парсинг списка хэшей ауфдиозаписей новинок или популярных + nextFrom&sessionId """
""" Парсинг списка хэшей аудиозаписей новинок или популярных + nextFrom&sessionId """

find_json_pattern = r"new AudioPage\(.*?(\{.*\})"
fr = re.search(find_json_pattern, html_page).group(1)
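
The scrap_json docstring fix above sits next to the regex that does the extraction; a toy example of what that pattern captures is sketched below. The HTML string is fabricated — a real VK audio page embeds a much larger AudioPage object.

# Toy input only: real pages embed a much larger object after new AudioPage(.
import json
import re

html_page = 'init(new AudioPage(ap, {"sectionData": {"recoms": {"sectionId": 17}}}));'
raw = re.search(r"new AudioPage\(.*?(\{.*\})", html_page).group(1)
data = json.loads(raw)
print(data['sectionData']['recoms']['sectionId'])  # -> 17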