cornac/data/graph.py (2 changes: 1 addition & 1 deletion)

@@ -15,7 +15,7 @@

 import scipy.sparse as sp
 import numpy as np
-from tqdm import trange
+from tqdm.auto import trange

 from . import FeatureModality

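For context on the one-line change above: `tqdm.auto` selects a frontend at import time, so the same code renders an ipywidgets progress bar under Jupyter and a plain text bar in a terminal. A minimal sketch of that behavior, assuming a tqdm build that ships `tqdm.auto` (the loop body is illustrative):

# Minimal sketch: the import below resolves to the notebook widget
# under Jupyter and to the console renderer everywhere else.
from tqdm.auto import trange

for epoch in trange(10, desc="epochs"):
    pass  # one unit of work per iteration (illustrative)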
cornac/models/bpr/recom_bpr.cpp (14 changes: 7 additions & 7 deletions)

Generated file, not rendered by default: recom_bpr.cpp is the C++ source that Cython emits from recom_bpr.pyx, presumably regenerated alongside the one-line import change below.

cornac/models/bpr/recom_bpr.pyx (2 changes: 1 addition & 1 deletion)

@@ -26,7 +26,7 @@ from libcpp.algorithm cimport binary_search

 import numpy as np
 cimport numpy as np
-from tqdm import trange
+from tqdm.auto import trange

 from ..recommender import Recommender
 from ...exception import ScoreException
cornac/utils/download.py (33 changes: 17 additions & 16 deletions)

@@ -19,7 +19,7 @@
 import tarfile
 from urllib import request

-from tqdm import tqdm
+from tqdm.auto import tqdm


 def _urlretrieve(url, fpath):
@@ -35,9 +35,10 @@ def _urlretrieve(url, fpath):

     """
     opener = request.build_opener()
-    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
+    opener.addheaders = [("User-agent", "Mozilla/5.0")]

-    with tqdm(unit='B', unit_scale=True) as progress:
+    with tqdm(unit="B", unit_scale=True) as progress:
+
         def report(chunk, chunksize, total):
             progress.total = total
             progress.update(chunksize)
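The `report` callback above follows the reporthook contract of `urllib.request.urlretrieve`: it is invoked after each downloaded chunk with the block number, the block size in bytes, and the total size (which may be -1 when the server does not report it). A self-contained sketch of the same wiring, where `download` is a hypothetical stand-in for `_urlretrieve`:

# Sketch of the reporthook pattern, not the exact cornac implementation.
from urllib import request

from tqdm.auto import tqdm


def download(url, fpath):  # hypothetical helper mirroring _urlretrieve
    with tqdm(unit="B", unit_scale=True) as progress:

        def report(chunk, chunksize, total):
            progress.total = total      # total bytes; may be -1 if unknown
            progress.update(chunksize)  # advance the bar by one chunk

        request.urlretrieve(url, fpath, reporthook=report)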
@@ -46,14 +47,14 @@ def report(chunk, chunksize, total):
         request.urlretrieve(url, fpath, reporthook=report)


-def _extract_archive(file_path, extract_path='.'):
+def _extract_archive(file_path, extract_path="."):
     """Extracts an archive.
     """
-    for archive_type in ['zip', 'tar']:
-        if archive_type == 'zip':
+    for archive_type in ["zip", "tar"]:
+        if archive_type == "zip":
             open_fn = zipfile.ZipFile
             is_match_fn = zipfile.is_zipfile
-        elif archive_type == 'tar':
+        elif archive_type == "tar":
             open_fn = tarfile.open
             is_match_fn = tarfile.is_tarfile

@@ -73,13 +74,13 @@ def _extract_archive(file_path, extract_path='.'):
 def get_cache_path(relative_path, cache_dir=None):
     """Return the absolute path to the cached data file
     """
-    if cache_dir is None and os.access(os.path.expanduser('~'), os.W_OK):
-        cache_dir = os.path.join(os.path.expanduser('~'), '.cornac')
+    if cache_dir is None and os.access(os.path.expanduser("~"), os.W_OK):
+        cache_dir = os.path.join(os.path.expanduser("~"), ".cornac")
     if not os.path.exists(cache_dir):
         os.makedirs(cache_dir)

     if not os.access(cache_dir, os.W_OK):
-        cache_dir = os.path.join('/tmp', '.cornac')
+        cache_dir = os.path.join("/tmp", ".cornac")
     cache_path = os.path.join(cache_dir, relative_path)

     if not os.path.exists(os.path.dirname(cache_path)):
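The hunk above implements a fallback chain: an explicit `cache_dir` wins, otherwise `~/.cornac` is used when the home directory is writable, with `/tmp/.cornac` as the last resort. A condensed sketch of that order (the helper name is hypothetical and the writability re-check of a caller-supplied directory is omitted):

# Sketch of the resolution order, not the exact function body.
import os


def resolve_cache_dir(cache_dir=None):  # hypothetical helper
    home = os.path.expanduser("~")
    if cache_dir is None:
        if os.access(home, os.W_OK):
            cache_dir = os.path.join(home, ".cornac")
        else:
            cache_dir = os.path.join("/tmp", ".cornac")  # last resort
    os.makedirs(cache_dir, exist_ok=True)  # create on demand
    return cache_dir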
@@ -108,22 +109,22 @@ def cache(url, unzip=False, relative_path=None, cache_dir=None):

     """
     if relative_path is None:
-        relative_path = url.split('/')[-1]
+        relative_path = url.split("/")[-1]
     cache_path, cache_dir = get_cache_path(relative_path, cache_dir)
     if os.path.exists(cache_path):
         return cache_path

-    print('Data from', url)
-    print('will be cached into', cache_path)
+    print("Data from", url)
+    print("will be cached into", cache_path)

     if unzip:
-        tmp_path = os.path.join(cache_dir, 'file.tmp')
+        tmp_path = os.path.join(cache_dir, "file.tmp")
         _urlretrieve(url, tmp_path)
-        print('Unzipping ...')
+        print("Unzipping ...")
         _extract_archive(tmp_path, cache_dir)
         os.remove(tmp_path)
     else:
         _urlretrieve(url, cache_path)

-    print('File cached!')
+    print("File cached!")
     return cache_path
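End to end, the patched module downloads a file once and returns the cached copy on later calls. A hedged usage sketch, assuming `cache` is re-exported by `cornac.utils` (otherwise import it from `cornac.utils.download`); the URL and relative path are illustrative, not taken from this PR:

# Usage sketch with an illustrative URL.
from cornac.utils import cache

fpath = cache(
    url="https://example.com/datasets/ml-100k.zip",  # illustrative URL
    unzip=True,
    relative_path="ml-100k/u.data",  # file expected inside the archive
)
print(fpath)  # e.g. ~/.cornac/ml-100k/u.data, reused on later calls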