Skip to content

Commit

Permalink
refactor: Updated build config and documentation theme (#213)
Browse files Browse the repository at this point in the history
* chore: Moved setup files to pyproject.toml

* chore: Fixed conda recipe

* refactor: Moved collect_env script

* style: Fixed black and mypy

* ci: Updated CI jobs

* docs: Updated main branch ref

* docs: Updated README badges

* chore: Updated dockerfiles

* docs: Updated documentation theme

* ci: Fixed CI jobs

* style: Fixed isort

* style: Fixed isort & black conflict
  • Loading branch information
frgfm committed Jun 2, 2022
1 parent 9928a84 commit 7f5741f
Show file tree
Hide file tree
Showing 114 changed files with 3,572 additions and 2,485 deletions.
2 changes: 1 addition & 1 deletion .conda/meta.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ test:
about:
home: {{ data.get('url') }}
license: {{ data['license'] }}
license_url: https://github.com/frgfm/Holocron/blob/master/LICENSE
license_url: https://github.com/frgfm/Holocron/blob/main/LICENSE
license_file: LICENSE
summary: {{ data['description'] }}
description: |
Expand Down
2 changes: 0 additions & 2 deletions .coveragerc

This file was deleted.

2 changes: 1 addition & 1 deletion .flake8
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
[flake8]
max-line-length = 120
ignore = E402, E265, F403, W503, W504, E731
ignore = E203, E402, E265, F403, W503, W504, E731
exclude = .github, .git, venv*, docs, build
per-file-ignores = **/__init__.py:F401
2 changes: 1 addition & 1 deletion .github/ISSUE_TEMPLATE/bug_report.yml
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ body:
description: |
Please run the following command and paste the output below.
```sh
wget https://raw.githubusercontent.com/frgfm/Holocron/master/scripts/collect_env.py
wget https://raw.githubusercontent.com/frgfm/Holocron/main/.github/collect_env.py
# For security purposes, please check the contents of collect_env.py before running it.
python collect_env.py
```
Expand Down
4 changes: 2 additions & 2 deletions .github/PULL_REQUEST_TEMPLATE.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ Closes # (issue)

## Before submitting
- [ ] Was this discussed/approved in a Github [issue](https://github.com/frgfm/Holocron/issues?q=is%3Aissue) or a [discussion](https://github.com/frgfm/Holocron/discussions)? Please add a link to it if that's the case.
- [ ] You have read the [contribution guidelines](https://github.com/frgfm/Holocron/blob/master/CONTRIBUTING.md#submitting-a-pull-request) and followed them in this PR.
- [ ] You have read the [contribution guidelines](https://github.com/frgfm/Holocron/blob/main/CONTRIBUTING.md#submitting-a-pull-request) and followed them in this PR.
- [ ] Did you make sure to update the documentation with your changes? Here are the
  [documentation guidelines](https://github.com/frgfm/Holocron/tree/master/docs).
  [documentation guidelines](https://github.com/frgfm/Holocron/tree/main/docs).
- [ ] Did you write any new necessary tests?
147 changes: 79 additions & 68 deletions scripts/collect_env.py → .github/collect_env.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,19 +20,22 @@

try:
import holocron

HOLOCRON_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
HOLOCRON_AVAILABLE = False

try:
import torch

TORCH_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
TORCH_AVAILABLE = False


try:
import torchvision

TORCHVISION_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
TORCHVISION_AVAILABLE = False
Expand All @@ -41,24 +44,26 @@


# System Environment Information
# Immutable record of every fact collected about the environment;
# one field per line of the final printed report.
SystemEnv = namedtuple(
    "SystemEnv",
    [
        "holocron_version",
        "torch_version",
        "torchvision_version",
        "os",
        "python_version",
        "is_cuda_available",
        "cuda_runtime_version",
        "nvidia_driver_version",
        "nvidia_gpu_models",
        "cudnn_version",
    ],
)


def run(command):
"""Returns (return-code, stdout, stderr)"""
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = p.communicate()
rc = p.returncode
if PY3:
Expand Down Expand Up @@ -88,53 +93,52 @@ def run_and_parse_first_match(run_lambda, command, regex):


def get_nvidia_driver_version(run_lambda):
    """Return the NVIDIA driver version string, or None if it cannot be determined."""
    if get_platform() == "darwin":
        # macOS has no nvidia-smi; parse the version out of the loaded CUDA kext.
        cmd = "kextstat | grep -i cuda"
        return run_and_parse_first_match(run_lambda, cmd, r"com[.]nvidia[.]CUDA [(](.*?)[)]")
    smi = get_nvidia_smi()
    return run_and_parse_first_match(run_lambda, smi, r"Driver Version: (.*?) ")


def get_gpu_info(run_lambda):
    """Return the names of the available NVIDIA GPUs (UUIDs removed), or None."""
    if get_platform() == "darwin":
        # No nvidia-smi on macOS: fall back to torch's device name when possible.
        if TORCH_AVAILABLE and torch.cuda.is_available():
            return torch.cuda.get_device_name(None)
        return None
    smi = get_nvidia_smi()
    uuid_regex = re.compile(r" \(UUID: .+?\)")
    rc, out, _ = run_lambda(smi + " -L")
    if rc != 0:
        return None
    # Anonymize GPUs by removing their UUID
    return re.sub(uuid_regex, "", out)


def get_running_cuda_version(run_lambda):
    """Return the CUDA toolkit version reported by nvcc, or None if unavailable."""
    return run_and_parse_first_match(run_lambda, "nvcc --version", r"release .+ V(.*)")


def get_cudnn_version(run_lambda):
"""This will return a list of libcudnn.so; it's hard to tell which one is being used"""
if get_platform() == 'win32':
if get_platform() == "win32":
cudnn_cmd = 'where /R "%CUDA_PATH%\\bin" cudnn*.dll'
elif get_platform() == 'darwin':
elif get_platform() == "darwin":
# CUDA libraries and drivers can be found in /usr/local/cuda/. See
# https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install
# https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac
# Use CUDNN_LIBRARY when cudnn library is installed elsewhere.
cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*'
cudnn_cmd = "ls /usr/local/cuda/lib/libcudnn*"
else:
cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev'
rc, out, _ = run_lambda(cudnn_cmd)
# find will return 1 if there are permission errors or if not found
if len(out) == 0 or rc not in (1, 0):
lib = os.environ.get('CUDNN_LIBRARY')
lib = os.environ.get("CUDNN_LIBRARY")
if lib is not None and os.path.isfile(lib):
return os.path.realpath(lib)
return None
files = set()
for fn in out.split('\n'):
for fn in out.split("\n"):
fn = os.path.realpath(fn) # eliminate symbolic links
if os.path.isfile(fn):
files.add(fn)
Expand All @@ -144,61 +148,68 @@ def get_cudnn_version(run_lambda):
files = list(sorted(files))
if len(files) == 1:
return files[0]
result = '\n'.join(files)
return 'Probably one of the following:\n{}'.format(result)
result = "\n".join(files)
return "Probably one of the following:\n{}".format(result)


def get_nvidia_smi():
    """Return the command used to invoke nvidia-smi.

    Note: nvidia-smi is currently available only on Windows and Linux.
    On Windows, probe the known install locations and return the first
    existing path (quoted for the shell); otherwise rely on PATH lookup.
    """
    smi = "nvidia-smi"
    if get_platform() == "win32":
        system_root = os.environ.get("SYSTEMROOT", "C:\\Windows")
        program_files_root = os.environ.get("PROGRAMFILES", "C:\\Program Files")
        legacy_path = os.path.join(program_files_root, "NVIDIA Corporation", "NVSMI", smi)
        new_path = os.path.join(system_root, "System32", smi)
        # Recent drivers install under System32; older ones under Program Files.
        for candidate_smi in (new_path, legacy_path):
            if os.path.exists(candidate_smi):
                smi = '"{}"'.format(candidate_smi)
                break
    return smi


def get_platform():
    """Return a normalized platform identifier (linux/win32/cygwin/darwin).

    Unrecognized platforms fall through to the raw ``sys.platform`` value.
    """
    if sys.platform.startswith("linux"):
        return "linux"
    elif sys.platform.startswith("win32"):
        return "win32"
    elif sys.platform.startswith("cygwin"):
        return "cygwin"
    elif sys.platform.startswith("darwin"):
        return "darwin"
    else:
        return sys.platform


def get_mac_version(run_lambda):
    """Return the macOS product version, or None if it cannot be read."""
    return run_and_parse_first_match(run_lambda, "sw_vers -productVersion", r"(.*)")


def get_windows_version(run_lambda):
    """Return the Windows edition string from wmic, or None if it cannot be read."""
    return run_and_read_all(run_lambda, "wmic os get Caption | findstr /v Caption")


def get_lsb_version(run_lambda):
    """Return the distribution description from lsb_release, or None if absent."""
    return run_and_parse_first_match(run_lambda, "lsb_release -a", r"Description:\t(.*)")


def check_release_file(run_lambda):
    """Return PRETTY_NAME from /etc/*-release files, or None if not found."""
    return run_and_parse_first_match(run_lambda, "cat /etc/*-release", r'PRETTY_NAME="(.*)"')


def get_os(run_lambda):
platform = get_platform()

if platform in ('win32', 'cygwin'):
if platform in ("win32", "cygwin"):
return get_windows_version(run_lambda)

if platform == 'darwin':
if platform == "darwin":
version = get_mac_version(run_lambda)
if version is None:
return None
return 'Mac OSX {}'.format(version)
return "Mac OSX {}".format(version)

if platform == 'linux':
if platform == "linux":
# Ubuntu/Debian based
desc = get_lsb_version(run_lambda)
if desc is not None:
Expand All @@ -221,18 +232,18 @@ def get_env_info():
if HOLOCRON_AVAILABLE:
holocron_str = holocron.__version__
else:
holocron_str = 'N/A'
holocron_str = "N/A"

if TORCH_AVAILABLE:
torch_str = torch.__version__
cuda_available_str = torch.cuda.is_available()
else:
torch_str = cuda_available_str = 'N/A'
torch_str = cuda_available_str = "N/A"

if TORCHVISION_AVAILABLE:
torchvision_str = torchvision.__version__
else:
torchvision_str = 'N/A'
torchvision_str = "N/A"

return SystemEnv(
holocron_version=holocron_str,
Expand All @@ -251,7 +262,9 @@ def get_env_info():
env_info_fmt = """
Holocron version: {holocron_version}
PyTorch version: {torch_version} (torchvision {torchvision_version})
OS: {os}
Python version: {python_version}
Is CUDA available: {is_cuda_available}
CUDA runtime version: {cuda_runtime_version}
Expand All @@ -262,14 +275,14 @@ def get_env_info():


def pretty_str(envinfo):
def replace_nones(dct, replacement='Could not collect'):
def replace_nones(dct, replacement="Could not collect"):
for key in dct.keys():
if dct[key] is not None:
continue
dct[key] = replacement
return dct

def replace_bools(dct, true='Yes', false='No'):
def replace_bools(dct, true="Yes", false="No"):
for key in dct.keys():
if dct[key] is True:
dct[key] = true
Expand All @@ -279,28 +292,26 @@ def replace_bools(dct, true='Yes', false='No'):

def maybe_start_on_next_line(string):
# If `string` is multiline, prepend a \n to it.
if string is not None and len(string.split('\n')) > 1:
return '\n{}\n'.format(string)
if string is not None and len(string.split("\n")) > 1:
return "\n{}\n".format(string)
return string

mutable_dict = envinfo._asdict()

# If nvidia_gpu_models is multiline, start on the next line
mutable_dict['nvidia_gpu_models'] = \
maybe_start_on_next_line(envinfo.nvidia_gpu_models)
mutable_dict["nvidia_gpu_models"] = maybe_start_on_next_line(envinfo.nvidia_gpu_models)

# If the machine doesn't have CUDA, report some fields as 'No CUDA'
dynamic_cuda_fields = [
'cuda_runtime_version',
'nvidia_gpu_models',
'nvidia_driver_version',
"cuda_runtime_version",
"nvidia_gpu_models",
"nvidia_driver_version",
]
all_cuda_fields = dynamic_cuda_fields + ['cudnn_version']
all_dynamic_cuda_fields_missing = all(
mutable_dict[field] is None for field in dynamic_cuda_fields)
all_cuda_fields = dynamic_cuda_fields + ["cudnn_version"]
all_dynamic_cuda_fields_missing = all(mutable_dict[field] is None for field in dynamic_cuda_fields)
if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing:
for field in all_cuda_fields:
mutable_dict[field] = 'No CUDA'
mutable_dict[field] = "No CUDA"

# Replace True with Yes, False with No
mutable_dict = replace_bools(mutable_dict)
Expand All @@ -326,5 +337,5 @@ def main():
print(output)


# Script entry point: collect the environment info and print the report.
if __name__ == "__main__":
    main()
Loading

0 comments on commit 7f5741f

Please sign in to comment.