From 8793eb692a02b0d2244c1c17360b5af4278e4c44 Mon Sep 17 00:00:00 2001 From: Pradeep Tammali Date: Thu, 25 Jan 2024 14:58:20 +0100 Subject: [PATCH 1/5] setup: initial setup --- .github/workflows/pre-commit-action.yaml | 35 ++ .gitignore | 4 + .pre-commit-config.yaml | 106 +++++ Makefile | 39 ++ Pipfile | 20 + Pipfile.lock | 473 +++++++++++++++++++++++ README.md | 55 +++ codecov/__init__.py | 0 codecov/badge.py | 75 ++++ codecov/coverage.py | 277 +++++++++++++ codecov/diff_grouper.py | 70 ++++ codecov/github.py | 169 ++++++++ codecov/github_client.py | 113 ++++++ codecov/groups.py | 60 +++ codecov/log.py | 8 + codecov/log_utils.py | 27 ++ codecov/main.py | 157 ++++++++ codecov/settings.py | 123 ++++++ codecov/subprocess.py | 76 ++++ codecov/template.py | 246 ++++++++++++ codecov/template_files/comment.md.j2 | 322 +++++++++++++++ pyproject.toml | 82 ++++ run.py | 10 + 23 files changed, 2547 insertions(+) create mode 100644 .github/workflows/pre-commit-action.yaml create mode 100644 .pre-commit-config.yaml create mode 100644 Makefile create mode 100644 Pipfile create mode 100644 Pipfile.lock create mode 100644 codecov/__init__.py create mode 100644 codecov/badge.py create mode 100644 codecov/coverage.py create mode 100644 codecov/diff_grouper.py create mode 100644 codecov/github.py create mode 100644 codecov/github_client.py create mode 100644 codecov/groups.py create mode 100644 codecov/log.py create mode 100644 codecov/log_utils.py create mode 100644 codecov/main.py create mode 100644 codecov/settings.py create mode 100644 codecov/subprocess.py create mode 100644 codecov/template.py create mode 100644 codecov/template_files/comment.md.j2 create mode 100644 pyproject.toml create mode 100644 run.py diff --git a/.github/workflows/pre-commit-action.yaml b/.github/workflows/pre-commit-action.yaml new file mode 100644 index 0000000..2177679 --- /dev/null +++ b/.github/workflows/pre-commit-action.yaml @@ -0,0 +1,35 @@ +name: CI + +on: + pull_request: + push: + branches: + - main + +permissions: + contents: read + +env: + SKIP: ${{ github.ref == 'refs/heads/main' && 'no-commit-to-branch' || '' }} + +jobs: + linting: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 + with: + python-version: "3.11" + - uses: pre-commit/action@v3.0.0 + testing: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 + with: + python-version: "3.11" + - name: pytest + run: | + make setup-pipenv + make install-dev + make test diff --git a/.gitignore b/.gitignore index 68bc17f..4b4af2f 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,9 @@ share/python-wheels/ *.egg MANIFEST +# Ruff +.ruff* + # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
@@ -127,6 +130,7 @@ venv/ ENV/ env.bak/ venv.bak/ +.vscode # Spyder project settings .spyderproject diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..9545941 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,106 @@ +default_language_version: + python: python3.11 +default_install_hook_types: [pre-commit, pre-push, pre-rebase] +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v2.3.0 + hooks: + - id: check-yaml + - id: check-toml + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-ast + - id: check-builtin-literals + - id: check-docstring-first + - id: debug-statements + - id: fix-encoding-pragma + - id: double-quote-string-fixer + - id: check-added-large-files + - id: check-merge-conflict + - id: name-tests-test + # ensure tests match `test_.*\.py` + # NOTE: arg should be `--pytest-test-first` but only the following works now + args: ['--django'] + - id: no-commit-to-branch + args: [--branch, master] +- repo: https://github.com/pre-commit/pygrep-hooks + rev: v1.10.0 + hooks: + - id: python-use-type-annotations + - id: python-check-blanket-noqa + - id: python-check-blanket-type-ignore + - id: python-check-mock-methods + - id: python-no-eval + - id: python-no-log-warn +- repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.1.6 + hooks: + - id: ruff + - id: ruff-format + args: ['--check'] +- repo: https://github.com/psf/black-pre-commit-mirror + rev: 23.11.0 + hooks: + - id: black + args: ['--check'] +- repo: https://github.com/hhatto/autopep8 + rev: v2.0.4 + hooks: + - id: autopep8 +- repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.7.0 + hooks: + - id: mypy + additional_dependencies: ['types-pytz', 'types-requests'] +- repo: https://github.com/PyCQA/flake8 + rev: 6.1.0 + hooks: + - id: flake8 + additional_dependencies: ['Flake8-pyproject==1.2.3'] +- repo: https://github.com/PyCQA/bandit + rev: 1.7.5 + hooks: + - id: bandit + args: ["-c", "pyproject.toml"] +- repo: https://github.com/pylint-dev/pylint + rev: v3.0.2 + hooks: + - id: pylint +- repo: https://github.com/PyCQA/isort + rev: 5.12.0 + hooks: + - id: isort + args: ['--check-only'] +- repo: https://github.com/hadialqattan/pycln + rev: v2.4.0 + hooks: + - id: pycln +- repo: https://github.com/mrtazz/checkmake.git + rev: 0.2.2 + hooks: + - id: checkmake +- repo: https://github.com/tox-dev/pyproject-fmt + rev: "1.0.0" + hooks: + - id: pyproject-fmt + args: ['--check'] +- repo: local + hooks: + - id: commit-msg + name: Check commit message + language: pygrep + entry: '^(chore|test|setup|feature|fix|build|docs|refactor)!?: [a-zA-Z0-9-_ ]+[a-zA-Z0-9-_ ]+.*' + args: + - --negate # fails if the entry is NOT matched + stages: + - commit-msg +- repo: local + hooks: + - id: pytest + name: pytest + entry: make test + language: system + pass_filenames: false + always_run: true + stages: + - pre-push diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..72636de --- /dev/null +++ b/Makefile @@ -0,0 +1,39 @@ +SHELL := /bin/bash + +.PHONY: setup setup-pipenv install install-dev lint test run clean-setup clean-lint all clean + +setup: install-dev + pipenv run pre-commit install + pipenv run pre-commit install --hook-type commit-msg + +setup-pipenv: + python -m pip install --upgrade pip + pip install pipenv + +install: + pipenv sync + +install-dev: + pipenv sync --dev + +lint: + pipenv run pre-commit run --all-files + +test: + pipenv run pytest tests/* --cov + +run: + pipenv run python run.py + +clean-setup: + pipenv run pre-commit 
uninstall --hook-type commit-msg + pipenv run pre-commit uninstall + pipenv clean + +clean-lint: + pipenv run pre-commit clean + pipenv run pre-commit gc + +all: setup-pipenv setup lint run + +clean: clean-lint clean-setup diff --git a/Pipfile b/Pipfile new file mode 100644 index 0000000..238e06c --- /dev/null +++ b/Pipfile @@ -0,0 +1,20 @@ +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[packages] +python-coverage-comment = {file = ".", editable = true} +httpx = "*" +jinja2 = "*" + +[dev-packages] +ruff = "*" +mypy = "*" +pre-commit = "*" +pytest = "*" +pytest-env = "*" +pytest-cov = "*" + +[requires] +python_version = "3.11" diff --git a/Pipfile.lock b/Pipfile.lock new file mode 100644 index 0000000..390fe4c --- /dev/null +++ b/Pipfile.lock @@ -0,0 +1,473 @@ +{ + "_meta": { + "hash": { + "sha256": "4703ff707f807d679d8bf806e9ee6c5ccabc5346332c8d821cd1a69a8d31b093" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.11" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "anyio": { + "hashes": [ + "sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee", + "sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f" + ], + "markers": "python_version >= '3.8'", + "version": "==4.2.0" + }, + "certifi": { + "hashes": [ + "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1", + "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474" + ], + "markers": "python_version >= '3.6'", + "version": "==2023.11.17" + }, + "h11": { + "hashes": [ + "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d", + "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761" + ], + "markers": "python_version >= '3.7'", + "version": "==0.14.0" + }, + "httpcore": { + "hashes": [ + "sha256:096cc05bca73b8e459a1fc3dcf585148f63e534eae4339559c9b8a8d6399acc7", + "sha256:9fc092e4799b26174648e54b74ed5f683132a464e95643b226e00c2ed2fa6535" + ], + "markers": "python_version >= '3.8'", + "version": "==1.0.2" + }, + "httpx": { + "hashes": [ + "sha256:451b55c30d5185ea6b23c2c793abf9bb237d2a7dfb901ced6ff69ad37ec1dfaf", + "sha256:8915f5a3627c4d47b73e8202457cb28f1266982d1159bd5779d86a80c0eab1cd" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==0.26.0" + }, + "idna": { + "hashes": [ + "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca", + "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f" + ], + "markers": "python_version >= '3.5'", + "version": "==3.6" + }, + "jinja2": { + "hashes": [ + "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", + "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==3.1.3" + }, + "markupsafe": { + "hashes": [ + "sha256:0042d6a9880b38e1dd9ff83146cc3c9c18a059b9360ceae207805567aacccc69", + "sha256:0c26f67b3fe27302d3a412b85ef696792c4a2386293c53ba683a89562f9399b0", + "sha256:0fbad3d346df8f9d72622ac71b69565e621ada2ce6572f37c2eae8dacd60385d", + "sha256:15866d7f2dc60cfdde12ebb4e75e41be862348b4728300c36cdf405e258415ec", + "sha256:1c98c33ffe20e9a489145d97070a435ea0679fddaabcafe19982fe9c971987d5", + "sha256:21e7af8091007bf4bebf4521184f4880a6acab8df0df52ef9e513d8e5db23411", + "sha256:23984d1bdae01bee794267424af55eef4dfc038dc5d1272860669b2aa025c9e3", + 
"sha256:31f57d64c336b8ccb1966d156932f3daa4fee74176b0fdc48ef580be774aae74", + "sha256:3583a3a3ab7958e354dc1d25be74aee6228938312ee875a22330c4dc2e41beb0", + "sha256:36d7626a8cca4d34216875aee5a1d3d654bb3dac201c1c003d182283e3205949", + "sha256:396549cea79e8ca4ba65525470d534e8a41070e6b3500ce2414921099cb73e8d", + "sha256:3a66c36a3864df95e4f62f9167c734b3b1192cb0851b43d7cc08040c074c6279", + "sha256:3aae9af4cac263007fd6309c64c6ab4506dd2b79382d9d19a1994f9240b8db4f", + "sha256:3ab3a886a237f6e9c9f4f7d272067e712cdb4efa774bef494dccad08f39d8ae6", + "sha256:47bb5f0142b8b64ed1399b6b60f700a580335c8e1c57f2f15587bd072012decc", + "sha256:49a3b78a5af63ec10d8604180380c13dcd870aba7928c1fe04e881d5c792dc4e", + "sha256:4df98d4a9cd6a88d6a585852f56f2155c9cdb6aec78361a19f938810aa020954", + "sha256:5045e892cfdaecc5b4c01822f353cf2c8feb88a6ec1c0adef2a2e705eef0f656", + "sha256:5244324676254697fe5c181fc762284e2c5fceeb1c4e3e7f6aca2b6f107e60dc", + "sha256:54635102ba3cf5da26eb6f96c4b8c53af8a9c0d97b64bdcb592596a6255d8518", + "sha256:54a7e1380dfece8847c71bf7e33da5d084e9b889c75eca19100ef98027bd9f56", + "sha256:55d03fea4c4e9fd0ad75dc2e7e2b6757b80c152c032ea1d1de487461d8140efc", + "sha256:698e84142f3f884114ea8cf83e7a67ca8f4ace8454e78fe960646c6c91c63bfa", + "sha256:6aa5e2e7fc9bc042ae82d8b79d795b9a62bd8f15ba1e7594e3db243f158b5565", + "sha256:7653fa39578957bc42e5ebc15cf4361d9e0ee4b702d7d5ec96cdac860953c5b4", + "sha256:765f036a3d00395a326df2835d8f86b637dbaf9832f90f5d196c3b8a7a5080cb", + "sha256:78bc995e004681246e85e28e068111a4c3f35f34e6c62da1471e844ee1446250", + "sha256:7a07f40ef8f0fbc5ef1000d0c78771f4d5ca03b4953fc162749772916b298fc4", + "sha256:8b570a1537367b52396e53325769608f2a687ec9a4363647af1cded8928af959", + "sha256:987d13fe1d23e12a66ca2073b8d2e2a75cec2ecb8eab43ff5624ba0ad42764bc", + "sha256:9896fca4a8eb246defc8b2a7ac77ef7553b638e04fbf170bff78a40fa8a91474", + "sha256:9e9e3c4020aa2dc62d5dd6743a69e399ce3de58320522948af6140ac959ab863", + "sha256:a0b838c37ba596fcbfca71651a104a611543077156cb0a26fe0c475e1f152ee8", + "sha256:a4d176cfdfde84f732c4a53109b293d05883e952bbba68b857ae446fa3119b4f", + "sha256:a76055d5cb1c23485d7ddae533229039b850db711c554a12ea64a0fd8a0129e2", + "sha256:a76cd37d229fc385738bd1ce4cba2a121cf26b53864c1772694ad0ad348e509e", + "sha256:a7cc49ef48a3c7a0005a949f3c04f8baa5409d3f663a1b36f0eba9bfe2a0396e", + "sha256:abf5ebbec056817057bfafc0445916bb688a255a5146f900445d081db08cbabb", + "sha256:b0fe73bac2fed83839dbdbe6da84ae2a31c11cfc1c777a40dbd8ac8a6ed1560f", + "sha256:b6f14a9cd50c3cb100eb94b3273131c80d102e19bb20253ac7bd7336118a673a", + "sha256:b83041cda633871572f0d3c41dddd5582ad7d22f65a72eacd8d3d6d00291df26", + "sha256:b835aba863195269ea358cecc21b400276747cc977492319fd7682b8cd2c253d", + "sha256:bf1196dcc239e608605b716e7b166eb5faf4bc192f8a44b81e85251e62584bd2", + "sha256:c669391319973e49a7c6230c218a1e3044710bc1ce4c8e6eb71f7e6d43a2c131", + "sha256:c7556bafeaa0a50e2fe7dc86e0382dea349ebcad8f010d5a7dc6ba568eaaa789", + "sha256:c8f253a84dbd2c63c19590fa86a032ef3d8cc18923b8049d91bcdeeb2581fbf6", + "sha256:d18b66fe626ac412d96c2ab536306c736c66cf2a31c243a45025156cc190dc8a", + "sha256:d5291d98cd3ad9a562883468c690a2a238c4a6388ab3bd155b0c75dd55ece858", + "sha256:d5c31fe855c77cad679b302aabc42d724ed87c043b1432d457f4976add1c2c3e", + "sha256:d6e427c7378c7f1b2bef6a344c925b8b63623d3321c09a237b7cc0e77dd98ceb", + "sha256:dac1ebf6983148b45b5fa48593950f90ed6d1d26300604f321c74a9ca1609f8e", + "sha256:de8153a7aae3835484ac168a9a9bdaa0c5eee4e0bc595503c95d53b942879c84", + "sha256:e1a0d1924a5013d4f294087e00024ad25668234569289650929ab871231668e7", + 
"sha256:e7902211afd0af05fbadcc9a312e4cf10f27b779cf1323e78d52377ae4b72bea", + "sha256:e888ff76ceb39601c59e219f281466c6d7e66bd375b4ec1ce83bcdc68306796b", + "sha256:f06e5a9e99b7df44640767842f414ed5d7bedaaa78cd817ce04bbd6fd86e2dd6", + "sha256:f6be2d708a9d0e9b0054856f07ac7070fbe1754be40ca8525d5adccdbda8f475", + "sha256:f9917691f410a2e0897d1ef99619fd3f7dd503647c8ff2475bf90c3cf222ad74", + "sha256:fc1a75aa8f11b87910ffd98de62b29d6520b6d6e8a3de69a70ca34dea85d2a8a", + "sha256:fe8512ed897d5daf089e5bd010c3dc03bb1bdae00b35588c49b98268d4a01e00" + ], + "markers": "python_version >= '3.7'", + "version": "==2.1.4" + }, + "python-coverage-comment": { + "editable": true, + "file": "." + }, + "sniffio": { + "hashes": [ + "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101", + "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384" + ], + "markers": "python_version >= '3.7'", + "version": "==1.3.0" + } + }, + "develop": { + "cfgv": { + "hashes": [ + "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", + "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560" + ], + "markers": "python_version >= '3.8'", + "version": "==3.4.0" + }, + "coverage": { + "extras": [ + "toml" + ], + "hashes": [ + "sha256:04387a4a6ecb330c1878907ce0dc04078ea72a869263e53c72a1ba5bbdf380ca", + "sha256:0676cd0ba581e514b7f726495ea75aba3eb20899d824636c6f59b0ed2f88c471", + "sha256:0e8d06778e8fbffccfe96331a3946237f87b1e1d359d7fbe8b06b96c95a5407a", + "sha256:0eb3c2f32dabe3a4aaf6441dde94f35687224dfd7eb2a7f47f3fd9428e421058", + "sha256:109f5985182b6b81fe33323ab4707011875198c41964f014579cf82cebf2bb85", + "sha256:13eaf476ec3e883fe3e5fe3707caeb88268a06284484a3daf8250259ef1ba143", + "sha256:164fdcc3246c69a6526a59b744b62e303039a81e42cfbbdc171c91a8cc2f9446", + "sha256:26776ff6c711d9d835557ee453082025d871e30b3fd6c27fcef14733f67f0590", + "sha256:26f66da8695719ccf90e794ed567a1549bb2644a706b41e9f6eae6816b398c4a", + "sha256:29f3abe810930311c0b5d1a7140f6395369c3db1be68345638c33eec07535105", + "sha256:316543f71025a6565677d84bc4df2114e9b6a615aa39fb165d697dba06a54af9", + "sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a", + "sha256:3cc9d4bc55de8003663ec94c2f215d12d42ceea128da8f0f4036235a119c88ac", + "sha256:485e9f897cf4856a65a57c7f6ea3dc0d4e6c076c87311d4bc003f82cfe199d25", + "sha256:5040148f4ec43644702e7b16ca864c5314ccb8ee0751ef617d49aa0e2d6bf4f2", + "sha256:51456e6fa099a8d9d91497202d9563a320513fcf59f33991b0661a4a6f2ad450", + "sha256:53d7d9158ee03956e0eadac38dfa1ec8068431ef8058fe6447043db1fb40d932", + "sha256:5a10a4920def78bbfff4eff8a05c51be03e42f1c3735be42d851f199144897ba", + "sha256:5b14b4f8760006bfdb6e08667af7bc2d8d9bfdb648351915315ea17645347137", + "sha256:5b2ccb7548a0b65974860a78c9ffe1173cfb5877460e5a229238d985565574ae", + "sha256:697d1317e5290a313ef0d369650cfee1a114abb6021fa239ca12b4849ebbd614", + "sha256:6ae8c9d301207e6856865867d762a4b6fd379c714fcc0607a84b92ee63feff70", + "sha256:707c0f58cb1712b8809ece32b68996ee1e609f71bd14615bd8f87a1293cb610e", + "sha256:74775198b702868ec2d058cb92720a3c5a9177296f75bd97317c787daf711505", + "sha256:756ded44f47f330666843b5781be126ab57bb57c22adbb07d83f6b519783b870", + "sha256:76f03940f9973bfaee8cfba70ac991825611b9aac047e5c80d499a44079ec0bc", + "sha256:79287fd95585ed36e83182794a57a46aeae0b64ca53929d1176db56aacc83451", + "sha256:799c8f873794a08cdf216aa5d0531c6a3747793b70c53f70e98259720a6fe2d7", + "sha256:7d360587e64d006402b7116623cebf9d48893329ef035278969fa3bbf75b697e", + 
"sha256:80b5ee39b7f0131ebec7968baa9b2309eddb35b8403d1869e08f024efd883566", + "sha256:815ac2d0f3398a14286dc2cea223a6f338109f9ecf39a71160cd1628786bc6f5", + "sha256:83c2dda2666fe32332f8e87481eed056c8b4d163fe18ecc690b02802d36a4d26", + "sha256:846f52f46e212affb5bcf131c952fb4075b55aae6b61adc9856222df89cbe3e2", + "sha256:936d38794044b26c99d3dd004d8af0035ac535b92090f7f2bb5aa9c8e2f5cd42", + "sha256:9864463c1c2f9cb3b5db2cf1ff475eed2f0b4285c2aaf4d357b69959941aa555", + "sha256:995ea5c48c4ebfd898eacb098164b3cc826ba273b3049e4a889658548e321b43", + "sha256:a1526d265743fb49363974b7aa8d5899ff64ee07df47dd8d3e37dcc0818f09ed", + "sha256:a56de34db7b7ff77056a37aedded01b2b98b508227d2d0979d373a9b5d353daa", + "sha256:a7c97726520f784239f6c62506bc70e48d01ae71e9da128259d61ca5e9788516", + "sha256:b8e99f06160602bc64da35158bb76c73522a4010f0649be44a4e167ff8555952", + "sha256:bb1de682da0b824411e00a0d4da5a784ec6496b6850fdf8c865c1d68c0e318dd", + "sha256:bf477c355274a72435ceb140dc42de0dc1e1e0bf6e97195be30487d8eaaf1a09", + "sha256:bf635a52fc1ea401baf88843ae8708591aa4adff875e5c23220de43b1ccf575c", + "sha256:bfd5db349d15c08311702611f3dccbef4b4e2ec148fcc636cf8739519b4a5c0f", + "sha256:c530833afc4707fe48524a44844493f36d8727f04dcce91fb978c414a8556cc6", + "sha256:cc6d65b21c219ec2072c1293c505cf36e4e913a3f936d80028993dd73c7906b1", + "sha256:cd3c1e4cb2ff0083758f09be0f77402e1bdf704adb7f89108007300a6da587d0", + "sha256:cfd2a8b6b0d8e66e944d47cdec2f47c48fef2ba2f2dff5a9a75757f64172857e", + "sha256:d0ca5c71a5a1765a0f8f88022c52b6b8be740e512980362f7fdbb03725a0d6b9", + "sha256:e7defbb9737274023e2d7af02cac77043c86ce88a907c58f42b580a97d5bcca9", + "sha256:e9d1bf53c4c8de58d22e0e956a79a5b37f754ed1ffdbf1a260d9dcfa2d8a325e", + "sha256:ea81d8f9691bb53f4fb4db603203029643caffc82bf998ab5b59ca05560f4c06" + ], + "markers": "python_version >= '3.8'", + "version": "==7.4.0" + }, + "distlib": { + "hashes": [ + "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784", + "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64" + ], + "version": "==0.3.8" + }, + "filelock": { + "hashes": [ + "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e", + "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c" + ], + "markers": "python_version >= '3.8'", + "version": "==3.13.1" + }, + "identify": { + "hashes": [ + "sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d", + "sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34" + ], + "markers": "python_version >= '3.8'", + "version": "==2.5.33" + }, + "iniconfig": { + "hashes": [ + "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", + "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374" + ], + "markers": "python_version >= '3.7'", + "version": "==2.0.0" + }, + "mypy": { + "hashes": [ + "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6", + "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d", + "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02", + "sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d", + "sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3", + "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3", + "sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3", + "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66", + "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259", + 
"sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835", + "sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd", + "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d", + "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8", + "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07", + "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b", + "sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e", + "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6", + "sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae", + "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9", + "sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d", + "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a", + "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592", + "sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218", + "sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817", + "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4", + "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410", + "sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==1.8.0" + }, + "mypy-extensions": { + "hashes": [ + "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", + "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782" + ], + "markers": "python_version >= '3.5'", + "version": "==1.0.0" + }, + "nodeenv": { + "hashes": [ + "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2", + "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6'", + "version": "==1.8.0" + }, + "packaging": { + "hashes": [ + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" + ], + "markers": "python_version >= '3.7'", + "version": "==23.2" + }, + "platformdirs": { + "hashes": [ + "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380", + "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420" + ], + "markers": "python_version >= '3.8'", + "version": "==4.1.0" + }, + "pluggy": { + "hashes": [ + "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12", + "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7" + ], + "markers": "python_version >= '3.8'", + "version": "==1.3.0" + }, + "pre-commit": { + "hashes": [ + "sha256:c255039ef399049a5544b6ce13d135caba8f2c28c3b4033277a788f434308376", + "sha256:d30bad9abf165f7785c15a21a1f46da7d0677cb00ee7ff4c579fd38922efe15d" + ], + "index": "pypi", + "markers": "python_version >= '3.9'", + "version": "==3.6.0" + }, + "pytest": { + "hashes": [ + "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280", + "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==7.4.4" + }, + "pytest-cov": { + "hashes": [ + "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6", + 
"sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==4.1.0" + }, + "pytest-env": { + "hashes": [ + "sha256:aada77e6d09fcfb04540a6e462c58533c37df35fa853da78707b17ec04d17dfc", + "sha256:fcd7dc23bb71efd3d35632bde1bbe5ee8c8dc4489d6617fb010674880d96216b" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==1.1.3" + }, + "pyyaml": { + "hashes": [ + "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5", + "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc", + "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df", + "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741", + "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206", + "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27", + "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595", + "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62", + "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98", + "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696", + "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290", + "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9", + "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d", + "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6", + "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867", + "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47", + "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486", + "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6", + "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3", + "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007", + "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938", + "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0", + "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c", + "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735", + "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d", + "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28", + "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", + "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", + "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", + "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", + "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", + "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", + "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", + "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0", + "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515", + "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c", + "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c", + "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924", + "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34", + "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43", + 
"sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859", + "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673", + "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54", + "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a", + "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b", + "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab", + "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa", + "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c", + "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585", + "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d", + "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f" + ], + "markers": "python_version >= '3.6'", + "version": "==6.0.1" + }, + "ruff": { + "hashes": [ + "sha256:1c8eca1a47b4150dc0fbec7fe68fc91c695aed798532a18dbb1424e61e9b721f", + "sha256:2270504d629a0b064247983cbc495bed277f372fb9eaba41e5cf51f7ba705a6a", + "sha256:269302b31ade4cde6cf6f9dd58ea593773a37ed3f7b97e793c8594b262466b67", + "sha256:62ce2ae46303ee896fc6811f63d6dabf8d9c389da0f3e3f2bce8bc7f15ef5488", + "sha256:653230dd00aaf449eb5ff25d10a6e03bc3006813e2cb99799e568f55482e5cae", + "sha256:6b3dadc9522d0eccc060699a9816e8127b27addbb4697fc0c08611e4e6aeb8b5", + "sha256:7060156ecc572b8f984fd20fd8b0fcb692dd5d837b7606e968334ab7ff0090ab", + "sha256:722bafc299145575a63bbd6b5069cb643eaa62546a5b6398f82b3e4403329cab", + "sha256:80258bb3b8909b1700610dfabef7876423eed1bc930fe177c71c414921898efa", + "sha256:87b3acc6c4e6928459ba9eb7459dd4f0c4bf266a053c863d72a44c33246bfdbf", + "sha256:96f76536df9b26622755c12ed8680f159817be2f725c17ed9305b472a757cdbb", + "sha256:a53d8e35313d7b67eb3db15a66c08434809107659226a90dcd7acb2afa55faea", + "sha256:ab3f71f64498c7241123bb5a768544cf42821d2a537f894b22457a543d3ca7a9", + "sha256:ad3f8088b2dfd884820289a06ab718cde7d38b94972212cc4ba90d5fbc9955f3", + "sha256:b2027dde79d217b211d725fc833e8965dc90a16d0d3213f1298f97465956661b", + "sha256:bea9be712b8f5b4ebed40e1949379cfb2a7d907f42921cf9ab3aae07e6fba9eb", + "sha256:e3d241aa61f92b0805a7082bd89a9990826448e4d0398f0e2bc8f05c75c63d99" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==0.1.14" + }, + "setuptools": { + "hashes": [ + "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05", + "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78" + ], + "markers": "python_version >= '3.8'", + "version": "==69.0.3" + }, + "typing-extensions": { + "hashes": [ + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" + ], + "markers": "python_version >= '3.8'", + "version": "==4.9.0" + }, + "virtualenv": { + "hashes": [ + "sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3", + "sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b" + ], + "markers": "python_version >= '3.7'", + "version": "==20.25.0" + } + } +} diff --git a/README.md b/README.md index e8c9a1b..944aa96 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,57 @@ # python-coverage-comment Create a Coverage report comment on Github PR + + + +## Setting up Local Environment using Pipenv + +To get started, follow these steps: + +1. Clone the repository: + ``` + git clone + ``` + +2. Navigate to the cloned repository: + ``` + cd + ``` + +3. 
Build the project: + ``` + make all + ``` + +That's it! You have successfully cloned the repository and built the project. + +## Custom Installation: + +1. Install Python: Make sure you have Python installed on your system. You can download and install Python from the official Python website. + +2. Install Pipenv: Pipenv is a package manager that combines pip and virtualenv. You can install Pipenv using pip, the Python package installer. Open your terminal or command prompt and run the following command: + ``` + pip install pipenv + ``` + +3. Install project dependencies: To install the project dependencies specified in the Pipfile, run the following command: + ``` + pipenv install --dev + ``` + +4. Activate the virtual environment: To activate the virtual environment created by Pipenv, run the following command: + ``` + pipenv shell + ``` + +5. Run your project: You can now run your project using the activated virtual environment. For example, if your project has a run.py file, you can run it using the following command: + ``` + python run.py + ``` + +6. Install pre-commit hooks: To set up pre-commit hooks for your project, run the following command: + ``` + pipenv run pre-commit install + ``` + This will install and configure pre-commit hooks that will run before each commit to enforce code quality and style standards. + +That's it! You have successfully set up your local environment using Pipenv. diff --git a/codecov/__init__.py b/codecov/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/codecov/badge.py b/codecov/badge.py new file mode 100644 index 0000000..7a2c22d --- /dev/null +++ b/codecov/badge.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +""" +This module should contain only the things relevant to the badge being computed +by shields.io +""" +from __future__ import annotations + +import decimal +import json +import urllib.parse + +import httpx + + +def get_badge_color( + rate: decimal.Decimal, + minimum_green: decimal.Decimal, + minimum_orange: decimal.Decimal, +) -> str: + if rate >= minimum_green: + return 'brightgreen' + + if rate >= minimum_orange: + return 'orange' + + return 'red' + + +def compute_badge_endpoint_data( + line_rate: decimal.Decimal, + color: str, +) -> str: + badge = { + 'schemaVersion': 1, + 'label': 'Coverage', + 'message': f'{int(line_rate)}%', + 'color': color, + } + + return json.dumps(badge) + + +def compute_badge_image(line_rate: decimal.Decimal, color: str, http_session: httpx.Client) -> str: + return http_session.get( + 'https://img.shields.io/static/v1?' + + urllib.parse.urlencode( + { + 'label': 'Coverage', + 'message': f'{int(line_rate)}%', + 'color': color, + } + ) + ).text + + +def get_static_badge_url(label: str, message: str, color: str) -> str: + if not color or not message: + raise ValueError('color and message are required') + code = '-'.join(e.replace('_', '__').replace('-', '--') for e in (label, message, color) if e) + return 'https://img.shields.io/badge/' + urllib.parse.quote(f'{code}.svg') + + +def get_endpoint_url(endpoint_url: str) -> str: + return f'https://img.shields.io/endpoint?url={endpoint_url}' + + +def get_dynamic_url(endpoint_url: str) -> str: + return 'https://img.shields.io/badge/dynamic/json?' 
+ urllib.parse.urlencode( + { + 'color': 'brightgreen', + 'label': 'coverage', + 'query': '$.message', + 'url': endpoint_url, + } + ) diff --git a/codecov/coverage.py b/codecov/coverage.py new file mode 100644 index 0000000..8c2830e --- /dev/null +++ b/codecov/coverage.py @@ -0,0 +1,277 @@ +# -*- coding: utf-8 -*- +from __future__ import annotations + +import dataclasses +import datetime +import decimal +import json +import pathlib +from collections.abc import Sequence + +from codecov import log, subprocess + + +# The dataclasses in this module are accessible in the template, which is overridable by the user. +# As a courtesy, we should do our best to keep the existing fields for backward compatibility, +# and if we really can't and can't add properties, at least bump the major version. +@dataclasses.dataclass +class CoverageMetadata: + version: str + timestamp: datetime.datetime + branch_coverage: bool + show_contexts: bool + + +@dataclasses.dataclass +class CoverageInfo: + covered_lines: int + num_statements: int + percent_covered: decimal.Decimal + percent_covered_display: str + missing_lines: int + excluded_lines: int + num_branches: int | None + num_partial_branches: int | None + covered_branches: int | None + missing_branches: int | None + + +@dataclasses.dataclass +class FileCoverage: + path: pathlib.Path + executed_lines: list[int] + missing_lines: list[int] + excluded_lines: list[int] + info: CoverageInfo + + +@dataclasses.dataclass +class Coverage: + meta: CoverageMetadata + info: CoverageInfo + files: dict[pathlib.Path, FileCoverage] + + +# The format for Diff Coverage objects may seem a little weird, because it +# was originally copied from diff-cover schema. + + +@dataclasses.dataclass +class FileDiffCoverage: + path: pathlib.Path + percent_covered: decimal.Decimal + covered_statements: list[int] + missing_statements: list[int] + added_statements: list[int] + # Added lines tracks all the lines that were added in the diff, not just + # the statements (so it includes comments, blank lines, etc.) 
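For reference, a minimal sketch (not part of the patch) of how the badge helpers added in `codecov/badge.py` above could be combined; the coverage rate and thresholds below are made-up example values.
```
import decimal

from codecov import badge

rate = decimal.Decimal('85.3')
# With a made-up green threshold of 90 and orange threshold of 70, 85.3 falls in the orange band.
color = badge.get_badge_color(
    rate=rate,
    minimum_green=decimal.Decimal('90'),
    minimum_orange=decimal.Decimal('70'),
)  # -> 'orange'

# Static badge URL suitable for a README; '%' in the message is percent-encoded.
url = badge.get_static_badge_url(label='Coverage', message=f'{int(rate)}%', color=color)
print(url)  # https://img.shields.io/badge/Coverage-85%25-orange.svg
```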
+ added_lines: list[int] + + # for backward compatibility + @property + def violation_lines(self) -> list[int]: + return self.missing_statements + + +@dataclasses.dataclass +class DiffCoverage: + total_num_lines: int + total_num_violations: int + total_percent_covered: decimal.Decimal + num_changed_lines: int + files: dict[pathlib.Path, FileDiffCoverage] + + +def compute_coverage(num_covered: int, num_total: int) -> decimal.Decimal: + if num_total == 0: + return decimal.Decimal('1') + return decimal.Decimal(num_covered) / decimal.Decimal(num_total) + + +def get_coverage_info(coverage_path: pathlib.Path) -> tuple[dict, Coverage]: + try: + with coverage_path.open() as coverage_data: + json_coverage = json.loads(coverage_data.read()) + except FileNotFoundError: + log.error('Coverage report file not found: %s', coverage_path) + raise + except json.JSONDecodeError: + log.error('Invalid JSON format in coverage report file: %s', coverage_path) + raise + + return json_coverage, extract_info(data=json_coverage) + + +def extract_info(data: dict) -> Coverage: + """ + { + "meta": { + "version": "5.5", + "timestamp": "2021-12-26T22:27:40.683570", + "branch_coverage": True, + "show_contexts": False, + }, + "files": { + "codebase/code.py": { + "executed_lines": [1, 2, 5, 6, 9], + "summary": { + "covered_lines": 42, + "num_statements": 46, + "percent_covered": 88.23529411764706, + "percent_covered_display": "88", + "missing_lines": 4, + "excluded_lines": 0, + "num_branches": 22, + "num_partial_branches": 4, + "covered_branches": 18, + "missing_branches": 4 + }, + "missing_lines": [7], + "excluded_lines": [], + } + }, + "totals": { + "covered_lines": 5, + "num_statements": 6, + "percent_covered": 75.0, + "percent_covered_display": "75", + "missing_lines": 1, + "excluded_lines": 0, + "num_branches": 2, + "num_partial_branches": 1, + "covered_branches": 1, + "missing_branches": 1, + }, + } + """ + return Coverage( + meta=CoverageMetadata( + version=data['meta']['version'], + timestamp=datetime.datetime.fromisoformat(data['meta']['timestamp']), + branch_coverage=data['meta']['branch_coverage'], + show_contexts=data['meta']['show_contexts'], + ), + files={ + pathlib.Path(path): FileCoverage( + path=pathlib.Path(path), + excluded_lines=file_data['excluded_lines'], + executed_lines=file_data['executed_lines'], + missing_lines=file_data['missing_lines'], + info=CoverageInfo( + covered_lines=file_data['summary']['covered_lines'], + num_statements=file_data['summary']['num_statements'], + percent_covered=file_data['summary']['percent_covered'], + percent_covered_display=file_data['summary']['percent_covered_display'], + missing_lines=file_data['summary']['missing_lines'], + excluded_lines=file_data['summary']['excluded_lines'], + num_branches=file_data['summary'].get('num_branches'), + num_partial_branches=file_data['summary'].get('num_partial_branches'), + covered_branches=file_data['summary'].get('covered_branches'), + missing_branches=file_data['summary'].get('missing_branches'), + ), + ) + for path, file_data in data['files'].items() + }, + info=CoverageInfo( + covered_lines=data['totals']['covered_lines'], + num_statements=data['totals']['num_statements'], + percent_covered=data['totals']['percent_covered'], + percent_covered_display=data['totals']['percent_covered_display'], + missing_lines=data['totals']['missing_lines'], + excluded_lines=data['totals']['excluded_lines'], + num_branches=data['totals'].get('num_branches'), + num_partial_branches=data['totals'].get('num_partial_branches'), + 
covered_branches=data['totals'].get('covered_branches'), + missing_branches=data['totals'].get('missing_branches'), + ), + ) + + +# pylint: disable=too-many-locals +def get_diff_coverage_info(added_lines: dict[pathlib.Path, list[int]], coverage: Coverage) -> DiffCoverage: + files = {} + total_num_lines = 0 + total_num_violations = 0 + num_changed_lines = 0 + + for path, added_lines_for_file in added_lines.items(): + num_changed_lines += len(added_lines_for_file) + + try: + file = coverage.files[path] + except KeyError: + continue + + executed = set(file.executed_lines) & set(added_lines_for_file) + count_executed = len(executed) + + missing = set(file.missing_lines) & set(added_lines_for_file) + count_missing = len(missing) + + added = executed | missing + count_total = len(added) + + total_num_lines += count_total + total_num_violations += count_missing + + percent_covered = compute_coverage(num_covered=count_executed, num_total=count_total) + + files[path] = FileDiffCoverage( + path=path, + percent_covered=percent_covered, + covered_statements=sorted(executed), + missing_statements=sorted(missing), + added_statements=sorted(added), + added_lines=added_lines_for_file, + ) + final_percentage = compute_coverage( + num_covered=total_num_lines - total_num_violations, + num_total=total_num_lines, + ) + + return DiffCoverage( + total_num_lines=total_num_lines, + total_num_violations=total_num_violations, + total_percent_covered=final_percentage, + num_changed_lines=num_changed_lines, + files=files, + ) + + +def get_added_lines(git: subprocess.Git, base_ref: str) -> dict[pathlib.Path, list[int]]: + # --unified=0 means we don't get any context lines for chunk, and we + # don't merge chunks. This means the headers that describe line number + # are always enough to derive what line numbers were added. 
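As a reference for the parsing helpers defined just below, here is a minimal sketch (not part of the patch) of what they produce for a made-up `--unified=0` diff; the file path and line numbers are invented example values.
```
import pathlib

from codecov import coverage

sample_diff = (
    '+++ b/codebase/code.py\n'
    '@@ -60,0 +61,3 @@ def compute_files(\n'
    '+x = 1\n'
    '+y = 2\n'
    '+z = 3\n'
)

# '+61,3' in the hunk header means three lines were added starting at line 61.
assert list(coverage.parse_line_number_diff_line('@@ -60,0 +61,3 @@ def compute_files(')) == [61, 62, 63]

# The full diff maps each touched file to its added line numbers.
assert coverage.parse_diff_output(sample_diff) == {pathlib.Path('codebase/code.py'): [61, 62, 63]}
```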
+ # TODO: check if it is possible to pull the diff changes from github API + # TODO: Since we don't want to checkout the code locally, it would be better to pull the diff from remote + git.fetch('origin', base_ref, '--depth=1000') + diff = git.diff('--unified=0', 'FETCH_HEAD', '--', '.') + return parse_diff_output(diff) + + +def parse_diff_output(diff: str) -> dict[pathlib.Path, list[int]]: + current_file: pathlib.Path | None = None + added_filename_prefix = '+++ b/' + result: dict[pathlib.Path, list[int]] = {} + for line in diff.splitlines(): + if line.startswith(added_filename_prefix): + current_file = pathlib.Path(line.removeprefix(added_filename_prefix)) + continue + if line.startswith('@@'): + lines = parse_line_number_diff_line(line) + if len(lines) > 0: + if current_file is None: + raise ValueError(f'Unexpected diff output format: \n{diff}') + result.setdefault(current_file, []).extend(lines) + + return result + + +def parse_line_number_diff_line(line: str) -> Sequence[int]: + """ + Parse the "added" part of the line number diff text: + @@ -60,0 +61 @@ def compute_files( -> [61] + @@ -60,0 +61,3 @@ def compute_files( -> [61, 62, 63] + """ + start, length = (int(i) for i in (line.split()[2][1:] + ',1').split(',')[:2]) + return range(start, start + length) diff --git a/codecov/diff_grouper.py b/codecov/diff_grouper.py new file mode 100644 index 0000000..44bfbd1 --- /dev/null +++ b/codecov/diff_grouper.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +from __future__ import annotations + +from collections.abc import Iterable + +from codecov import coverage as coverage_module +from codecov import groups + +MAX_ANNOTATION_GAP = 3 + + +def get_missing_groups( + coverage: coverage_module.Coverage, +) -> Iterable[groups.Group]: + for path, coverage_file in coverage.files.items(): + # Lines that are covered or excluded should not be considered for + # filling a gap between violation groups. + # (so, lines that can appear in a gap are lines that are missing, or + # lines that do not contain code: blank lines or lines containing comments) + separators = { + *coverage_file.executed_lines, + *coverage_file.excluded_lines, + } + # Lines that should be considered for filling a gap, unless + # they are separators. + joiners = set(range(1, coverage_file.info.num_statements)) - separators + + for start, end in groups.compute_contiguous_groups( + values=coverage_file.missing_lines, + separators=separators, + joiners=joiners, + max_gap=MAX_ANNOTATION_GAP, + ): + yield groups.Group( + file=path, + line_start=start, + line_end=end, + ) + + +def get_diff_missing_groups( + coverage: coverage_module.Coverage, + diff_coverage: coverage_module.DiffCoverage, +) -> Iterable[groups.Group]: + for path, diff_file in diff_coverage.files.items(): + coverage_file = coverage.files[path] + + # Lines that are covered or excluded should not be considered for + # filling a gap between violation groups. + # (so, lines that can appear in a gap are lines that are missing, or + # lines that do not contain code: blank lines or lines containing comments) + separators = { + *coverage_file.executed_lines, + *coverage_file.excluded_lines, + } + # Lines that are added should be considered for filling a gap, unless + # they are separators. 
+ joiners = set(diff_file.added_lines) - separators + + for start, end in groups.compute_contiguous_groups( + values=diff_file.missing_statements, + separators=separators, + joiners=joiners, + max_gap=MAX_ANNOTATION_GAP, + ): + yield groups.Group( + file=path, + line_start=start, + line_end=end, + ) diff --git a/codecov/github.py b/codecov/github.py new file mode 100644 index 0000000..64483ec --- /dev/null +++ b/codecov/github.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- +import dataclasses +import pathlib +import sys + +from codecov import github_client, log, settings + +GITHUB_ACTIONS_LOGIN = 'CI-codecov[bot]' + + +class CannotDeterminePR(Exception): + pass + + +class CannotPostComment(Exception): + pass + + +class CannotGetPullRequest(Exception): + pass + + +class NoArtifact(Exception): + pass + + +@dataclasses.dataclass +class RepositoryInfo: + default_branch: str + visibility: str + + def is_default_branch(self, ref: str) -> bool: + return f'refs/heads/{self.default_branch}' == ref + + def is_public(self) -> bool: + return self.visibility == 'public' + + +def get_repository_info(github: github_client.GitHub, repository: str) -> RepositoryInfo: + response = github.repos(repository).get() + + return RepositoryInfo(default_branch=response.default_branch, visibility=response.visibility) + + +def get_my_login(github: github_client.GitHub) -> str: + try: + response = github.user.get() + except github_client.Forbidden: + # The GitHub actions user cannot access its own details + # and I'm not sure there's a way to see that we're using + # the GitHub actions user except noting that it fails + return GITHUB_ACTIONS_LOGIN + + return response.login + + +def get_pr_number(github: github_client.GitHub, config: settings.Config) -> int: + if config.GITHUB_PR_NUMBER: + try: + pull_request = github.repos(config.GITHUB_REPOSITORY).pulls(config.GITHUB_PR_NUMBER).get() + if pull_request.state != 'open': + raise github_client.NotFound + + return pull_request.number + except github_client.Forbidden as exc: + raise CannotGetPullRequest from exc + except github_client.NotFound: + log.warning(f'Pull request #{config.GITHUB_PR_NUMBER} does not exist') + + # If we're not on a PR, we need to find the PR number from the branch name + if config.GITHUB_REF: + try: + pull_requests = github.repos(config.GITHUB_REPOSITORY).pulls.get(state='open', head=config.GITHUB_REF) + if len(pull_requests) != 1: + raise github_client.NotFound + + return pull_requests[0].number + except github_client.Forbidden as exc: + raise CannotGetPullRequest from exc + except github_client.NotFound as exc: + raise CannotGetPullRequest from exc + + raise CannotGetPullRequest( + 'This workflow is not triggered on a pull_request event, ' + "nor on a push event on a branch. Consequently, there's nothing to do. " + 'Exiting.' 
+ ) + + +def post_comment( # pylint: disable=too-many-arguments + github: github_client.GitHub, + me: str, + repository: str, + pr_number: int, + contents: str, + marker: str, +) -> None: + issue_comments_path = github.repos(repository).issues(pr_number).comments + comments_path = github.repos(repository).issues.comments + + for comment in issue_comments_path.get(): + if comment.user.login == me and marker in comment.body: + log.info('Update previous comment') + try: + comments_path(comment.id).patch(body=contents) + except github_client.Forbidden as exc: + raise CannotPostComment from exc + except github_client.ApiError as exc: + raise CannotPostComment from exc + break + else: + log.info('Adding new comment') + try: + issue_comments_path.post(body=contents) + except github_client.Forbidden as exc: + raise CannotPostComment from exc + + +def escape_property(s: str) -> str: + return s.replace('%', '%25').replace('\r', '%0D').replace('\n', '%0A').replace(':', '%3A').replace(',', '%2C') + + +def escape_data(s: str) -> str: + return s.replace('%', '%25').replace('\r', '%0D').replace('\n', '%0A') + + +def get_workflow_command(command: str, command_value: str, **kwargs: str) -> str: + """ + Returns a string that can be printed to send a workflow command + https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions + """ + values_listed = [f'{key}={escape_property(value)}' for key, value in kwargs.items()] + + context = f" {','.join(values_listed)}" if values_listed else '' + return f'::{command}{context}::{escape_data(command_value)}' + + +def send_workflow_command(command: str, command_value: str, **kwargs: str) -> None: + print( + get_workflow_command(command=command, command_value=command_value, **kwargs), + file=sys.stderr, + ) + + +def create_missing_coverage_annotations(annotation_type: str, annotations: list[tuple[pathlib.Path, int, int]]): + """ + Create annotations for lines with missing coverage. + + annotation_type: The type of annotation to create. Can be either "error" or "warning". + annotations: A list of tuples of the form (file, line_start, line_end) + """ + send_workflow_command(command='group', command_value='Annotations of lines with missing coverage') + for file, line_start, line_end in annotations: + if line_start == line_end: + message = f'Missing coverage on line {line_start}' + else: + message = f'Missing coverage on lines {line_start}-{line_end}' + + send_workflow_command( + command=annotation_type, + command_value=message, + # This will produce \ paths when running on windows. + # GHA doc is unclear whether this is right or not. 
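As a reference, a small sketch (not part of the patch) of the string `get_workflow_command` above emits, which is the documented GitHub Actions workflow-command format; the file path and line numbers are made-up example values.
```
from codecov import github

cmd = github.get_workflow_command(
    command='warning',
    command_value='Missing coverage on lines 3-5',
    file='codecov/main.py',
    line='3',
    endLine='5',
    title='Missing coverage',
)
print(cmd)
# ::warning file=codecov/main.py,line=3,endLine=5,title=Missing coverage::Missing coverage on lines 3-5
```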
+ file=str(file), + line=str(line_start), + endLine=str(line_end), + title='Missing coverage', + ) + send_workflow_command(command='endgroup', command_value='') diff --git a/codecov/github_client.py b/codecov/github_client.py new file mode 100644 index 0000000..2a2403c --- /dev/null +++ b/codecov/github_client.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from __future__ import annotations + +import httpx + +TIMEOUT = 60 +BASE_URL = 'https://api.github.com' + + +class _Executable: + def __init__(self, _gh, _method, _path): + self._gh = _gh + self._method = _method + self._path = _path + + def __call__(self, **kw): + return self._gh._http(self._method, self._path, **kw) + + +class _Callable: + def __init__(self, _gh, _name): + self._gh = _gh + self._name = _name + + def __call__(self, *args): + if len(args) == 0: + return self + name = f'{self._name}/{"/".join([str(arg) for arg in args])}' + return _Callable(self._gh, name) + + def __getattr__(self, attr): + if attr in ['get', 'put', 'post', 'patch', 'delete']: + return _Executable(self._gh, attr, self._name) + name = f'{self._name}/{attr}' + return _Callable(self._gh, name) + + +class GitHub: + + """ + GitHub client. + """ + + def __init__(self, session: httpx.Client): + self.session = session + + def __getattr__(self, attr): + return _Callable(self, f'/{attr}') + + def _http(self, method, path, *, use_bytes=False, **kw): + _method = method.lower() + requests_kwargs = {} + if _method == 'get' and kw: + requests_kwargs = {'params': kw} + + elif _method in ['post', 'patch', 'put']: + requests_kwargs = {'json': kw} + + response = self.session.request( + _method.upper(), + path, + timeout=TIMEOUT, + **requests_kwargs, + ) + if use_bytes: + contents = response.content + else: + contents = response_contents(response) + + try: + response.raise_for_status() + except httpx.HTTPStatusError as exc: + cls: type[ApiError] = { + 403: Forbidden, + 404: NotFound, + }.get(exc.response.status_code, ApiError) + + raise cls(str(contents)) from exc + + return contents + + +def response_contents( + response: httpx.Response, +) -> JsonObject | bytes: + if response.headers.get('content-type', '').startswith('application/json'): + return response.json(object_hook=JsonObject) + return response.content + + +class JsonObject(dict): + """ + general json object that can bind any fields but also act as a dict. + """ + + def __getattr__(self, key): + try: + return self[key] + except KeyError as e: + raise AttributeError(f"'Dict' object has no attribute '{key}'") from e + + +class ApiError(Exception): + pass + + +class NotFound(ApiError): + pass + + +class Forbidden(ApiError): + pass diff --git a/codecov/groups.py b/codecov/groups.py new file mode 100644 index 0000000..4c87916 --- /dev/null +++ b/codecov/groups.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +from __future__ import annotations + +import dataclasses +import functools +import itertools +import pathlib + + +@dataclasses.dataclass(frozen=True) +class Group: + file: pathlib.Path + line_start: int + line_end: int + + +def compute_contiguous_groups( + values: list[int], separators: set[int], joiners: set[int], max_gap: int +) -> list[tuple[int, int]]: + """ + Given a list of (sorted) values, a list of separators and a list of + joiners, return a list of ranges (start, included end) describing groups of + values. + + Groups are created by joining contiguous values together, and in some cases + by merging groups, enclosing a gap of values between them. 
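For reference, a minimal sketch (not part of the patch) of how the attribute and call chaining in the `GitHub` client above maps onto REST paths; `owner/repo`, the PR number and the token are placeholder values, and the requests are left commented out since they would hit the network.
```
import httpx

from codecov import github_client

session = httpx.Client(
    base_url=github_client.BASE_URL,
    headers={'Authorization': 'token <placeholder>'},
)
gh = github_client.GitHub(session=session)

# Builds GET /repos/owner/repo/pulls/123
# pull_request = gh.repos('owner/repo').pulls(123).get()

# Keyword arguments on .get() become query parameters:
# builds GET /repos/owner/repo/pulls?state=open
# open_pulls = gh.repos('owner/repo').pulls.get(state='open')
```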
Gaps that may be + enclosed are small gaps (<= max_gap values after removing all joiners) + where no line is a "separator" + """ + contiguous_groups: list[tuple[int, int]] = [] + for _, contiguous_group in itertools.groupby(zip(values, itertools.count(1)), lambda x: x[1] - x[0]): + grouped_values = (e[0] for e in contiguous_group) + first = next(grouped_values) + try: + *_, last = grouped_values + except ValueError: + last = first + contiguous_groups.append((first, last)) + + def reducer(acc: list[tuple[int, int]], group: tuple[int, int]) -> list[tuple[int, int]]: + if not acc: + return [group] + + last_group = acc[-1] + last_start, last_end = last_group + next_start, next_end = group + + gap = set(range(last_end + 1, next_start)) - joiners + + gap_is_small = len(gap) <= max_gap + gap_contains_separators = gap & separators + + if gap_is_small and not gap_contains_separators: + acc[-1] = (last_start, next_end) + return acc + + acc.append(group) + return acc + + return functools.reduce(reducer, contiguous_groups, []) diff --git a/codecov/log.py b/codecov/log.py new file mode 100644 index 0000000..d3f3b61 --- /dev/null +++ b/codecov/log.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +import logging + +logger = logging.getLogger('codecov') + + +def __getattr__(name): + return getattr(logger, name) diff --git a/codecov/log_utils.py b/codecov/log_utils.py new file mode 100644 index 0000000..3e2d0ee --- /dev/null +++ b/codecov/log_utils.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +import logging + +from codecov import github + +LEVEL_MAPPING = { + 50: 'error', + 40: 'error', + 30: 'warning', + 20: 'notice', + 10: 'debug', +} + + +class ConsoleFormatter(logging.Formatter): + def format(self, record) -> str: + log = super().format(record) + + return f'{int(record.created)} {record.levelname} {record.name} - {log}' + + +class GitHubFormatter(logging.Formatter): + def format(self, record) -> str: + log = super().format(record) + level = LEVEL_MAPPING[record.levelno] + + return github.get_workflow_command(command=level, command_value=log) diff --git a/codecov/main.py b/codecov/main.py new file mode 100644 index 0000000..8bf5ae9 --- /dev/null +++ b/codecov/main.py @@ -0,0 +1,157 @@ +# -*- coding: utf-8 -*- +import logging +import os +import sys + +import httpx + +from codecov import ( + coverage as coverage_module, +) +from codecov import ( + diff_grouper, + github, + github_client, + log, + log_utils, + settings, + subprocess, + template, +) + + +def main(): + try: + config = settings.Config.from_environ(environ=os.environ) + + logging.basicConfig(level='DEBUG') + logging.getLogger().handlers[0].formatter = ( + log_utils.ConsoleFormatter() if config.DEBUG else log_utils.GitHubFormatter() + ) + + log.info('Starting action') + github_session = httpx.Client( + base_url=github_client.BASE_URL, + follow_redirects=True, + headers={'Authorization': f'token {config.GITHUB_TOKEN}'}, + ) + git = subprocess.Git() + + exit_code = action(config=config, github_session=github_session, git=git) + log.info('Ending action') + sys.exit(exit_code) + + except Exception: # pylint: disable=broad-except + log.exception( + 'Critical error. This error possibly occurred because the permissions of the workflow are set incorrectly.' 
+ ) + sys.exit(1) + + +def action(config: settings.Config, github_session: httpx.Client, git: subprocess.Git) -> int: + log.debug('Fetching Pull Request') + gh = github_client.GitHub(session=github_session) + try: + pr_number = github.get_pr_number(github=gh, config=config) + except github.CannotGetPullRequest: + log.debug('Cannot get pull request number. Exiting.', exc_info=True) + log.info( + 'This worflow is not triggered on a pull_request event, ' + "nor on a push event on a branch. Consequently, there's nothing to do. " + 'Exiting.' + ) + return 1 + + log.debug(f'Operating on Pull Request {pr_number}') + repo_info = github.get_repository_info(github=gh, repository=config.GITHUB_REPOSITORY) + + return process_pr( + config=config, + gh=gh, + repo_info=repo_info, + git=git, + pr_number=pr_number, + ) + + +def process_pr( + config: settings.Config, + gh: github_client.GitHub, + repo_info: github.RepositoryInfo, + git: subprocess.Git, + pr_number: int, +) -> int: + log.info('Generating comment for PR') + _, coverage = coverage_module.get_coverage_info( + coverage_path=config.COVERAGE_PATH, + ) + base_ref = config.GITHUB_BASE_REF or repo_info.default_branch + added_lines = coverage_module.get_added_lines(git=git, base_ref=base_ref) + diff_coverage = coverage_module.get_diff_coverage_info(coverage=coverage, added_lines=added_lines) + marker = template.get_marker(marker_id=config.SUBPROJECT_ID) + + files_info, count_files = template.select_files( + coverage=coverage, + diff_coverage=diff_coverage, + max_files=config.MAX_FILES_IN_COMMENT, + ) + try: + comment = template.get_comment_markdown( + coverage=coverage, + diff_coverage=diff_coverage, + files=files_info, + count_files=count_files, + max_files=config.MAX_FILES_IN_COMMENT, + minimum_green=config.MINIMUM_GREEN, + minimum_orange=config.MINIMUM_ORANGE, + repo_name=config.GITHUB_REPOSITORY, + pr_number=pr_number, + base_ref=base_ref, + base_template=template.read_template_file('comment.md.j2'), + marker=marker, + subproject_id=config.SUBPROJECT_ID, + complete_project_report=config.COMPLETE_PROJECT_REPORT, + ) + except template.MissingMarker: + log.error( + 'Marker not found. This error can happen if you defined a custom comment ' + "template that doesn't inherit the base template and you didn't include " + '``{{ marker }}``. The marker is necessary for this action to recognize ' + "its own comment and avoid making new comments or overwriting someone else's " + 'comment.' + ) + return 1 + except template.TemplateError: + log.exception( + 'There was a rendering error when computing the text of the comment to post ' + "on the PR. Please see the traceback, in particular if you're using a custom " + 'template.' + ) + return 1 + + # TODO: Disable this for now now and make it work through Github APIs + if pr_number and config.ANNOTATE_MISSING_LINES: + annotations = diff_grouper.get_diff_missing_groups(coverage=coverage, diff_coverage=diff_coverage) + github.create_missing_coverage_annotations( + annotation_type=config.ANNOTATION_TYPE, + annotations=[(annotation.file, annotation.line_start, annotation.line_end) for annotation in annotations], + ) + + try: + github.post_comment( + github=gh, + me=github.get_my_login(github=gh), + repository=config.GITHUB_REPOSITORY, + pr_number=pr_number, + contents=comment, + marker=marker, + ) + except github.CannotPostComment: + log.debug('Exception when posting comment', exc_info=True) + log.info( + 'Cannot post comment. 
This is probably because of body contents reached maximum allowed length in the comment' + ) + else: + log.debug('Comment created on PR') + + return 0 diff --git a/codecov/settings.py b/codecov/settings.py new file mode 100644 index 0000000..f7aef38 --- /dev/null +++ b/codecov/settings.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +from __future__ import annotations + +import dataclasses +import decimal +import inspect +import pathlib +from collections.abc import MutableMapping +from typing import Any + + +class MissingEnvironmentVariable(Exception): + pass + + +class InvalidAnnotationType(Exception): + pass + + +def path_below(path_str: str | pathlib.Path) -> pathlib.Path: + try: + path = pathlib.Path(path_str).resolve() + if not (path.exists() and path.is_file()): + raise ValueError('Path does not exist') + + if path.suffix != '.json': + raise ValueError('The file is not a JSON file.') + return path + except ValueError as exc: + raise ValueError('Path can not be resolved') from exc + + +def str_to_bool(value: str) -> bool: + return value.lower() in ('1', 'true', 'yes') + + +# pylint: disable=invalid-name, too-many-instance-attributes +@dataclasses.dataclass +class Config: + """This object defines the environment variables""" + + GITHUB_REPOSITORY: str + COVERAGE_PATH: pathlib.Path + GITHUB_TOKEN: str = dataclasses.field(repr=False) + GITHUB_PR_NUMBER: int | None = None + # Branch to run the action on (alternate to get PR number if not provided) + # Example Organisation:branch-name (Company:sample-branch) + GITHUB_REF: str | None = None + GITHUB_BASE_REF: str = 'main' + SUBPROJECT_ID: str | None = None + MINIMUM_GREEN: decimal.Decimal = decimal.Decimal('100') + MINIMUM_ORANGE: decimal.Decimal = decimal.Decimal('70') + ANNOTATE_MISSING_LINES: bool = False + ANNOTATION_TYPE: str = 'warning' + MAX_FILES_IN_COMMENT: int = 25 + COMPLETE_PROJECT_REPORT: bool = False + # Only for debugging, not exposed in the action + DEBUG: bool = False + + def __post_init__(self) -> None: + if self.GITHUB_PR_NUMBER is None and self.GITHUB_REF is None: + raise ValueError('Either GITHUB_PR_NUMBER or GITHUB_REF must be provided') + + # Clean methods + @classmethod + def clean_minimum_green(cls, value: str) -> decimal.Decimal: + return decimal.Decimal(value) + + @classmethod + def clean_minimum_orange(cls, value: str) -> decimal.Decimal: + return decimal.Decimal(value) + + @classmethod + def clean_annotate_missing_lines(cls, value: str) -> bool: + return str_to_bool(value) + + @classmethod + def clean_complete_project_report(cls, value: str) -> bool: + return str_to_bool(value) + + @classmethod + def clean_debug(cls, value: str) -> bool: + return str_to_bool(value) + + @classmethod + def clean_annotation_type(cls, value: str) -> str: + if value not in {'notice', 'warning', 'error'}: + raise InvalidAnnotationType( + f'The annotation type {value} is not valid. 
Please choose from notice, warning or error' + ) + return value + + @classmethod + def clean_github_pr_number(cls, value: str) -> int: + return int(value) + + @classmethod + def clean_coverage_path(cls, value: str) -> pathlib.Path: + return path_below(value) + + # We need to type environ as a MutableMapping because that's what + # os.environ is, and `dict[str, str]` is not enough + @classmethod + def from_environ(cls, environ: MutableMapping[str, str]) -> Config: + possible_variables = list(inspect.signature(cls).parameters) + config: dict[str, Any] = {k: v for k, v in environ.items() if k in possible_variables} + for key, value in list(config.items()): + if func := getattr(cls, f'clean_{key.lower()}', None): + try: + config[key] = func(value) + except ValueError as exc: + raise ValueError(f'{key}: {exc!s}') from exc + + try: + config_obj = cls(**config) + except TypeError as e: + missing = { + name + for name, param in inspect.signature(cls).parameters.items() + if param.default is inspect.Parameter.empty + } - set(environ) + raise MissingEnvironmentVariable(f" missing environment variable(s): {', '.join(missing)}") from e + return config_obj diff --git a/codecov/subprocess.py b/codecov/subprocess.py new file mode 100644 index 0000000..685c9ed --- /dev/null +++ b/codecov/subprocess.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +from __future__ import annotations + +import functools +import os +import pathlib +import shlex +import subprocess +from typing import Any + +from codecov import log + + +class SubProcessError(Exception): + pass + + +class GitError(SubProcessError): + pass + + +def run(*args, path: pathlib.Path, **kwargs) -> str: + try: + return subprocess.run( + [shlex.quote(arg) for arg in args], # noqa: S603 + cwd=path, + text=True, + # Only relates to DecodeErrors while decoding the output + errors='replace', + check=True, + capture_output=True, + **kwargs, + ).stdout + except subprocess.CalledProcessError as exc: + log.debug(f'Command failed: {args=} {path=} {kwargs=} {exc.stderr=} {exc.returncode=}') + raise SubProcessError('\n'.join([exc.stderr, exc.stdout])) from exc + + +class Git: + """ + Wrapper around calling git subprocesses in a way that reads a tiny bit like + Python code. + Call a method on git to call the corresponding subcommand (use `_` for `-`). + Add string parameters for the rest of the command line. + + Returns stdout or raise GitError + + >>> git = Git() + >>> git.clone(url) + >>> git.commit("-m", message) + >>> git.rev_parse("--short", "HEAD") + """ + + cwd = pathlib.Path('.') + + def _git(self, *args: str, env: dict[str, str] | None = None, **kwargs) -> str: + # When setting the `env` argument to run, instead of inheriting env + # vars from the current process, the whole environment of the + # subprocess is whatever we pass. 
In other words, we can either + # conditionally pass an `env` parameter, but it's less readable, + # or we can always pass an `env` parameter, but in this case, we + # need to always merge `os.environ` to it (and ensure our variables + # have precedence) + try: + return run( + 'git', + *args, + path=self.cwd, + env=os.environ | (env or {}), + **kwargs, + ) + except SubProcessError as exc: + raise GitError from exc + + def __getattr__(self, name: str) -> Any: + return functools.partial(self._git, name.replace('_', '-')) diff --git a/codecov/template.py b/codecov/template.py new file mode 100644 index 0000000..91e6613 --- /dev/null +++ b/codecov/template.py @@ -0,0 +1,246 @@ +# -*- coding: utf-8 -*- +from __future__ import annotations + +import dataclasses +import decimal +import functools +import hashlib +import itertools +import pathlib +from collections.abc import Callable +from importlib import resources + +import jinja2 +from jinja2.sandbox import SandboxedEnvironment + +from codecov import badge, coverage as coverage_module, diff_grouper + +MARKER = """""" + + +def uptodate(): + return True + + +class CommentLoader(jinja2.BaseLoader): + def __init__(self, base_template: str): + self.base_template = base_template + + def get_source(self, environment: jinja2.Environment, template: str) -> tuple[str, str | None, Callable[..., bool]]: + if template == 'base': + return ( + self.base_template, + 'codecov/template_files/comment.md.j2', + uptodate, + ) + + raise jinja2.TemplateNotFound(template) + + +class MissingMarker(Exception): + pass + + +class TemplateError(Exception): + pass + + +def get_marker(marker_id: str | None): + return MARKER.format(id_part=f' (id: {marker_id})' if marker_id else '') + + +def pluralize(number, singular='', plural='s'): + if number == 1: + return singular + + return plural + + +def sign(val: int | decimal.Decimal) -> str: + return '+' if val > 0 else '' if val < 0 else '±' + + +def delta(val: int) -> str: + return f'({sign(val)}{val})' + + +def remove_exponent(val: decimal.Decimal) -> decimal.Decimal: + # From https://docs.python.org/3/library/decimal.html#decimal-faq + return val.quantize(decimal.Decimal(1)) if val == val.to_integral() else val.normalize() + + +def percentage_value(val: decimal.Decimal, precision: int = 2) -> decimal.Decimal: + return remove_exponent( + (decimal.Decimal('100') * val).quantize( + decimal.Decimal('1.' 
+ ('0' * precision)), + rounding=decimal.ROUND_DOWN, + ) + ) + + +def pct(val: decimal.Decimal, precision: int = 2) -> str: + rounded = percentage_value(val=val, precision=precision) + return f'{rounded:f}%' + + +def x100(val: decimal.Decimal): + return val * 100 + + +@dataclasses.dataclass +class FileInfo: + path: pathlib.Path + coverage: coverage_module.FileCoverage + diff: coverage_module.FileDiffCoverage | None + + +def get_comment_markdown( + *, + coverage: coverage_module.Coverage, + diff_coverage: coverage_module.DiffCoverage, + files: list[FileInfo], + max_files: int | None, + count_files: int, + minimum_green: decimal.Decimal, + minimum_orange: decimal.Decimal, + repo_name: str, + pr_number: int, + base_ref: str, + base_template: str, + marker: str, + subproject_id: str | None = None, + complete_project_report: bool = False, +): + loader = CommentLoader(base_template=base_template) + env = SandboxedEnvironment(loader=loader) + env.filters['pct'] = pct + env.filters['delta'] = delta + env.filters['x100'] = x100 + env.filters['generate_badge'] = badge.get_static_badge_url + env.filters['pluralize'] = pluralize + env.filters['file_url'] = functools.partial(get_file_url, repo_name=repo_name, pr_number=pr_number) + env.filters['file_base_url'] = functools.partial(get_file_base_url, repo_name=repo_name, base_ref=base_ref) + env.filters['get_badge_color'] = functools.partial( + badge.get_badge_color, + minimum_green=minimum_green, + minimum_orange=minimum_orange, + ) + + missing_diff_lines = { + key: list(value) + for key, value in itertools.groupby( + diff_grouper.get_diff_missing_groups(coverage=coverage, diff_coverage=diff_coverage), + lambda x: x.file, + ) + } + + missing_lines_for_whole_project = { + key: list(value) + for key, value in itertools.groupby( + diff_grouper.get_missing_groups(coverage=coverage), + lambda x: x.file, + ) + } + try: + comment = env.get_template('base').render( + coverage=coverage, + diff_coverage=diff_coverage, + count_files=count_files, + max_files=max_files, + files=files, + missing_diff_lines=missing_diff_lines, + missing_lines_for_whole_project=missing_lines_for_whole_project, + subproject_id=subproject_id, + marker=marker, + complete_project_report=complete_project_report, + ) + except jinja2.exceptions.TemplateError as exc: + raise TemplateError from exc + + if marker not in comment: + raise MissingMarker() + + return comment + + +def select_files( + *, + coverage: coverage_module.Coverage, + diff_coverage: coverage_module.DiffCoverage, + max_files: int | None, +) -> tuple[list[FileInfo], int]: + """ + Selects the MAX_FILES files with the most new missing lines sorted by path + + """ + + files = [] + for path, coverage_file in coverage.files.items(): + diff_coverage_file = diff_coverage.files.get(path) + + file_info = FileInfo( + path=path, + coverage=coverage_file, + diff=diff_coverage_file, + ) + has_diff = bool(diff_coverage_file and diff_coverage_file.added_statements) + + if has_diff: + files.append(file_info) + + count_files = len(files) + files = sorted(files, key=sort_order, reverse=True) + if max_files is not None: + files = files[:max_files] + return sorted(files, key=lambda x: x.path), count_files + + +def sort_order(file_info: FileInfo) -> tuple[int, int, int]: + """ + Sort order for files: + 1. Files with the most new missing lines + 2. Files with the most added lines (from the diff) + 3. 
Files with the most new executed lines (including not in the diff) + """ + new_missing_lines = len(file_info.coverage.missing_lines) + added_statements = len(file_info.diff.added_statements) if file_info.diff else 0 + new_covered_lines = len(file_info.coverage.executed_lines) + + return abs(new_missing_lines), added_statements, abs(new_covered_lines) + + +def read_template_file(template: str) -> str: + return (resources.files('codecov') / 'template_files' / template).read_text() + + +def get_file_url( + filename: pathlib.Path, + lines: tuple[int, int] | None = None, + *, + repo_name: str, + pr_number: int, +) -> str: + # To link to a file in a PR, GitHub uses the link to the file overview combined with a SHA256 hash of the file path + s = f"https://github.com/{repo_name}/pull/{pr_number}/files#diff-{hashlib.sha256(str(filename).encode('utf-8')).hexdigest()}" + + if lines is not None: + # R stands for Right side of the diff. But since we generate these links for new code we only need the right side. + s += f'R{lines[0]}-R{lines[1]}' + + return s + + +def get_file_base_url( + filename: pathlib.Path, + lines: tuple[int, int] | None = None, + *, + repo_name: str, + base_ref: str, +) -> str: + s = f'https://github.com/{repo_name}/blob/{base_ref}/{str(filename)}' + + if lines is not None: + # L stands for Left side of the diff. But since we generate these links for base code we only need the left side. + s += f'#L{lines[0]}-L{lines[1]}' + + return s diff --git a/codecov/template_files/comment.md.j2 b/codecov/template_files/comment.md.j2 new file mode 100644 index 0000000..cf09772 --- /dev/null +++ b/codecov/template_files/comment.md.j2 @@ -0,0 +1,322 @@ +{%- block title -%}## Coverage report{%- if subproject_id %} ({{ subproject_id }}){%- endif -%}{%- endblock title%} + +{# Coverage evolution badge #} +{% block coverage_badges -%} +{%- block coverage_evolution_badge -%} +{%- if coverage %} +{%- set text = "Coverage of the whole project for this PR is" ~ coverage.info.percent_covered_display ~ "." -%} +{%- set color = coverage.info.percent_covered | get_badge_color -%} + + +{%- endif -%} +{%- endblock coverage_evolution_badge -%} + +{#- Coverage diff badge -#} +{#- space #} {# space -#} +{%- block diff_coverage_badge -%} +{%- set text = (diff_coverage.total_percent_covered | pct) ~ " of the statement lines added by this PR are covered" -%} + + +{%- endblock diff_coverage_badge -%} +{%- endblock coverage_badges -%} + +{%- macro statements_badge(path, statements_count) -%} +{% set text = "The " ~ path ~ " contains " ~ statements_count ~ " statement" ~ (statements_count | pluralize) ~"." -%} +{% set color = "007ec6" -%} + + +{%- endmacro -%} + + +{%- macro missing_lines_badge(path, missing_lines_count) -%} +{%- set text = "" ~ missing_lines_count ~ " statement" ~ (statements_count | pluralize) ~ " missing the coverage in " ~ path ~ "." -%} +{%- set color = 'red' -%} + + +{%- endmacro -%} + +{%- macro coverage_rate_badge(path, percent_covered, percent_covered_display, covered_statements_count, statements_count) -%} +{%- set text = "The coverage rate of " ~ path ~ " is " ~ percent_covered_display ~ " (" ~ covered_statements_count ~ "/" ~ statements_count ~ ")." 
-%} +{%- set message = "(" ~ covered_statements_count ~ "/" ~ statements_count ~ ")" -%} +{%- set color = percent_covered | get_badge_color -%} + + +{%- endmacro -%} + +{%- macro diff_coverage_rate_badge(path, added_statements_count, covered_statements_count, percent_covered) -%} +{% if added_statements_count -%} +{% set text = "In this PR, " ~ (added_statements_count) ~ " new statements are added to " ~ path ~ ", " ~ covered_statements_count ~ " of which are covered (" ~ (percent_covered | pct) ~ ")." -%} +{% set label = (percent_covered | pct(precision=0)) -%} +{% set message = "(" ~ covered_statements_count ~ "/" ~ added_statements_count ~ ")" -%} +{%- set color = (percent_covered | x100 | get_badge_color()) -%} +{% else -%} +{% set text = "This PR does not seem to add statements to " ~ path ~ "." -%} +{% set label = "" -%} +{%- set color = "grey" -%} +{% set message = "N/A" -%} +{% endif -%} + + +{%- endmacro -%} + + +{# Individual file report #} +{%- block coverage_by_file -%} +{%- if not files %} + +_This PR does not seem to contain any modification to coverable code._ +{%- else -%} +
Click to see coverage of changed files + + + + +{%- for parent, files_in_folder in files|groupby(attribute="path.parent") -%} + + + +{%- for file in files_in_folder -%} +{%- set path = file.coverage.path -%} + + + +{#- Statements cell -#} +{%- block statements_badge_cell scoped -%} +{{- statements_badge( + path=path, + statements_count=file.coverage.info.num_statements, +) -}} +{%- endblock statements_badge_cell-%} + +{#- Missing cell -#} +{%- block missing_lines_badge_cell scoped -%} +{{- missing_lines_badge( + path=path, + missing_lines_count=file.coverage.info.missing_lines, +) -}} +{%- endblock missing_lines_badge_cell -%} + +{#- Coverage rate -#} +{%- block coverage_rate_badge_cell scoped -%} +{{- coverage_rate_badge( + path=path, + percent_covered=file.coverage.info.percent_covered, + percent_covered_display=file.coverage.info.percent_covered_display, + covered_statements_count=file.coverage.info.covered_lines, + statements_count=file.coverage.info.num_statements, +) -}} +{%- endblock coverage_rate_badge_cell -%} + +{#- Coverage of added lines -#} +{%- block diff_coverage_rate_badge_cell scoped -%} +{{- diff_coverage_rate_badge( + path=path, + added_statements_count=((file.diff.added_statements | length) if file.diff else none), + covered_statements_count=((file.diff.covered_statements | length) if file.diff else none), + percent_covered=(file.diff.percent_covered if file.diff else none) +) -}} +{%- endblock diff_coverage_rate_badge_cell -%} + +{#- Link to missing lines -#} +{%- block link_to_missing_diff_lines_cell scoped -%} + + +{%- endblock link_to_missing_diff_lines_cell -%} +{%- endfor -%} +{%- endfor -%} + + + + + + +{#- Statements cell -#} +{%- block statements_badge_total_cell scoped -%} +{{- statements_badge( + path="whole project", + statements_count=coverage.info.num_statements, +) -}} +{%- endblock statements_badge_total_cell -%} + +{#- Missing cell -#} +{%- block missing_lines_badge_total_cell scoped -%} +{{- missing_lines_badge( + path="the whole project", + missing_lines_count=coverage.info.missing_lines, +) -}} +{%- endblock missing_lines_badge_total_cell -%} + +{#- Coverage rate -#} +{%- block coverage_rate_badge_total_cell scoped -%} +{{- coverage_rate_badge( + path="the whole project", + percent_covered=coverage.info.percent_covered, + percent_covered_display=coverage.info.percent_covered_display, + covered_statements_count=coverage.info.covered_lines, + statements_count=coverage.info.num_statements, +) -}} +{%- endblock coverage_rate_badge_total_cell -%} + +{# Coverage of added lines #} +{%- block diff_coverage_rate_badge_total_cell scoped -%} +{{- diff_coverage_rate_badge( + path="the whole project", + added_statements_count=diff_coverage.total_num_lines, + covered_statements_count=(diff_coverage.total_num_lines-diff_coverage.total_num_violations), + percent_covered=diff_coverage.total_percent_covered, +) -}} +{%- endblock diff_coverage_rate_badge_total_cell -%} + + + + +
FileStatementsMissingCoverageCoverage
(new stmts)
Lines missing
  {{ parent }}
  {{ path.name }} + +{%- set comma = joiner() -%} +{%- for group in missing_diff_lines.get(path, []) -%} +{{- comma() -}} + + +{{- group.line_start -}} +{%- if group.line_start != group.line_end -%} +- +{{- group.line_end -}} +{%- endif -%} + + +{%- endfor -%} +
Project Total 
+ +{%- if max_files and count_files > max_files %} + +_The report is truncated to {{ max_files }} files out of {{ count_files }}._ + +{% endif %} + +{%- block footer %} + + + +This report was generated by [CI-codecov] + + +
+ +{% endblock footer -%} + +{%- endif -%} +{%- endblock coverage_by_file %} + + + +{# Whole project coverage file report TODO: #} +{%- block project_coverage_by_file -%} +{%- if complete_project_report %} +
Click to see whole project coverage + + + + +{%- for path, file_coverage in coverage.files.items() -%} + + + +{#- Statements cell -#} +{%- block project_statements_badge_cell scoped -%} +{{- statements_badge( + path=path, + statements_count=file_coverage.info.num_statements, +) -}} +{%- endblock project_statements_badge_cell-%} + +{#- Missing cell -#} +{%- block project_missing_lines_badge_cell scoped -%} +{{- missing_lines_badge( + path=path, + missing_lines_count=file_coverage.info.missing_lines, +) -}} +{%- endblock project_missing_lines_badge_cell -%} + +{#- Coverage rate -#} +{%- block project_coverage_rate_badge_cell scoped -%} +{{- coverage_rate_badge( + path=path, + percent_covered=file_coverage.info.percent_covered, + covered_statements_count=file_coverage.info.covered_lines, + statements_count=file_coverage.info.num_statements, +) -}} +{%- endblock project_coverage_rate_badge_cell -%} + +{#- Link to missing lines -#} +{%- block project_link_to_missing_lines_cell scoped -%} + + +{%- endblock project_link_to_missing_lines_cell -%} +{%- endfor -%} + + + + + + +{#- Statements cell -#} +{%- block project_statements_badge_total_cell scoped -%} +{{- statements_badge( + path="the whole project", + statements_count=coverage.info.num_statements, +) -}} +{%- endblock project_statements_badge_total_cell -%} + +{#- Missing cell -#} +{%- block project_missing_lines_badge_total_cell scoped -%} +{{- missing_lines_badge( + path="the whole project", + missing_lines_count=coverage.info.missing_lines, +) -}} +{%- endblock project_missing_lines_badge_total_cell -%} + +{#- Coverage rate -#} +{%- block project_coverage_rate_badge_total_cell scoped -%} +{{- coverage_rate_badge( + path="the whole project", + percent_covered=coverage.info.percent_covered, + covered_statements_count=coverage.info.covered_lines, + statements_count=coverage.info.num_statements, +) -}} +{%- endblock project_coverage_rate_badge_total_cell -%} + + + + +
FileStatementsMissingCoverageLines missing
  {{ path }} + +{%- set comma = joiner() -%} +{%- for group in missing_lines_for_whole_project.get(path, []) -%} +{{- comma() -}} + + +{{- group.line_start -}} +{%- if group.line_start != group.line_end -%} +- +{{- group.line_end -}} +{%- endif -%} + + +{%- endfor -%} +
Project Total 
+ +{%- block project_footer %} + + + +This report was generated by [CI-codecov] + + +
+ +{% endblock project_footer -%} + +{%- endif -%} +{%- endblock project_coverage_by_file %} + +{{ marker -}} diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..630d63f --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,82 @@ +[project] +name = "python-coverage-comment" +version = "1.0" +description = "Create a Coverage report comment on Github PR" +readme = "README.md" +maintainers = [ + { name = "Pradeep Tammali", email = "pradeepkumartammali@gmail.com" }, +] + +[tool.autopep8] +in-place = false +max_line_length = 120 + +[tool.black] +line-length = 120 +skip-string-normalization = true + +[tool.ruff] +ignore = [ + # format + 'E501', # line too long +] +line-length = 120 +select = [ + 'E', # Errors + 'W', # Warnings + 'F', # Formatting + 'S', # Bandit +] +target-version = 'py311' + +[tool.ruff.mccabe] +max-complexity = 10 + +[tool.ruff.format] +quote-style = 'single' + +[tool.ruff.per-file-ignores] +# Ignore assert usage in tests +'tests/*.py' = ['S101'] + +[tool.isort] +profile = "black" +line_length = 120 + +[tool.flake8] +ignore = ['E501'] + +[tool.pytest.ini_options] +env = ['APP_ENVIRONMENT = unittest'] + +[tool.coverage.run] +include = ['codecov/*'] + +[tool.coverage.report] +show_missing = true +exclude_lines = ['if TYPE_CHECKING:'] + +[[tool.mypy.overrides]] +ignore_missing_imports = true +implicit_optional = true +module = 'codecov.*' + +[tool.pylint.MASTER] +ignore-paths = ['tests/*'] + +[tool.pylint.FORMAT] +indent-string = ' ' +max-line-length = 120 + +[tool.pylint."MESSAGES CONTROL"] +disable = ''' + missing-docstring, + line-too-long, + import-error, + fixme, + no-value-for-parameter, + too-few-public-methods +''' + +[tool.bandit] +exclude_dirs = ["tests/*"] diff --git a/run.py b/run.py new file mode 100644 index 0000000..6ee7211 --- /dev/null +++ b/run.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +from codecov import main + + +def main_call(name): + if name == '__main__': + main.main() + + +main_call(name=__name__) From 186ee8f8bb993c79450592f1d8dfc547df77585b Mon Sep 17 00:00:00 2001 From: Pradeep Tammali Date: Thu, 25 Jan 2024 14:58:20 +0100 Subject: [PATCH 2/5] setup: project level report --- codecov/diff_grouper.py | 3 +- codecov/github.py | 3 + codecov/main.py | 9 ++- codecov/template.py | 83 +++++++++++++++++++--------- codecov/template_files/comment.md.j2 | 51 ++++++++++------- 5 files changed, 101 insertions(+), 48 deletions(-) diff --git a/codecov/diff_grouper.py b/codecov/diff_grouper.py index 44bfbd1..0b2a0fd 100644 --- a/codecov/diff_grouper.py +++ b/codecov/diff_grouper.py @@ -23,7 +23,8 @@ def get_missing_groups( } # Lines that should be considered for filling a gap, unless # they are separators. - joiners = set(range(1, coverage_file.info.num_statements)) - separators + # The first line is UTF-8 encoding declaration, which is not a separator. 
+ joiners = set(range(2, coverage_file.info.num_statements)) - separators for start, end in groups.compute_contiguous_groups( values=coverage_file.missing_lines, diff --git a/codecov/github.py b/codecov/github.py index 64483ec..840587b 100644 --- a/codecov/github.py +++ b/codecov/github.py @@ -95,6 +95,9 @@ def post_comment( # pylint: disable=too-many-arguments contents: str, marker: str, ) -> None: + if len(contents) > 65536: + raise CannotPostComment('Comment exceeds allowed size(65536)') + issue_comments_path = github.repos(repository).issues(pr_number).comments comments_path = github.repos(repository).issues.comments diff --git a/codecov/main.py b/codecov/main.py index 8bf5ae9..63c5f52 100644 --- a/codecov/main.py +++ b/codecov/main.py @@ -90,17 +90,24 @@ def process_pr( diff_coverage = coverage_module.get_diff_coverage_info(coverage=coverage, added_lines=added_lines) marker = template.get_marker(marker_id=config.SUBPROJECT_ID) - files_info, count_files = template.select_files( + files_info, count_files, changed_files_info = template.select_changed_files( coverage=coverage, diff_coverage=diff_coverage, max_files=config.MAX_FILES_IN_COMMENT, ) + coverage_files_info, count_coverage_files = template.select_files( + coverage=coverage, + changed_files_info=changed_files_info, + max_files=config.MAX_FILES_IN_COMMENT - count_files, # Truncate the report to MAX_FILES_IN_COMMENT + ) try: comment = template.get_comment_markdown( coverage=coverage, diff_coverage=diff_coverage, files=files_info, count_files=count_files, + coverage_files=coverage_files_info, + count_coverage_files=count_coverage_files, max_files=config.MAX_FILES_IN_COMMENT, minimum_green=config.MINIMUM_GREEN, minimum_orange=config.MINIMUM_ORANGE, diff --git a/codecov/template.py b/codecov/template.py index 91e6613..e7b01ff 100644 --- a/codecov/template.py +++ b/codecov/template.py @@ -13,7 +13,8 @@ import jinja2 from jinja2.sandbox import SandboxedEnvironment -from codecov import badge, coverage as coverage_module, diff_grouper +from codecov import badge, diff_grouper +from codecov import coverage as coverage_module MARKER = """""" @@ -99,8 +100,10 @@ def get_comment_markdown( coverage: coverage_module.Coverage, diff_coverage: coverage_module.DiffCoverage, files: list[FileInfo], - max_files: int | None, count_files: int, + coverage_files: list[FileInfo], + count_coverage_files: int, + max_files: int | None, minimum_green: decimal.Decimal, minimum_orange: decimal.Decimal, repo_name: str, @@ -118,8 +121,9 @@ def get_comment_markdown( env.filters['x100'] = x100 env.filters['generate_badge'] = badge.get_static_badge_url env.filters['pluralize'] = pluralize - env.filters['file_url'] = functools.partial(get_file_url, repo_name=repo_name, pr_number=pr_number) - env.filters['file_base_url'] = functools.partial(get_file_base_url, repo_name=repo_name, base_ref=base_ref) + env.filters['file_url'] = functools.partial( + get_file_url, repo_name=repo_name, pr_number=pr_number, base_ref=base_ref + ) env.filters['get_badge_color'] = functools.partial( badge.get_badge_color, minimum_green=minimum_green, @@ -145,9 +149,11 @@ def get_comment_markdown( comment = env.get_template('base').render( coverage=coverage, diff_coverage=diff_coverage, - count_files=count_files, max_files=max_files, files=files, + count_files=count_files, + coverage_files=coverage_files, + count_coverage_files=count_coverage_files, missing_diff_lines=missing_diff_lines, missing_lines_for_whole_project=missing_lines_for_whole_project, subproject_id=subproject_id, @@ -163,14 
+169,15 @@ def get_comment_markdown( return comment -def select_files( +def select_changed_files( *, coverage: coverage_module.Coverage, diff_coverage: coverage_module.DiffCoverage, max_files: int | None, -) -> tuple[list[FileInfo], int]: +) -> tuple[list[FileInfo], int, list[FileInfo]]: """ Selects the MAX_FILES files with the most new missing lines sorted by path + These are the files which have been modified in the PR """ @@ -188,11 +195,45 @@ def select_files( if has_diff: files.append(file_info) - count_files = len(files) + return sort_and_trucate_files(files=files, max_files=max_files), len(files), files + + +def select_files( + *, + coverage: coverage_module.Coverage, + changed_files_info: list[FileInfo], + max_files: int | None, +) -> tuple[list[FileInfo], int]: + """ + Selects the MAX_FILES files with the most new missing lines sorted by path + Selects the files from the whole project coverage + Selects only files which are not in changed files report + Select only files which have statements (not empty files) + """ + + files = [] + changed_files_path = [file.path for file in changed_files_info] + for path, coverage_file in coverage.files.items(): + # Don't show the report for files that have been modified in the PR + # This is gonne be covered in the changed files report + if path in changed_files_path: + continue + + # Don't show the report for files that have no statements + if coverage_file.info.num_statements == 0: + continue + + file_info = FileInfo(path=path, coverage=coverage_file, diff=None) + files.append(file_info) + + return sort_and_trucate_files(files=files, max_files=max_files), len(files) + + +def sort_and_trucate_files(files: list[FileInfo], max_files: int | None) -> list[FileInfo]: files = sorted(files, key=sort_order, reverse=True) if max_files is not None: files = files[:max_files] - return sorted(files, key=lambda x: x.path), count_files + return sorted(files, key=lambda x: x.path) def sort_order(file_info: FileInfo) -> tuple[int, int, int]: @@ -216,10 +257,18 @@ def read_template_file(template: str) -> str: def get_file_url( filename: pathlib.Path, lines: tuple[int, int] | None = None, + base: bool = False, *, repo_name: str, pr_number: int, + base_ref: str, ) -> str: + if base: + s = f'https://github.com/{repo_name}/blob/{base_ref}/{str(filename)}' + if lines is not None: + s += f'#L{lines[0]}-L{lines[1]}' + return s + # To link to a file in a PR, GitHub uses the link to the file overview combined with a SHA256 hash of the file path s = f"https://github.com/{repo_name}/pull/{pr_number}/files#diff-{hashlib.sha256(str(filename).encode('utf-8')).hexdigest()}" @@ -228,19 +277,3 @@ def get_file_url( s += f'R{lines[0]}-R{lines[1]}' return s - - -def get_file_base_url( - filename: pathlib.Path, - lines: tuple[int, int] | None = None, - *, - repo_name: str, - base_ref: str, -) -> str: - s = f'https://github.com/{repo_name}/blob/{base_ref}/{str(filename)}' - - if lines is not None: - # L stands for Left side of the diff. But since we generate these links for base code we only need the left side. 
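For reference, a small stdlib-only sketch of the two link formats the merged `get_file_url` produces — the repository name, PR number, branch, and file path are placeholders:

```python
# Mirrors the URL formats built above: a PR "Files changed" anchor uses a
# SHA-256 hash of the file path plus R<line> (right/new side) anchors, while
# base links point at the blob on the base branch with L<line> anchors.
# Repository, PR number, branch and path are made-up examples.
import hashlib

repo_name, pr_number, base_ref = 'octo-org/octo-repo', 42, 'main'
filename = 'codecov/main.py'
lines = (10, 20)

digest = hashlib.sha256(filename.encode('utf-8')).hexdigest()
pr_url = f'https://github.com/{repo_name}/pull/{pr_number}/files#diff-{digest}R{lines[0]}-R{lines[1]}'
base_url = f'https://github.com/{repo_name}/blob/{base_ref}/{filename}#L{lines[0]}-L{lines[1]}'
print(pr_url)
print(base_url)
```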
- s += f'#L{lines[0]}-L{lines[1]}' - - return s diff --git a/codecov/template_files/comment.md.j2 b/codecov/template_files/comment.md.j2 index cf09772..6047375 100644 --- a/codecov/template_files/comment.md.j2 +++ b/codecov/template_files/comment.md.j2 @@ -20,26 +20,26 @@ {%- endblock diff_coverage_badge -%} {%- endblock coverage_badges -%} -{%- macro statements_badge(path, statements_count) -%} +{%- macro statements_badge(path, statements_count, base=false) -%} {% set text = "The " ~ path ~ " contains " ~ statements_count ~ " statement" ~ (statements_count | pluralize) ~"." -%} {% set color = "007ec6" -%} - + {%- endmacro -%} -{%- macro missing_lines_badge(path, missing_lines_count) -%} +{%- macro missing_lines_badge(path, missing_lines_count, base=false) -%} {%- set text = "" ~ missing_lines_count ~ " statement" ~ (statements_count | pluralize) ~ " missing the coverage in " ~ path ~ "." -%} {%- set color = 'red' -%} - + {%- endmacro -%} -{%- macro coverage_rate_badge(path, percent_covered, percent_covered_display, covered_statements_count, statements_count) -%} +{%- macro coverage_rate_badge(path, percent_covered, percent_covered_display, covered_statements_count, statements_count, base=false) -%} {%- set text = "The coverage rate of " ~ path ~ " is " ~ percent_covered_display ~ " (" ~ covered_statements_count ~ "/" ~ statements_count ~ ")." -%} {%- set message = "(" ~ covered_statements_count ~ "/" ~ statements_count ~ ")" -%} {%- set color = percent_covered | get_badge_color -%} - + {%- endmacro -%} @@ -209,41 +209,49 @@ This report was generated by [CI-codecov] -{# Whole project coverage file report TODO: #} +{# Whole project coverage file report #} {%- block project_coverage_by_file -%} {%- if complete_project_report %} +{%- if not coverage_files %} + +_No additional project files to report the coverage._ +{%- else -%}
Click to see whole project coverage -{%- for path, file_coverage in coverage.files.items() -%} +{%- for coverage_file in coverage_files -%} - + {#- Statements cell -#} {%- block project_statements_badge_cell scoped -%} {{- statements_badge( - path=path, - statements_count=file_coverage.info.num_statements, + path=coverage_file.path, + statements_count=coverage_file.coverage.info.num_statements, + base=true, ) -}} {%- endblock project_statements_badge_cell-%} {#- Missing cell -#} {%- block project_missing_lines_badge_cell scoped -%} {{- missing_lines_badge( - path=path, - missing_lines_count=file_coverage.info.missing_lines, + path=coverage_file.path, + missing_lines_count=coverage_file.coverage.info.missing_lines, + base=true, ) -}} {%- endblock project_missing_lines_badge_cell -%} -{#- Coverage rate -#} +{#- Coverage cell -#} {%- block project_coverage_rate_badge_cell scoped -%} {{- coverage_rate_badge( - path=path, - percent_covered=file_coverage.info.percent_covered, - covered_statements_count=file_coverage.info.covered_lines, - statements_count=file_coverage.info.num_statements, + path=coverage_file.path, + percent_covered=coverage_file.coverage.info.percent_covered, + percent_covered_display=coverage_file.coverage.info.percent_covered_display, + covered_statements_count=coverage_file.coverage.info.covered_lines, + statements_count=coverage_file.coverage.info.num_statements, + base=true, ) -}} {%- endblock project_coverage_rate_badge_cell -%} @@ -252,9 +260,9 @@ This report was generated by [CI-codecov] {%- endmacro -%}
FileStatementsMissingCoverageLines missing
  {{ path }}  {{ coverage_file.path }} {%- set comma = joiner() -%} -{%- for group in missing_lines_for_whole_project.get(path, []) -%} +{%- for group in missing_lines_for_whole_project.get(coverage_file.path, []) -%} {{- comma() -}} - + {{- group.line_start -}} {%- if group.line_start != group.line_end -%} @@ -277,7 +285,7 @@ This report was generated by [CI-codecov] {#- Statements cell -#} {%- block project_statements_badge_total_cell scoped -%} {{- statements_badge( - path="the whole project", + path="whole project", statements_count=coverage.info.num_statements, ) -}} {%- endblock project_statements_badge_total_cell -%} @@ -316,6 +324,7 @@ This report was generated by [CI-codecov] {% endblock project_footer -%} +{%- endif -%} {%- endif -%} {%- endblock project_coverage_by_file %} From a2051ed85fe9faa09c355b2c484ab7b21b4edafc Mon Sep 17 00:00:00 2001 From: Pradeep Tammali Date: Thu, 25 Jan 2024 14:58:21 +0100 Subject: [PATCH 3/5] fix: linter formatting and refactoring --- README.md | 2 +- codecov/badge.py | 45 ----------------------------------------- codecov/coverage.py | 2 +- codecov/diff_grouper.py | 3 +-- codecov/main.py | 4 +--- codecov/subprocess.py | 4 ++-- codecov/template.py | 16 ++++++--------- pyproject.toml | 6 +++++- 8 files changed, 17 insertions(+), 65 deletions(-) diff --git a/README.md b/README.md index 944aa96..f0497a1 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Create a Coverage report comment on Github PR To get started, follow these steps: -1. Clone the repository: +1. Clone the repository: ``` git clone ``` diff --git a/codecov/badge.py b/codecov/badge.py index 7a2c22d..abc515f 100644 --- a/codecov/badge.py +++ b/codecov/badge.py @@ -6,11 +6,8 @@ from __future__ import annotations import decimal -import json import urllib.parse -import httpx - def get_badge_color( rate: decimal.Decimal, @@ -26,50 +23,8 @@ def get_badge_color( return 'red' -def compute_badge_endpoint_data( - line_rate: decimal.Decimal, - color: str, -) -> str: - badge = { - 'schemaVersion': 1, - 'label': 'Coverage', - 'message': f'{int(line_rate)}%', - 'color': color, - } - - return json.dumps(badge) - - -def compute_badge_image(line_rate: decimal.Decimal, color: str, http_session: httpx.Client) -> str: - return http_session.get( - 'https://img.shields.io/static/v1?' - + urllib.parse.urlencode( - { - 'label': 'Coverage', - 'message': f'{int(line_rate)}%', - 'color': color, - } - ) - ).text - - def get_static_badge_url(label: str, message: str, color: str) -> str: if not color or not message: raise ValueError('color and message are required') code = '-'.join(e.replace('_', '__').replace('-', '--') for e in (label, message, color) if e) return 'https://img.shields.io/badge/' + urllib.parse.quote(f'{code}.svg') - - -def get_endpoint_url(endpoint_url: str) -> str: - return f'https://img.shields.io/endpoint?url={endpoint_url}' - - -def get_dynamic_url(endpoint_url: str) -> str: - return 'https://img.shields.io/badge/dynamic/json?' 
+ urllib.parse.urlencode( - { - 'color': 'brightgreen', - 'label': 'coverage', - 'query': '$.message', - 'url': endpoint_url, - } - ) diff --git a/codecov/coverage.py b/codecov/coverage.py index 8c2830e..973002f 100644 --- a/codecov/coverage.py +++ b/codecov/coverage.py @@ -23,7 +23,7 @@ class CoverageMetadata: @dataclasses.dataclass -class CoverageInfo: +class CoverageInfo: # pylint: disable=too-many-instance-attributes covered_lines: int num_statements: int percent_covered: decimal.Decimal diff --git a/codecov/diff_grouper.py b/codecov/diff_grouper.py index 0b2a0fd..a3f92f9 100644 --- a/codecov/diff_grouper.py +++ b/codecov/diff_grouper.py @@ -3,8 +3,7 @@ from collections.abc import Iterable -from codecov import coverage as coverage_module -from codecov import groups +from codecov import coverage as coverage_module, groups MAX_ANNOTATION_GAP = 3 diff --git a/codecov/main.py b/codecov/main.py index 63c5f52..a644fd6 100644 --- a/codecov/main.py +++ b/codecov/main.py @@ -7,8 +7,6 @@ from codecov import ( coverage as coverage_module, -) -from codecov import ( diff_grouper, github, github_client, @@ -74,7 +72,7 @@ def action(config: settings.Config, github_session: httpx.Client, git: subproces ) -def process_pr( +def process_pr( # pylint: disable=too-many-locals config: settings.Config, gh: github_client.GitHub, repo_info: github.RepositoryInfo, diff --git a/codecov/subprocess.py b/codecov/subprocess.py index 685c9ed..538b5e8 100644 --- a/codecov/subprocess.py +++ b/codecov/subprocess.py @@ -5,7 +5,7 @@ import os import pathlib import shlex -import subprocess +import subprocess # nosec: B404:blacklist from typing import Any from codecov import log @@ -22,7 +22,7 @@ class GitError(SubProcessError): def run(*args, path: pathlib.Path, **kwargs) -> str: try: return subprocess.run( - [shlex.quote(arg) for arg in args], # noqa: S603 + [shlex.quote(arg) for arg in args], # nosec: B603:subprocess_without_shell_equals_true # noqa: S603 cwd=path, text=True, # Only relates to DecodeErrors while decoding the output diff --git a/codecov/template.py b/codecov/template.py index e7b01ff..3165022 100644 --- a/codecov/template.py +++ b/codecov/template.py @@ -13,8 +13,7 @@ import jinja2 from jinja2.sandbox import SandboxedEnvironment -from codecov import badge, diff_grouper -from codecov import coverage as coverage_module +from codecov import badge, coverage as coverage_module, diff_grouper MARKER = """""" @@ -27,7 +26,9 @@ class CommentLoader(jinja2.BaseLoader): def __init__(self, base_template: str): self.base_template = base_template - def get_source(self, environment: jinja2.Environment, template: str) -> tuple[str, str | None, Callable[..., bool]]: + # fmt: off + def get_source(self, environment: jinja2.Environment, template: str) -> tuple[str, str | None, Callable[..., bool]]: # pylint: disable=unused-argument + # fmt: on if template == 'base': return ( self.base_template, @@ -61,10 +62,6 @@ def sign(val: int | decimal.Decimal) -> str: return '+' if val > 0 else '' if val < 0 else '±' -def delta(val: int) -> str: - return f'({sign(val)}{val})' - - def remove_exponent(val: decimal.Decimal) -> decimal.Decimal: # From https://docs.python.org/3/library/decimal.html#decimal-faq return val.quantize(decimal.Decimal(1)) if val == val.to_integral() else val.normalize() @@ -95,7 +92,7 @@ class FileInfo: diff: coverage_module.FileDiffCoverage | None -def get_comment_markdown( +def get_comment_markdown( # pylint: disable=too-many-arguments,too-many-locals *, coverage: coverage_module.Coverage, diff_coverage: 
coverage_module.DiffCoverage, @@ -117,7 +114,6 @@ def get_comment_markdown( loader = CommentLoader(base_template=base_template) env = SandboxedEnvironment(loader=loader) env.filters['pct'] = pct - env.filters['delta'] = delta env.filters['x100'] = x100 env.filters['generate_badge'] = badge.get_static_badge_url env.filters['pluralize'] = pluralize @@ -254,7 +250,7 @@ def read_template_file(template: str) -> str: return (resources.files('codecov') / 'template_files' / template).read_text() -def get_file_url( +def get_file_url( # pylint: disable=too-many-arguments filename: pathlib.Path, lines: tuple[int, int] | None = None, base: bool = False, diff --git a/pyproject.toml b/pyproject.toml index 630d63f..c14394c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,6 +32,9 @@ target-version = 'py311' [tool.ruff.mccabe] max-complexity = 10 +[tool.ruff.lint.isort] +combine-as-imports = true + [tool.ruff.format] quote-style = 'single' @@ -42,9 +45,10 @@ quote-style = 'single' [tool.isort] profile = "black" line_length = 120 +combine_as_imports = true [tool.flake8] -ignore = ['E501'] +ignore = ['E501', 'W503', 'W504'] [tool.pytest.ini_options] env = ['APP_ENVIRONMENT = unittest'] From e977b98ff7fe01ff9a60b260502f0214b2b458c8 Mon Sep 17 00:00:00 2001 From: Pradeep Tammali Date: Thu, 25 Jan 2024 16:16:29 +0100 Subject: [PATCH 4/5] test: test cases --- tests/__init__.py | 0 tests/conftest.py | 0 tests/test_badge.py | 51 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) create mode 100644 tests/__init__.py create mode 100644 tests/conftest.py create mode 100644 tests/test_badge.py diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_badge.py b/tests/test_badge.py new file mode 100644 index 0000000..cc966ab --- /dev/null +++ b/tests/test_badge.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +from __future__ import annotations + +import decimal + +import pytest + +from codecov import badge + + +@pytest.mark.parametrize( + 'rate, expected', + [ + (decimal.Decimal('10'), 'red'), + (decimal.Decimal('80'), 'orange'), + (decimal.Decimal('99'), 'brightgreen'), + ], +) +def test_get_badge_color(rate, expected): + color = badge.get_badge_color( + rate=rate, + minimum_green=decimal.Decimal('90'), + minimum_orange=decimal.Decimal('60'), + ) + assert color == expected + + +def test_get_static_badge_url(): + result = badge.get_static_badge_url(label='a-b', message='c_d e', color='green') + + assert result == 'https://img.shields.io/badge/a--b-c__d%20e-green.svg' + + +@pytest.mark.parametrize( + 'label, message, color', + [ + ( + 'Label', + '', + 'brightgreen', + ), + ( + 'Label', + '100% > 99%', + '', + ), + ], +) +def test_get_static_badge_url__error(label, message, color): + with pytest.raises(ValueError): + badge.get_static_badge_url(label=label, message=message, color=color) From 9cb76915b8939af159c9ed7f2bf1a1555ffb729a Mon Sep 17 00:00:00 2001 From: Pradeep Tammali Date: Thu, 25 Jan 2024 16:27:51 +0100 Subject: [PATCH 5/5] refactor: refactoring template --- .markdownlint.yaml | 9 +++ .pre-commit-config.yaml | 5 ++ Makefile | 2 +- README.md | 88 ++++++++++++++++++++++------ codecov/main.py | 2 +- codecov/template.py | 2 +- codecov/template_files/comment.md.j2 | 10 +++- 7 files changed, 95 insertions(+), 23 deletions(-) create mode 100644 .markdownlint.yaml diff --git a/.markdownlint.yaml b/.markdownlint.yaml new file mode 
100644 index 0000000..a8ade53 --- /dev/null +++ b/.markdownlint.yaml @@ -0,0 +1,9 @@ +default: true + +MD013: + # Number of characters + line_length: 120 + # Number of characters for headings + heading_line_length: 80 + # Number of characters for code blocks + code_block_line_length: 120 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9545941..7b1b397 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -32,6 +32,11 @@ repos: - id: python-check-mock-methods - id: python-no-eval - id: python-no-log-warn +- repo: https://github.com/igorshubovych/markdownlint-cli + rev: v0.38.0 + hooks: + - id: markdownlint-fix + args: ['--config', '.markdownlint.yaml'] - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.1.6 hooks: diff --git a/Makefile b/Makefile index 72636de..0b15e88 100644 --- a/Makefile +++ b/Makefile @@ -34,6 +34,6 @@ clean-lint: pipenv run pre-commit clean pipenv run pre-commit gc -all: setup-pipenv setup lint run +all: setup-pipenv setup lint clean: clean-lint clean-setup diff --git a/README.md b/README.md index f0497a1..483c87c 100644 --- a/README.md +++ b/README.md @@ -1,57 +1,111 @@ # python-coverage-comment -Create a Coverage report comment on Github PR - +Create a Coverage report comment on Github PR ## Setting up Local Environment using Pipenv To get started, follow these steps: 1. Clone the repository: - ``` + + ```bash git clone ``` 2. Navigate to the cloned repository: - ``` + + ```bash cd ``` 3. Build the project: - ``` + + ```bash make all ``` -That's it! You have successfully cloned the repository and built the project. +4. **Export the required environment variables**: -## Custom Installation: + ```bash + export GITHUB_REPOSITORY= + export COVERAGE_PATH= + export GITHUB_TOKEN= + export GITHUB_PR_NUMBER= + ``` -1. Install Python: Make sure you have Python installed on your system. You can download and install Python from the official Python website. +5. **Run the action**: -2. Install Pipenv: Pipenv is a package manager that combines pip and virtualenv. You can install Pipenv using pip, the Python package installer. Open your terminal or command prompt and run the following command: + ```bash + make run ``` + +## Required Environment Variables + +- `GITHUB_REPOSITORY`: The name of the GitHub repository where the action is running. +- `COVERAGE_PATH`: The path to the coverage report file. (JSON format) +- `GITHUB_TOKEN`: The GitHub token used for authentication. +- `GITHUB_PR_NUMBER`: The number of the pull request where the action is running. (Optional) +- `GITHUB_REF`: The branch to run the action on. If not provided, it will be used to get the PR number. (Optional) + +Note: Either `GITHUB_PR_NUMBER` or `GITHUB_REF` is required. + +## Optional Environment Variables + +- `GITHUB_BASE_REF`: The base branch for the pull request. Default is `main`. +- `SUBPROJECT_ID`: The ID of the subproject. (Optional) +- `MINIMUM_GREEN`: The minimum coverage percentage for green status. Default is 100. +- `MINIMUM_ORANGE`: The minimum coverage percentage for orange status. Default is 70. +- `ANNOTATE_MISSING_LINES`: Whether to annotate missing lines in the coverage report. Default is False. +- `ANNOTATION_TYPE`: The type of annotation to use for missing lines. Default is 'warning'. +- `MAX_FILES_IN_COMMENT`: The maximum number of files to include in the coverage report comment. Default is 25. +- `COMPLETE_PROJECT_REPORT`: Whether to include the complete project coverage report in the comment. Default is False. 
+- `DEBUG`: Whether to enable debug mode. Default is False. + +That's it! You have successfully cloned the repository and built the project. + +## Custom Installation + +1. Install Python: Make sure you have Python installed on your system. +You can download and install Python from the official Python website. + +2. Install Pipenv: Pipenv is a package manager that combines pip and virtualenv. +You can install Pipenv using pip, the Python package installer. +Open your terminal or command prompt and run the following command: + + ```bash pip install pipenv ``` -4. Install project dependencies: To install the project dependencies specified in the Pipfile, run the following command: - ``` +3. Install project dependencies: +To install the project dependencies specified in the Pipfile, run the following command: + + ```bash pipenv install --dev ``` -5. Activate the virtual environment: To activate the virtual environment created by Pipenv, run the following command: - ``` +4. Activate the virtual environment: +To activate the virtual environment created by Pipenv, run the following command: + + ```bash pipenv shell ``` -6. Run your project: You can now run your project using the activated virtual environment. For example, if your project has a run.py file, you can run it using the following command: - ``` +5. Run your project: +You can now run your project using the activated virtual environment. +For example, if your project has a run.py file, you can run it using the following command: + + ```bash python run.py ``` -7. Install pre-commit hooks: To set up pre-commit hooks for your project, run the following command: - ``` +6. Install pre-commit hooks: To set up pre-commit hooks for your project, run the following command: + + ```bash pipenv run pre-commit install ``` + This will install and configure pre-commit hooks that will run before each commit to enforce code quality and style standards. That's it! You have successfully set up your local environment using Pipenv. + +This project is inspired by the concepts of [py-cov-action/python-coverage-comment-action](https://github.com/py-cov-action/python-coverage-comment-action.git). diff --git a/codecov/main.py b/codecov/main.py index a644fd6..5926bed 100644 --- a/codecov/main.py +++ b/codecov/main.py @@ -22,7 +22,7 @@ def main(): try: config = settings.Config.from_environ(environ=os.environ) - logging.basicConfig(level='DEBUG') + logging.basicConfig(level='DEBUG' if config.DEBUG else 'INFO') logging.getLogger().handlers[0].formatter = ( log_utils.ConsoleFormatter() if config.DEBUG else log_utils.GitHubFormatter() ) diff --git a/codecov/template.py b/codecov/template.py index 3165022..fc27071 100644 --- a/codecov/template.py +++ b/codecov/template.py @@ -71,7 +71,7 @@ def percentage_value(val: decimal.Decimal, precision: int = 2) -> decimal.Decima return remove_exponent( (decimal.Decimal('100') * val).quantize( decimal.Decimal('1.' + ('0' * precision)), - rounding=decimal.ROUND_DOWN, + rounding=decimal.ROUND_CEILING, ) ) diff --git a/codecov/template_files/comment.md.j2 b/codecov/template_files/comment.md.j2 index 6047375..7f030a7 100644 --- a/codecov/template_files/comment.md.j2 +++ b/codecov/template_files/comment.md.j2 @@ -6,7 +6,7 @@ {%- if coverage %} {%- set text = "Coverage of the whole project for this PR is" ~ coverage.info.percent_covered_display ~ "." 
-%} {%- set color = coverage.info.percent_covered | get_badge_color -%} - + {%- endif -%} {%- endblock coverage_evolution_badge -%} @@ -29,8 +29,12 @@ {%- macro missing_lines_badge(path, missing_lines_count, base=false) -%} -{%- set text = "" ~ missing_lines_count ~ " statement" ~ (statements_count | pluralize) ~ " missing the coverage in " ~ path ~ "." -%} -{%- set color = 'red' -%} +{%- set text = missing_lines_count ~ " statement" ~ (statements_count | pluralize) ~ " missing the coverage in " ~ path ~ "." -%} +{% if missing_lines_count == 0 -%} +{%- set color = "brightgreen" -%} +{% else -%} +{%- set color = "red" -%} +{% endif -%}
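To close, a short sketch of how the badge helpers referenced throughout the template behave, assuming the `codecov` package from this patch series is importable; the counts and thresholds are example values:

```python
# Sketch: the color chosen in the template hunk above ends up in a static
# shields.io badge URL via the helpers kept in codecov/badge.py.
import decimal

from codecov import badge

missing_lines_count = 0  # example value
color = 'brightgreen' if missing_lines_count == 0 else 'red'
print(badge.get_static_badge_url(label='missing', message=str(missing_lines_count), color=color))
# -> https://img.shields.io/badge/missing-0-brightgreen.svg

# Rate-based coloring, as exercised by tests/test_badge.py.
print(badge.get_badge_color(
    rate=decimal.Decimal('85'),
    minimum_green=decimal.Decimal('90'),
    minimum_orange=decimal.Decimal('60'),
))
# -> orange
```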