diff --git a/newsfragments/5139.feature.rst b/newsfragments/5139.feature.rst new file mode 100644 index 0000000000..aed3df5b74 --- /dev/null +++ b/newsfragments/5139.feature.rst @@ -0,0 +1 @@ +Refreshed vendored dependencies. diff --git a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/REQUESTED b/setuptools/_vendor/.lock similarity index 100% rename from setuptools/_vendor/importlib_metadata-8.0.0.dist-info/REQUESTED rename to setuptools/_vendor/.lock diff --git a/setuptools/_vendor/autocommand-2.2.2.dist-info/INSTALLER b/setuptools/_vendor/autocommand-2.2.2.dist-info/INSTALLER index a1b589e38a..5c69047b2e 100644 --- a/setuptools/_vendor/autocommand-2.2.2.dist-info/INSTALLER +++ b/setuptools/_vendor/autocommand-2.2.2.dist-info/INSTALLER @@ -1 +1 @@ -pip +uv \ No newline at end of file diff --git a/setuptools/_vendor/autocommand-2.2.2.dist-info/RECORD b/setuptools/_vendor/autocommand-2.2.2.dist-info/RECORD index e6e12ea51e..8cd7450980 100644 --- a/setuptools/_vendor/autocommand-2.2.2.dist-info/RECORD +++ b/setuptools/_vendor/autocommand-2.2.2.dist-info/RECORD @@ -1,18 +1,13 @@ -autocommand-2.2.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -autocommand-2.2.2.dist-info/LICENSE,sha256=reeNBJgtaZctREqOFKlPh6IzTdOFXMgDSOqOJAqg3y0,7634 -autocommand-2.2.2.dist-info/METADATA,sha256=OADZuR3O6iBlpu1ieTgzYul6w4uOVrk0P0BO5TGGAJk,15006 -autocommand-2.2.2.dist-info/RECORD,, -autocommand-2.2.2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 -autocommand-2.2.2.dist-info/top_level.txt,sha256=AzfhgKKS8EdAwWUTSF8mgeVQbXOY9kokHB6kSqwwqu0,12 -autocommand/__init__.py,sha256=zko5Rnvolvb-UXjCx_2ArPTGBWwUK5QY4LIQIKYR7As,1037 -autocommand/__pycache__/__init__.cpython-312.pyc,, -autocommand/__pycache__/autoasync.cpython-312.pyc,, -autocommand/__pycache__/autocommand.cpython-312.pyc,, -autocommand/__pycache__/automain.cpython-312.pyc,, -autocommand/__pycache__/autoparse.cpython-312.pyc,, -autocommand/__pycache__/errors.cpython-312.pyc,, -autocommand/autoasync.py,sha256=AMdyrxNS4pqWJfP_xuoOcImOHWD-qT7x06wmKN1Vp-U,5680 -autocommand/autocommand.py,sha256=hmkEmQ72HtL55gnURVjDOnsfYlGd5lLXbvT4KG496Qw,2505 -autocommand/automain.py,sha256=A2b8i754Mxc_DjU9WFr6vqYDWlhz0cn8miu8d8EsxV8,2076 -autocommand/autoparse.py,sha256=WVWmZJPcbzUKXP40raQw_0HD8qPJ2V9VG1eFFmmnFxw,11642 -autocommand/errors.py,sha256=7aa3roh9Herd6nIKpQHNWEslWE8oq7GiHYVUuRqORnA,886 +autocommand-2.2.2.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +autocommand-2.2.2.dist-info/LICENSE,sha256=reeNBJgtaZctREqOFKlPh6IzTdOFXMgDSOqOJAqg3y0,7634 +autocommand-2.2.2.dist-info/METADATA,sha256=OADZuR3O6iBlpu1ieTgzYul6w4uOVrk0P0BO5TGGAJk,15006 +autocommand-2.2.2.dist-info/RECORD,, +autocommand-2.2.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +autocommand-2.2.2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +autocommand-2.2.2.dist-info/top_level.txt,sha256=AzfhgKKS8EdAwWUTSF8mgeVQbXOY9kokHB6kSqwwqu0,12 +autocommand/__init__.py,sha256=zko5Rnvolvb-UXjCx_2ArPTGBWwUK5QY4LIQIKYR7As,1037 +autocommand/autoasync.py,sha256=AMdyrxNS4pqWJfP_xuoOcImOHWD-qT7x06wmKN1Vp-U,5680 +autocommand/autocommand.py,sha256=hmkEmQ72HtL55gnURVjDOnsfYlGd5lLXbvT4KG496Qw,2505 +autocommand/automain.py,sha256=A2b8i754Mxc_DjU9WFr6vqYDWlhz0cn8miu8d8EsxV8,2076 +autocommand/autoparse.py,sha256=WVWmZJPcbzUKXP40raQw_0HD8qPJ2V9VG1eFFmmnFxw,11642 +autocommand/errors.py,sha256=7aa3roh9Herd6nIKpQHNWEslWE8oq7GiHYVUuRqORnA,886 diff --git 
a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/REQUESTED b/setuptools/_vendor/autocommand-2.2.2.dist-info/REQUESTED similarity index 100% rename from setuptools/_vendor/jaraco.collections-5.1.0.dist-info/REQUESTED rename to setuptools/_vendor/autocommand-2.2.2.dist-info/REQUESTED diff --git a/setuptools/_vendor/backports.tarfile-1.2.0.dist-info/INSTALLER b/setuptools/_vendor/backports.tarfile-1.2.0.dist-info/INSTALLER index a1b589e38a..5c69047b2e 100644 --- a/setuptools/_vendor/backports.tarfile-1.2.0.dist-info/INSTALLER +++ b/setuptools/_vendor/backports.tarfile-1.2.0.dist-info/INSTALLER @@ -1 +1 @@ -pip +uv \ No newline at end of file diff --git a/setuptools/_vendor/backports.tarfile-1.2.0.dist-info/RECORD b/setuptools/_vendor/backports.tarfile-1.2.0.dist-info/RECORD index 536dc2f09e..c7d2c25bc1 100644 --- a/setuptools/_vendor/backports.tarfile-1.2.0.dist-info/RECORD +++ b/setuptools/_vendor/backports.tarfile-1.2.0.dist-info/RECORD @@ -1,17 +1,12 @@ -backports.tarfile-1.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -backports.tarfile-1.2.0.dist-info/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 -backports.tarfile-1.2.0.dist-info/METADATA,sha256=ghXFTq132dxaEIolxr3HK1mZqm9iyUmaRANZQSr6WlE,2020 -backports.tarfile-1.2.0.dist-info/RECORD,, -backports.tarfile-1.2.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -backports.tarfile-1.2.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 -backports.tarfile-1.2.0.dist-info/top_level.txt,sha256=cGjaLMOoBR1FK0ApojtzWVmViTtJ7JGIK_HwXiEsvtU,10 -backports/__init__.py,sha256=iOEMwnlORWezdO8-2vxBIPSR37D7JGjluZ8f55vzxls,81 -backports/__pycache__/__init__.cpython-312.pyc,, -backports/tarfile/__init__.py,sha256=Pwf2qUIfB0SolJPCKcx3vz3UEu_aids4g4sAfxy94qg,108491 -backports/tarfile/__main__.py,sha256=Yw2oGT1afrz2eBskzdPYL8ReB_3liApmhFkN2EbDmc4,59 -backports/tarfile/__pycache__/__init__.cpython-312.pyc,, -backports/tarfile/__pycache__/__main__.cpython-312.pyc,, -backports/tarfile/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -backports/tarfile/compat/__pycache__/__init__.cpython-312.pyc,, -backports/tarfile/compat/__pycache__/py38.cpython-312.pyc,, -backports/tarfile/compat/py38.py,sha256=iYkyt_gvWjLzGUTJD9TuTfMMjOk-ersXZmRlvQYN2qE,568 +backports.tarfile-1.2.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +backports.tarfile-1.2.0.dist-info/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 +backports.tarfile-1.2.0.dist-info/METADATA,sha256=ghXFTq132dxaEIolxr3HK1mZqm9iyUmaRANZQSr6WlE,2020 +backports.tarfile-1.2.0.dist-info/RECORD,, +backports.tarfile-1.2.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +backports.tarfile-1.2.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 +backports.tarfile-1.2.0.dist-info/top_level.txt,sha256=cGjaLMOoBR1FK0ApojtzWVmViTtJ7JGIK_HwXiEsvtU,10 +backports/__init__.py,sha256=iOEMwnlORWezdO8-2vxBIPSR37D7JGjluZ8f55vzxls,81 +backports/tarfile/__init__.py,sha256=Pwf2qUIfB0SolJPCKcx3vz3UEu_aids4g4sAfxy94qg,108491 +backports/tarfile/__main__.py,sha256=Yw2oGT1afrz2eBskzdPYL8ReB_3liApmhFkN2EbDmc4,59 +backports/tarfile/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +backports/tarfile/compat/py38.py,sha256=iYkyt_gvWjLzGUTJD9TuTfMMjOk-ersXZmRlvQYN2qE,568 diff --git a/setuptools/_vendor/bin/wheel b/setuptools/_vendor/bin/wheel new file mode 100755 index 0000000000..fa5a76906b --- /dev/null +++ 
b/setuptools/_vendor/bin/wheel @@ -0,0 +1,10 @@ +#!/Users/jaraco/code/pypa/setuptools/.tox/vendor/bin/python3 +# -*- coding: utf-8 -*- +import sys +from wheel.cli import main +if __name__ == "__main__": + if sys.argv[0].endswith("-script.pyw"): + sys.argv[0] = sys.argv[0][:-11] + elif sys.argv[0].endswith(".exe"): + sys.argv[0] = sys.argv[0][:-4] + sys.exit(main()) diff --git a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/INSTALLER b/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/LICENSE b/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/RECORD b/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/RECORD deleted file mode 100644 index 07b7dc51db..0000000000 --- a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/RECORD +++ /dev/null @@ -1,32 +0,0 @@ -importlib_metadata-8.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -importlib_metadata-8.0.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 -importlib_metadata-8.0.0.dist-info/METADATA,sha256=anuQ7_7h4J1bSEzfcjIBakPi2cyVQ7y7jklLHsBeH1k,4648 -importlib_metadata-8.0.0.dist-info/RECORD,, -importlib_metadata-8.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -importlib_metadata-8.0.0.dist-info/WHEEL,sha256=mguMlWGMX-VHnMpKOjjQidIo1ssRlCFu4a4mBpz1s2M,91 -importlib_metadata-8.0.0.dist-info/top_level.txt,sha256=CO3fD9yylANiXkrMo4qHLV_mqXL2sC5JFKgt1yWAT-A,19 -importlib_metadata/__init__.py,sha256=tZNB-23h8Bixi9uCrQqj9Yf0aeC--Josdy3IZRIQeB0,33798 -importlib_metadata/__pycache__/__init__.cpython-312.pyc,, -importlib_metadata/__pycache__/_adapters.cpython-312.pyc,, -importlib_metadata/__pycache__/_collections.cpython-312.pyc,, -importlib_metadata/__pycache__/_compat.cpython-312.pyc,, -importlib_metadata/__pycache__/_functools.cpython-312.pyc,, -importlib_metadata/__pycache__/_itertools.cpython-312.pyc,, -importlib_metadata/__pycache__/_meta.cpython-312.pyc,, -importlib_metadata/__pycache__/_text.cpython-312.pyc,, -importlib_metadata/__pycache__/diagnose.cpython-312.pyc,, -importlib_metadata/_adapters.py,sha256=rIhWTwBvYA1bV7i-5FfVX38qEXDTXFeS5cb5xJtP3ks,2317 -importlib_metadata/_collections.py,sha256=CJ0OTCHIjWA0ZIVS4voORAsn2R4R2cQBEtPsZEJpASY,743 -importlib_metadata/_compat.py,sha256=73QKrN9KNoaZzhbX5yPCCZa-FaALwXe8TPlDR72JgBU,1314 -importlib_metadata/_functools.py,sha256=PsY2-4rrKX4RVeRC1oGp1lB1pmC9eKN88_f-bD9uOoA,2895 -importlib_metadata/_itertools.py,sha256=cvr_2v8BRbxcIl5x5ldfqdHjhI8Yi8s8yk50G_nm6jQ,2068 -importlib_metadata/_meta.py,sha256=nxZ7C8GVlcBFAKWyVOn_dn7ot_twBcbm1NmvjIetBHI,1801 -importlib_metadata/_text.py,sha256=HCsFksZpJLeTP3NEk_ngrAeXVRRtTrtyh9eOABoRP4A,2166 -importlib_metadata/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -importlib_metadata/compat/__pycache__/__init__.cpython-312.pyc,, 
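The RECORD rewrites that dominate this diff follow the wheel RECORD format: each entry is `path,sha256=<digest>,<size-in-bytes>`, where the digest is the URL-safe base64 encoding of the file's SHA-256 hash with the trailing `=` padding stripped. That is why the one-word `INSTALLER` change from `pip` (4 bytes, with a trailing newline) to `uv` (2 bytes, note the "\ No newline at end of file" marker) also changes the recorded hash and size, and why every zero-byte file (`REQUESTED`, `py.typed`, `compat/__init__.py`) shares the digest `47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU`. A minimal sketch of the computation; the helper name `record_entry` is ours, for illustration only:

```python
import base64
import hashlib
from pathlib import Path


def record_entry(path: str) -> str:
    """Render one wheel RECORD line: path,sha256=<digest>,<size>."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b'=')
    return f'{path},sha256={digest.decode()},{len(data)}'


# All zero-byte files hash to the same well-known digest:
empty = base64.urlsafe_b64encode(hashlib.sha256(b'').digest()).rstrip(b'=')
assert empty == b'47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU'
```

The regenerated RECORDs also drop the `__pycache__/*.pyc` entries, presumably because uv, unlike pip, does not byte-compile installed modules by default.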
-importlib_metadata/compat/__pycache__/py311.cpython-312.pyc,, -importlib_metadata/compat/__pycache__/py39.cpython-312.pyc,, -importlib_metadata/compat/py311.py,sha256=uqm-K-uohyj1042TH4a9Er_I5o7667DvulcD-gC_fSA,608 -importlib_metadata/compat/py39.py,sha256=cPkMv6-0ilK-0Jw_Tkn0xYbOKJZc4WJKQHow0c2T44w,1102 -importlib_metadata/diagnose.py,sha256=nkSRMiowlmkhLYhKhvCg9glmt_11Cox-EmLzEbqYTa8,379 -importlib_metadata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/setuptools/_vendor/packaging-24.2.dist-info/INSTALLER b/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/INSTALLER similarity index 100% rename from setuptools/_vendor/packaging-24.2.dist-info/INSTALLER rename to setuptools/_vendor/importlib_metadata-8.7.1.dist-info/INSTALLER diff --git a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/METADATA b/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/METADATA similarity index 71% rename from setuptools/_vendor/importlib_metadata-8.0.0.dist-info/METADATA rename to setuptools/_vendor/importlib_metadata-8.7.1.dist-info/METADATA index 85513e8a9f..9d894b433d 100644 --- a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/METADATA +++ b/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/METADATA @@ -1,41 +1,45 @@ -Metadata-Version: 2.1 +Metadata-Version: 2.4 Name: importlib_metadata -Version: 8.0.0 +Version: 8.7.1 Summary: Read metadata from Python packages Author-email: "Jason R. Coombs" +License-Expression: Apache-2.0 Project-URL: Source, https://github.com/python/importlib_metadata Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: Apache Software License Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3 :: Only -Requires-Python: >=3.8 +Requires-Python: >=3.9 Description-Content-Type: text/x-rst License-File: LICENSE -Requires-Dist: zipp >=0.5 -Requires-Dist: typing-extensions >=3.6.4 ; python_version < "3.8" +Requires-Dist: zipp>=3.20 +Provides-Extra: test +Requires-Dist: pytest!=8.1.*,>=6; extra == "test" +Requires-Dist: packaging; extra == "test" +Requires-Dist: pyfakefs; extra == "test" +Requires-Dist: flufl.flake8; extra == "test" +Requires-Dist: pytest-perf>=0.9.2; extra == "test" +Requires-Dist: jaraco.test>=5.4; extra == "test" Provides-Extra: doc -Requires-Dist: sphinx >=3.5 ; extra == 'doc' -Requires-Dist: jaraco.packaging >=9.3 ; extra == 'doc' -Requires-Dist: rst.linker >=1.9 ; extra == 'doc' -Requires-Dist: furo ; extra == 'doc' -Requires-Dist: sphinx-lint ; extra == 'doc' -Requires-Dist: jaraco.tidelift >=1.4 ; extra == 'doc' +Requires-Dist: sphinx>=3.5; extra == "doc" +Requires-Dist: jaraco.packaging>=9.3; extra == "doc" +Requires-Dist: rst.linker>=1.9; extra == "doc" +Requires-Dist: furo; extra == "doc" +Requires-Dist: sphinx-lint; extra == "doc" +Requires-Dist: jaraco.tidelift>=1.4; extra == "doc" Provides-Extra: perf -Requires-Dist: ipython ; extra == 'perf' -Provides-Extra: test -Requires-Dist: pytest !=8.1.*,>=6 ; extra == 'test' -Requires-Dist: pytest-checkdocs >=2.4 ; extra == 'test' -Requires-Dist: pytest-cov ; extra == 'test' -Requires-Dist: pytest-mypy ; extra == 'test' -Requires-Dist: pytest-enabler >=2.2 ; extra == 'test' -Requires-Dist: pytest-ruff >=0.2.1 ; extra == 'test' -Requires-Dist: packaging ; extra == 'test' -Requires-Dist: pyfakefs ; extra == 'test' -Requires-Dist: flufl.flake8 ; extra == 'test' -Requires-Dist: pytest-perf >=0.9.2 ; extra == 'test' -Requires-Dist: jaraco.test 
>=5.4 ; extra == 'test' -Requires-Dist: importlib-resources >=1.3 ; (python_version < "3.9") and extra == 'test' +Requires-Dist: ipython; extra == "perf" +Provides-Extra: check +Requires-Dist: pytest-checkdocs>=2.4; extra == "check" +Requires-Dist: pytest-ruff>=0.2.1; sys_platform != "cygwin" and extra == "check" +Provides-Extra: cover +Requires-Dist: pytest-cov; extra == "cover" +Provides-Extra: enabler +Requires-Dist: pytest-enabler>=3.4; extra == "enabler" +Provides-Extra: type +Requires-Dist: pytest-mypy>=1.0.1; extra == "type" +Requires-Dist: mypy<1.19; platform_python_implementation == "PyPy" and extra == "type" +Dynamic: license-file .. image:: https://img.shields.io/pypi/v/importlib_metadata.svg :target: https://pypi.org/project/importlib_metadata @@ -46,14 +50,14 @@ Requires-Dist: importlib-resources >=1.3 ; (python_version < "3.9") and extra == :target: https://github.com/python/importlib_metadata/actions?query=workflow%3A%22tests%22 :alt: tests -.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json +.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json :target: https://github.com/astral-sh/ruff :alt: Ruff .. image:: https://readthedocs.org/projects/importlib-metadata/badge/?version=latest :target: https://importlib-metadata.readthedocs.io/en/latest/?badge=latest -.. image:: https://img.shields.io/badge/skeleton-2024-informational +.. image:: https://img.shields.io/badge/skeleton-2025-informational :target: https://blog.jaraco.com/skeleton .. image:: https://tidelift.com/badges/package/pypi/importlib-metadata diff --git a/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/RECORD b/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/RECORD new file mode 100644 index 0000000000..78aa761cab --- /dev/null +++ b/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/RECORD @@ -0,0 +1,21 @@ +importlib_metadata-8.7.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +importlib_metadata-8.7.1.dist-info/METADATA,sha256=o-OLnuQyYonUhkcE8w4pnudp4jCc6fSnXw3hpQrQo1Y,4670 +importlib_metadata-8.7.1.dist-info/RECORD,, +importlib_metadata-8.7.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +importlib_metadata-8.7.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +importlib_metadata-8.7.1.dist-info/licenses/LICENSE,sha256=RYUC4S2Xu_ZEOGBqIARKqF6wX7CoqAe7NdvsJT_R_AQ,10278 +importlib_metadata-8.7.1.dist-info/top_level.txt,sha256=CO3fD9yylANiXkrMo4qHLV_mqXL2sC5JFKgt1yWAT-A,19 +importlib_metadata/__init__.py,sha256=u7Ew4-UkpzNY-ka6k-WRkDhQZS1akkLMfWs2eEnUmGo,37734 +importlib_metadata/_adapters.py,sha256=r5i8XLrKT6xmrpoREZhZrfczOYDmrVZeJBW5u0HzIGU,3797 +importlib_metadata/_collections.py,sha256=CxAhzlF3g1rwu_fMiB53JtRQiUFh0RgiMpoOvmK_ocg,760 +importlib_metadata/_compat.py,sha256=VC5ZDLlT-BcshauCShdFJvMNLntJJfZzNK1meGa-enw,1313 +importlib_metadata/_functools.py,sha256=0pA2OoiVK6wnsGq8HvVIzgdkvLiZ0nfnfw7IsndjoHk,3510 +importlib_metadata/_itertools.py,sha256=nMvp9SfHAQ_JYwK4L2i64lr3GRXGlYlikGTVzWbys_E,5351 +importlib_metadata/_meta.py,sha256=EtHyiJ5kGzWFDfKyQ2XQp6Vu113CeadKW1Vf6aGc1B4,1765 +importlib_metadata/_text.py,sha256=HCsFksZpJLeTP3NEk_ngrAeXVRRtTrtyh9eOABoRP4A,2166 +importlib_metadata/_typing.py,sha256=EQKhhsEgz_Sa-FnePI-faC72rNOOQwopjA1i5pG8FDU,367 +importlib_metadata/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
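The METADATA diff above carries the substance of the 8.0.0 to 8.7.1 bump: `Metadata-Version` moves from 2.1 to 2.4, the Apache license classifier gives way to a PEP 639 `License-Expression: Apache-2.0` (with `Dynamic: license-file`, and the license file itself relocated under `licenses/LICENSE`), Python 3.8 support is dropped via `Requires-Python: >=3.9`, the test extras are split into check/cover/enabler/type, and the `zipp` floor rises to 3.20. A sketch of how those fields read back at runtime, assuming an 8.7.x installation is importable; the commented values are taken from the METADATA shown above, not re-verified:

```python
from importlib.metadata import metadata  # the PyPI backport mirrors this API

md = metadata('importlib_metadata')
print(md['Metadata-Version'])       # '2.4'
print(md['License-Expression'])     # 'Apache-2.0' (PEP 639), replacing the classifier
print(md['Requires-Python'])        # '>=3.9'
print(md.get_all('Requires-Dist'))  # includes 'zipp>=3.20' plus the extras' requirements
```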
+importlib_metadata/compat/py311.py,sha256=uqm-K-uohyj1042TH4a9Er_I5o7667DvulcD-gC_fSA,608 +importlib_metadata/compat/py39.py,sha256=J3W7PUVRPNYMmcvT12RF8ndBU9e8_T0Ac4U87Bsrq70,1187 +importlib_metadata/diagnose.py,sha256=nkSRMiowlmkhLYhKhvCg9glmt_11Cox-EmLzEbqYTa8,379 +importlib_metadata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/REQUESTED b/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/REQUESTED similarity index 100% rename from setuptools/_vendor/jaraco.text-3.12.1.dist-info/REQUESTED rename to setuptools/_vendor/importlib_metadata-8.7.1.dist-info/REQUESTED diff --git a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/WHEEL b/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/WHEEL similarity index 65% rename from setuptools/_vendor/importlib_metadata-8.0.0.dist-info/WHEEL rename to setuptools/_vendor/importlib_metadata-8.7.1.dist-info/WHEEL index edf4ec7c70..e7fa31b6f3 100644 --- a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/WHEEL +++ b/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: setuptools (70.1.1) +Generator: setuptools (80.9.0) Root-Is-Purelib: true Tag: py3-none-any diff --git a/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/licenses/LICENSE b/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/licenses/LICENSE new file mode 100644 index 0000000000..5c1d8bbc02 --- /dev/null +++ b/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/licenses/LICENSE @@ -0,0 +1,73 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. + +Copyright 2025 [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/setuptools/_vendor/importlib_metadata-8.0.0.dist-info/top_level.txt b/setuptools/_vendor/importlib_metadata-8.7.1.dist-info/top_level.txt similarity index 100% rename from setuptools/_vendor/importlib_metadata-8.0.0.dist-info/top_level.txt rename to setuptools/_vendor/importlib_metadata-8.7.1.dist-info/top_level.txt diff --git a/setuptools/_vendor/importlib_metadata/__init__.py b/setuptools/_vendor/importlib_metadata/__init__.py index ed4813551a..508b02e4fc 100644 --- a/setuptools/_vendor/importlib_metadata/__init__.py +++ b/setuptools/_vendor/importlib_metadata/__init__.py @@ -1,44 +1,52 @@ +""" +APIs exposing metadata from third-party Python packages. + +This codebase is shared between importlib.metadata in the stdlib +and importlib_metadata in PyPI. See +https://github.com/python/importlib_metadata/wiki/Development-Methodology +for more detail. 
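The new module docstring beginning above makes the dual-home arrangement explicit: this source is shared between `importlib.metadata` in the stdlib and `importlib_metadata` on PyPI, which is also why setuptools vendors the PyPI copy, so behavior does not depend on whichever snapshot shipped with the running interpreter. The usual consumer-side pattern looks something like this (a generic sketch, not taken from this diff; the 3.10 cutoff is an example, chosen because `EntryPoints.select` landed in the 3.10 stdlib):

```python
import sys

if sys.version_info >= (3, 10):
    # The stdlib snapshot is recent enough for the APIs used here.
    from importlib.metadata import distribution, entry_points
else:
    # Older interpreters: prefer the PyPI backport for current behavior.
    from importlib_metadata import distribution, entry_points

print(distribution('setuptools').version)
```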
+""" + from __future__ import annotations -import os -import re import abc -import sys -import json -import zipp +import collections import email -import types -import inspect -import pathlib -import operator -import textwrap import functools import itertools +import operator +import os +import pathlib import posixpath -import collections +import re +import sys +import textwrap +import types +from collections.abc import Iterable, Mapping +from contextlib import suppress +from importlib import import_module +from importlib.abc import MetaPathFinder +from itertools import starmap +from typing import Any from . import _meta -from .compat import py39, py311 from ._collections import FreezableDefaultDict, Pair from ._compat import ( NullFinder, install, ) -from ._functools import method_cache, pass_none -from ._itertools import always_iterable, unique_everseen +from ._functools import method_cache, noop, pass_none, passthrough +from ._itertools import always_iterable, bucket, unique_everseen from ._meta import PackageMetadata, SimplePath - -from contextlib import suppress -from importlib import import_module -from importlib.abc import MetaPathFinder -from itertools import starmap -from typing import Any, Iterable, List, Mapping, Match, Optional, Set, cast +from ._typing import md_none +from .compat import py39, py311 __all__ = [ 'Distribution', 'DistributionFinder', 'PackageMetadata', 'PackageNotFoundError', + 'SimplePath', 'distribution', 'distributions', 'entry_points', @@ -57,7 +65,7 @@ def __str__(self) -> str: return f"No package metadata was found for {self.name}" @property - def name(self) -> str: # type: ignore[override] + def name(self) -> str: # type: ignore[override] # make readonly (name,) = self.args return name @@ -127,6 +135,12 @@ def valid(line: str): return line and not line.startswith('#') +class _EntryPointMatch(types.SimpleNamespace): + module: str + attr: str + extras: str + + class EntryPoint: """An entry point as defined by Python packaging conventions. @@ -142,6 +156,30 @@ class EntryPoint: 'attr' >>> ep.extras ['extra1', 'extra2'] + + If the value package or module are not valid identifiers, a + ValueError is raised on access. + + >>> EntryPoint(name=None, group=None, value='invalid-name').module + Traceback (most recent call last): + ... + ValueError: ('Invalid object reference...invalid-name... + >>> EntryPoint(name=None, group=None, value='invalid-name').attr + Traceback (most recent call last): + ... + ValueError: ('Invalid object reference...invalid-name... + >>> EntryPoint(name=None, group=None, value='invalid-name').extras + Traceback (most recent call last): + ... + ValueError: ('Invalid object reference...invalid-name... + + The same thing happens on construction. + + >>> EntryPoint(name=None, group=None, value='invalid-name') + Traceback (most recent call last): + ... + ValueError: ('Invalid object reference...invalid-name... + """ pattern = re.compile( @@ -169,38 +207,44 @@ class EntryPoint: value: str group: str - dist: Optional[Distribution] = None + dist: Distribution | None = None def __init__(self, name: str, value: str, group: str) -> None: vars(self).update(name=name, value=value, group=group) + self.module def load(self) -> Any: """Load the entry point from its definition. If only a module is indicated by the value, return that module. Otherwise, return the named object. 
""" - match = cast(Match, self.pattern.match(self.value)) - module = import_module(match.group('module')) - attrs = filter(None, (match.group('attr') or '').split('.')) + module = import_module(self.module) + attrs = filter(None, (self.attr or '').split('.')) return functools.reduce(getattr, attrs, module) @property def module(self) -> str: - match = self.pattern.match(self.value) - assert match is not None - return match.group('module') + return self._match.module @property def attr(self) -> str: - match = self.pattern.match(self.value) - assert match is not None - return match.group('attr') + return self._match.attr @property - def extras(self) -> List[str]: + def extras(self) -> list[str]: + return re.findall(r'\w+', self._match.extras or '') + + @functools.cached_property + def _match(self) -> _EntryPointMatch: match = self.pattern.match(self.value) - assert match is not None - return re.findall(r'\w+', match.group('extras') or '') + if not match: + raise ValueError( + 'Invalid object reference. ' + 'See https://packaging.python.org' + '/en/latest/specifications/entry-points/#data-model', + self.value, + ) + return _EntryPointMatch(**match.groupdict()) def _for(self, dist): vars(self).update(dist=dist) @@ -226,9 +270,26 @@ def matches(self, **params): >>> ep.matches(attr='bong') True """ + self._disallow_dist(params) attrs = (getattr(self, param) for param in params) return all(map(operator.eq, params.values(), attrs)) + @staticmethod + def _disallow_dist(params): + """ + Querying by dist is not allowed (dist objects are not comparable). + >>> EntryPoint(name='fan', value='fav', group='fag').matches(dist='foo') + Traceback (most recent call last): + ... + ValueError: "dist" is not suitable for matching... + """ + if "dist" in params: + raise ValueError( + '"dist" is not suitable for matching. ' + "Instead, use Distribution.entry_points.select() on a " + "located distribution." + ) + def _key(self): return self.name, self.value, self.group @@ -258,7 +319,7 @@ class EntryPoints(tuple): __slots__ = () - def __getitem__(self, name: str) -> EntryPoint: # type: ignore[override] + def __getitem__(self, name: str) -> EntryPoint: # type: ignore[override] # Work with str instead of int """ Get the EntryPoint in self matching name. """ @@ -282,14 +343,14 @@ def select(self, **params) -> EntryPoints: return EntryPoints(ep for ep in self if py39.ep_matches(ep, **params)) @property - def names(self) -> Set[str]: + def names(self) -> set[str]: """ Return the set of all names of all entry points. """ return {ep.name for ep in self} @property - def groups(self) -> Set[str]: + def groups(self) -> set[str]: """ Return the set of all groups of all entry points. """ @@ -310,11 +371,11 @@ def _from_text(text): class PackagePath(pathlib.PurePosixPath): """A reference to a path in a package""" - hash: Optional[FileHash] + hash: FileHash | None size: int dist: Distribution - def read_text(self, encoding: str = 'utf-8') -> str: # type: ignore[override] + def read_text(self, encoding: str = 'utf-8') -> str: return self.locate().read_text(encoding=encoding) def read_binary(self) -> bytes: @@ -345,7 +406,7 @@ class Distribution(metaclass=abc.ABCMeta): """ @abc.abstractmethod - def read_text(self, filename) -> Optional[str]: + def read_text(self, filename) -> str | None: """Attempt to load metadata file given by the name. 
Python distribution metadata is organized by blobs of text @@ -372,6 +433,17 @@ def locate_file(self, path: str | os.PathLike[str]) -> SimplePath: """ Given a path to a file in this distribution, return a SimplePath to it. + + This method is used by callers of ``Distribution.files()`` to + locate files within the distribution. If it's possible for a + Distribution to represent files in the distribution as + ``SimplePath`` objects, it should implement this method + to resolve such objects. + + Some Distribution providers may elect not to resolve SimplePath + objects within the distribution by raising a + NotImplementedError, but consumers of such a Distribution would + be unable to invoke ``Distribution.files()``. """ @classmethod @@ -388,13 +460,13 @@ def from_name(cls, name: str) -> Distribution: if not name: raise ValueError("A distribution name is required.") try: - return next(iter(cls.discover(name=name))) + return next(iter(cls._prefer_valid(cls.discover(name=name)))) except StopIteration: raise PackageNotFoundError(name) @classmethod def discover( - cls, *, context: Optional[DistributionFinder.Context] = None, **kwargs + cls, *, context: DistributionFinder.Context | None = None, **kwargs ) -> Iterable[Distribution]: """Return an iterable of Distribution objects for all packages. @@ -412,6 +484,16 @@ def discover( resolver(context) for resolver in cls._discover_resolvers() ) + @staticmethod + def _prefer_valid(dists: Iterable[Distribution]) -> Iterable[Distribution]: + """ + Prefer (move to the front) distributions that have metadata. + + Ref python/importlib_resources#489. + """ + buckets = bucket(dists, lambda dist: bool(dist.metadata)) + return itertools.chain(buckets[True], buckets[False]) + @staticmethod def at(path: str | os.PathLike[str]) -> Distribution: """Return a Distribution for the indicated metadata path. @@ -430,7 +512,7 @@ def _discover_resolvers(): return filter(None, declared) @property - def metadata(self) -> _meta.PackageMetadata: + def metadata(self) -> _meta.PackageMetadata | None: """Return the parsed metadata for this Distribution. The returned object will have keys that name the various bits of @@ -440,10 +522,8 @@ def metadata(self) -> _meta.PackageMetadata: Custom providers may provide the METADATA file or override this property. """ - # deferred for performance (python/cpython#109829) - from . import _adapters - opt_text = ( + text = ( self.read_text('METADATA') or self.read_text('PKG-INFO') # This last clause is here to support old egg-info files. Its @@ -451,13 +531,20 @@ def metadata(self) -> _meta.PackageMetadata: # (which points to the egg-info file) attribute unchanged. or self.read_text('') ) - text = cast(str, opt_text) + return self._assemble_message(text) + + @staticmethod + @pass_none + def _assemble_message(text: str) -> _meta.PackageMetadata: + # deferred for performance (python/cpython#109829) + from . 
import _adapters + return _adapters.Message(email.message_from_string(text)) @property def name(self) -> str: """Return the 'Name' metadata for the distribution package.""" - return self.metadata['Name'] + return md_none(self.metadata)['Name'] @property def _normalized_name(self): @@ -467,7 +554,7 @@ def _normalized_name(self): @property def version(self) -> str: """Return the 'Version' metadata for the distribution package.""" - return self.metadata['Version'] + return md_none(self.metadata)['Version'] @property def entry_points(self) -> EntryPoints: @@ -480,7 +567,7 @@ def entry_points(self) -> EntryPoints: return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self) @property - def files(self) -> Optional[List[PackagePath]]: + def files(self) -> list[PackagePath] | None: """Files in this distribution. :return: List of PackagePath for this distribution or None @@ -549,7 +636,8 @@ def _read_files_egginfo_installed(self): return paths = ( - py311.relative_fix((subdir / name).resolve()) + py311 + .relative_fix((subdir / name).resolve()) .relative_to(self.locate_file('').resolve(), walk_up=True) .as_posix() for name in text.splitlines() @@ -572,7 +660,7 @@ def _read_files_egginfo_sources(self): return text and map('"{}"'.format, text.splitlines()) @property - def requires(self) -> Optional[List[str]]: + def requires(self) -> list[str] | None: """Generated requirements specified for this Distribution""" reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs() return reqs and list(reqs) @@ -628,6 +716,9 @@ def origin(self): return self._load_json('direct_url.json') def _load_json(self, filename): + # Deferred for performance (python/importlib_metadata#503) + import json + return pass_none(json.loads)( self.read_text(filename), object_hook=lambda data: types.SimpleNamespace(**data), @@ -675,7 +766,7 @@ def __init__(self, **kwargs): vars(self).update(kwargs) @property - def path(self) -> List[str]: + def path(self) -> list[str]: """ The sequence of directory path that a distribution finder should search. @@ -696,6 +787,20 @@ def find_distributions(self, context=Context()) -> Iterable[Distribution]: """ +@passthrough +def _clear_after_fork(cached): + """Ensure ``func`` clears cached state after ``fork`` when supported. + + ``FastPath`` caches zip-backed ``pathlib.Path`` objects that retain a + reference to the parent's open ``ZipFile`` handle. Re-using a cached + instance in a forked child can therefore resurrect invalid file pointers + and trigger ``BadZipFile``/``OSError`` failures (python/importlib_metadata#520). + Registering ``cache_clear`` with ``os.register_at_fork`` keeps each process + on its own cache. + """ + getattr(os, 'register_at_fork', noop)(after_in_child=cached.cache_clear) + + class FastPath: """ Micro-optimized class for searching a root for children. 
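The `_clear_after_fork` decorator defined above, applied just below to the `functools.lru_cache` on `FastPath.__new__`, addresses python/importlib_metadata#520: a cached `FastPath` can hold a zip-backed `pathlib.Path` whose open `ZipFile` handle belongs to the parent process, so reusing it after `fork` can raise `BadZipFile` or `OSError` in the child. Registering the cache's `cache_clear` through `os.register_at_fork` gives each child a fresh cache. The same pattern in isolation; `expensive` is a toy stand-in of ours, not code from this diff:

```python
import functools
import os


@functools.lru_cache()
def expensive(root: str) -> dict:
    """Stand-in for FastPath.__new__: caches per-root state such as open handles."""
    return {'root': root, 'pid': os.getpid()}


# Guarded the same way the diff guards it, since register_at_fork is POSIX-only:
getattr(os, 'register_at_fork', lambda **_: None)(after_in_child=expensive.cache_clear)

expensive('/tmp')  # populate the parent's cache
# A forked child now starts with an empty cache instead of inheriting entries
# whose file handles were only valid in the parent.
```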
@@ -712,7 +817,8 @@ class FastPath: True """ - @functools.lru_cache() # type: ignore + @_clear_after_fork # type: ignore[misc] + @functools.lru_cache() def __new__(cls, root): return super().__new__(cls) @@ -730,7 +836,10 @@ def children(self): return [] def zip_children(self): - zip_path = zipp.Path(self.root) + # deferred for performance (python/importlib_metadata#502) + from zipp.compat.overlay import zipfile + + zip_path = zipfile.Path(self.root) names = zip_path.root.namelist() self.joinpath = zip_path.joinpath @@ -824,7 +933,7 @@ class Prepared: normalized = None legacy_normalized = None - def __init__(self, name: Optional[str]): + def __init__(self, name: str | None): self.name = name if name is None: return @@ -894,7 +1003,7 @@ def __init__(self, path: SimplePath) -> None: """ self._path = path - def read_text(self, filename: str | os.PathLike[str]) -> Optional[str]: + def read_text(self, filename: str | os.PathLike[str]) -> str | None: with suppress( FileNotFoundError, IsADirectoryError, @@ -958,7 +1067,7 @@ def distributions(**kwargs) -> Iterable[Distribution]: return Distribution.discover(**kwargs) -def metadata(distribution_name: str) -> _meta.PackageMetadata: +def metadata(distribution_name: str) -> _meta.PackageMetadata | None: """Get the metadata for the named package. :param distribution_name: The name of the distribution package to query. @@ -1001,7 +1110,7 @@ def entry_points(**params) -> EntryPoints: return EntryPoints(eps).select(**params) -def files(distribution_name: str) -> Optional[List[PackagePath]]: +def files(distribution_name: str) -> list[PackagePath] | None: """Return a list of files for the named package. :param distribution_name: The name of the distribution package to query. @@ -1010,7 +1119,7 @@ def files(distribution_name: str) -> Optional[List[PackagePath]]: return distribution(distribution_name).files -def requires(distribution_name: str) -> Optional[List[str]]: +def requires(distribution_name: str) -> list[str] | None: """ Return a list of requirements for the named package. @@ -1020,7 +1129,7 @@ def requires(distribution_name: str) -> Optional[List[str]]: return distribution(distribution_name).requires -def packages_distributions() -> Mapping[str, List[str]]: +def packages_distributions() -> Mapping[str, list[str]]: """ Return a mapping of top-level packages to their distributions. @@ -1033,7 +1142,7 @@ def packages_distributions() -> Mapping[str, List[str]]: pkg_to_dist = collections.defaultdict(list) for dist in distributions(): for pkg in _top_level_declared(dist) or _top_level_inferred(dist): - pkg_to_dist[pkg].append(dist.metadata['Name']) + pkg_to_dist[pkg].append(md_none(dist.metadata)['Name']) return dict(pkg_to_dist) @@ -1041,7 +1150,7 @@ def _top_level_declared(dist): return (dist.read_text('top_level.txt') or '').split() -def _topmost(name: PackagePath) -> Optional[str]: +def _topmost(name: PackagePath) -> str | None: """ Return the top-most parent as long as there is a parent. 
""" @@ -1067,11 +1176,10 @@ def _get_toplevel_name(name: PackagePath) -> str: >>> _get_toplevel_name(PackagePath('foo.dist-info')) 'foo.dist-info' """ - return _topmost(name) or ( - # python/typeshed#10328 - inspect.getmodulename(name) # type: ignore - or str(name) - ) + # Defer import of inspect for performance (python/cpython#118761) + import inspect + + return _topmost(name) or inspect.getmodulename(name) or str(name) def _top_level_inferred(dist): diff --git a/setuptools/_vendor/importlib_metadata/_adapters.py b/setuptools/_vendor/importlib_metadata/_adapters.py index 6223263ed5..dede395d79 100644 --- a/setuptools/_vendor/importlib_metadata/_adapters.py +++ b/setuptools/_vendor/importlib_metadata/_adapters.py @@ -1,11 +1,59 @@ +import email.message +import email.policy import re import textwrap -import email.message from ._text import FoldedCase +class RawPolicy(email.policy.EmailPolicy): + def fold(self, name, value): + folded = self.linesep.join( + textwrap + .indent(value, prefix=' ' * 8, predicate=lambda line: True) + .lstrip() + .splitlines() + ) + return f'{name}: {folded}{self.linesep}' + + class Message(email.message.Message): + r""" + Specialized Message subclass to handle metadata naturally. + + Reads values that may have newlines in them and converts the + payload to the Description. + + >>> msg_text = textwrap.dedent(''' + ... Name: Foo + ... Version: 3.0 + ... License: blah + ... de-blah + ... + ... First line of description. + ... Second line of description. + ... + ... Fourth line! + ... ''').lstrip().replace('', '') + >>> msg = Message(email.message_from_string(msg_text)) + >>> msg['Description'] + 'First line of description.\nSecond line of description.\n\nFourth line!\n' + + Message should render even if values contain newlines. + + >>> print(msg) + Name: Foo + Version: 3.0 + License: blah + de-blah + Description: First line of description. + Second line of description. + + Fourth line! 
+ + + """ + multiple_use_keys = set( map( FoldedCase, @@ -57,15 +105,20 @@ def __getitem__(self, item): def _repair_headers(self): def redent(value): "Correct for RFC822 indentation" - if not value or '\n' not in value: + indent = ' ' * 8 + if not value or '\n' + indent not in value: return value - return textwrap.dedent(' ' * 8 + value) + return textwrap.dedent(indent + value) headers = [(key, redent(value)) for key, value in vars(self)['_headers']] if self._payload: headers.append(('Description', self.get_payload())) + self.set_payload('') return headers + def as_string(self): + return super().as_string(policy=RawPolicy()) + @property def json(self): """ diff --git a/setuptools/_vendor/importlib_metadata/_collections.py b/setuptools/_vendor/importlib_metadata/_collections.py index cf0954e1a3..fc5045d36b 100644 --- a/setuptools/_vendor/importlib_metadata/_collections.py +++ b/setuptools/_vendor/importlib_metadata/_collections.py @@ -1,4 +1,5 @@ import collections +import typing # from jaraco.collections 3.3 @@ -24,7 +25,10 @@ def freeze(self): self._frozen = lambda key: self.default_factory() -class Pair(collections.namedtuple('Pair', 'name value')): +class Pair(typing.NamedTuple): + name: str + value: str + @classmethod def parse(cls, text): return cls(*map(str.strip, text.split("=", 1))) diff --git a/setuptools/_vendor/importlib_metadata/_compat.py b/setuptools/_vendor/importlib_metadata/_compat.py index df312b1cbb..01356d69b9 100644 --- a/setuptools/_vendor/importlib_metadata/_compat.py +++ b/setuptools/_vendor/importlib_metadata/_compat.py @@ -1,6 +1,5 @@ -import sys import platform - +import sys __all__ = ['install', 'NullFinder'] diff --git a/setuptools/_vendor/importlib_metadata/_functools.py b/setuptools/_vendor/importlib_metadata/_functools.py index 71f66bd03c..b1fd04a84a 100644 --- a/setuptools/_vendor/importlib_metadata/_functools.py +++ b/setuptools/_vendor/importlib_metadata/_functools.py @@ -1,5 +1,6 @@ -import types import functools +import types +from typing import Callable, TypeVar # from jaraco.functools 3.3 @@ -102,3 +103,33 @@ def wrapper(param, *args, **kwargs): return func(param, *args, **kwargs) return wrapper + + +# From jaraco.functools 4.4 +def noop(*args, **kwargs): + """ + A no-operation function that does nothing. + + >>> noop(1, 2, three=3) + """ + + +_T = TypeVar('_T') + + +# From jaraco.functools 4.4 +def passthrough(func: Callable[..., object]) -> Callable[[_T], _T]: + """ + Wrap the function to always return the first parameter. + + >>> passthrough(print)('3') + 3 + '3' + """ + + @functools.wraps(func) + def wrapper(first: _T, *args, **kwargs) -> _T: + func(first, *args, **kwargs) + return first + + return wrapper # type: ignore[return-value] diff --git a/setuptools/_vendor/importlib_metadata/_itertools.py b/setuptools/_vendor/importlib_metadata/_itertools.py index d4ca9b9140..79d37198ce 100644 --- a/setuptools/_vendor/importlib_metadata/_itertools.py +++ b/setuptools/_vendor/importlib_metadata/_itertools.py @@ -1,3 +1,4 @@ +from collections import defaultdict, deque from itertools import filterfalse @@ -71,3 +72,100 @@ def always_iterable(obj, base_type=(str, bytes)): return iter(obj) except TypeError: return iter((obj,)) + + +# Copied from more_itertools 10.3 +class bucket: + """Wrap *iterable* and return an object that buckets the iterable into + child iterables based on a *key* function. 
+ + >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3'] + >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character + >>> sorted(list(s)) # Get the keys + ['a', 'b', 'c'] + >>> a_iterable = s['a'] + >>> next(a_iterable) + 'a1' + >>> next(a_iterable) + 'a2' + >>> list(s['b']) + ['b1', 'b2', 'b3'] + + The original iterable will be advanced and its items will be cached until + they are used by the child iterables. This may require significant storage. + + By default, attempting to select a bucket to which no items belong will + exhaust the iterable and cache all values. + If you specify a *validator* function, selected buckets will instead be + checked against it. + + >>> from itertools import count + >>> it = count(1, 2) # Infinite sequence of odd numbers + >>> key = lambda x: x % 10 # Bucket by last digit + >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only + >>> s = bucket(it, key=key, validator=validator) + >>> 2 in s + False + >>> list(s[2]) + [] + + """ + + def __init__(self, iterable, key, validator=None): + self._it = iter(iterable) + self._key = key + self._cache = defaultdict(deque) + self._validator = validator or (lambda x: True) + + def __contains__(self, value): + if not self._validator(value): + return False + + try: + item = next(self[value]) + except StopIteration: + return False + else: + self._cache[value].appendleft(item) + + return True + + def _get_values(self, value): + """ + Helper to yield items from the parent iterator that match *value*. + Items that don't match are stored in the local cache as they + are encountered. + """ + while True: + # If we've cached some items that match the target value, emit + # the first one and evict it from the cache. + if self._cache[value]: + yield self._cache[value].popleft() + # Otherwise we need to advance the parent iterator to search for + # a matching item, caching the rest. + else: + while True: + try: + item = next(self._it) + except StopIteration: + return + item_value = self._key(item) + if item_value == value: + yield item + break + elif self._validator(item_value): + self._cache[item_value].append(item) + + def __iter__(self): + for item in self._it: + item_value = self._key(item) + if self._validator(item_value): + self._cache[item_value].append(item) + + yield from self._cache.keys() + + def __getitem__(self, value): + if not self._validator(value): + return iter(()) + + return self._get_values(value) diff --git a/setuptools/_vendor/importlib_metadata/_meta.py b/setuptools/_vendor/importlib_metadata/_meta.py index 1927d0f624..0c20eff3da 100644 --- a/setuptools/_vendor/importlib_metadata/_meta.py +++ b/setuptools/_vendor/importlib_metadata/_meta.py @@ -1,9 +1,13 @@ from __future__ import annotations import os -from typing import Protocol -from typing import Any, Dict, Iterator, List, Optional, TypeVar, Union, overload - +from collections.abc import Iterator +from typing import ( + Any, + Protocol, + TypeVar, + overload, +) _T = TypeVar("_T") @@ -20,25 +24,25 @@ def __iter__(self) -> Iterator[str]: ... # pragma: no cover @overload def get( self, name: str, failobj: None = None - ) -> Optional[str]: ... # pragma: no cover + ) -> str | None: ... # pragma: no cover @overload - def get(self, name: str, failobj: _T) -> Union[str, _T]: ... # pragma: no cover + def get(self, name: str, failobj: _T) -> str | _T: ... # pragma: no cover # overload per python/importlib_metadata#435 @overload def get_all( self, name: str, failobj: None = None - ) -> Optional[List[Any]]: ... 
# pragma: no cover + ) -> list[Any] | None: ... # pragma: no cover @overload - def get_all(self, name: str, failobj: _T) -> Union[List[Any], _T]: + def get_all(self, name: str, failobj: _T) -> list[Any] | _T: """ Return all values associated with a possibly multi-valued key. """ @property - def json(self) -> Dict[str, Union[str, List[str]]]: + def json(self) -> dict[str, str | list[str]]: """ A JSON-compatible form of the metadata. """ @@ -50,11 +54,11 @@ class SimplePath(Protocol): """ def joinpath( - self, other: Union[str, os.PathLike[str]] + self, other: str | os.PathLike[str] ) -> SimplePath: ... # pragma: no cover def __truediv__( - self, other: Union[str, os.PathLike[str]] + self, other: str | os.PathLike[str] ) -> SimplePath: ... # pragma: no cover @property diff --git a/setuptools/_vendor/importlib_metadata/_typing.py b/setuptools/_vendor/importlib_metadata/_typing.py new file mode 100644 index 0000000000..32b1d2b98a --- /dev/null +++ b/setuptools/_vendor/importlib_metadata/_typing.py @@ -0,0 +1,15 @@ +import functools +import typing + +from ._meta import PackageMetadata + +md_none = functools.partial(typing.cast, PackageMetadata) +""" +Suppress type errors for optional metadata. + +Although Distribution.metadata can return None when metadata is corrupt +and thus None, allow callers to assume it's not None and crash if +that's the case. + +# python/importlib_metadata#493 +""" diff --git a/setuptools/_vendor/importlib_metadata/compat/py39.py b/setuptools/_vendor/importlib_metadata/compat/py39.py index 1f15bd97e6..3eb9c01ecb 100644 --- a/setuptools/_vendor/importlib_metadata/compat/py39.py +++ b/setuptools/_vendor/importlib_metadata/compat/py39.py @@ -2,7 +2,9 @@ Compatibility layer with Python 3.8/3.9 """ -from typing import TYPE_CHECKING, Any, Optional +from __future__ import annotations + +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: # pragma: no cover # Prevent circular imports on runtime. @@ -10,8 +12,10 @@ else: Distribution = EntryPoint = Any +from .._typing import md_none + -def normalized_name(dist: Distribution) -> Optional[str]: +def normalized_name(dist: Distribution) -> str | None: """ Honor name normalization for distributions that don't provide ``_normalized_name``. """ @@ -20,7 +24,9 @@ def normalized_name(dist: Distribution) -> Optional[str]: except AttributeError: from .. import Prepared # -> delay to prevent circular imports. - return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name']) + return Prepared.normalize( + getattr(dist, "name", None) or md_none(dist.metadata)['Name'] + ) def ep_matches(ep: EntryPoint, **params) -> bool: diff --git a/setuptools/_vendor/inflect-7.3.1.dist-info/INSTALLER b/setuptools/_vendor/inflect-7.3.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/inflect-7.3.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/inflect-7.3.1.dist-info/METADATA b/setuptools/_vendor/inflect-7.3.1.dist-info/METADATA deleted file mode 100644 index 9a2097a54a..0000000000 --- a/setuptools/_vendor/inflect-7.3.1.dist-info/METADATA +++ /dev/null @@ -1,591 +0,0 @@ -Metadata-Version: 2.1 -Name: inflect -Version: 7.3.1 -Summary: Correctly generate plurals, singular nouns, ordinals, indefinite articles -Author-email: Paul Dyson -Maintainer-email: "Jason R. 
Coombs" -Project-URL: Source, https://github.com/jaraco/inflect -Keywords: plural,inflect,participle -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Natural Language :: English -Classifier: Operating System :: OS Independent -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Linguistic -Requires-Python: >=3.8 -Description-Content-Type: text/x-rst -License-File: LICENSE -Requires-Dist: more-itertools >=8.5.0 -Requires-Dist: typeguard >=4.0.1 -Requires-Dist: typing-extensions ; python_version < "3.9" -Provides-Extra: doc -Requires-Dist: sphinx >=3.5 ; extra == 'doc' -Requires-Dist: jaraco.packaging >=9.3 ; extra == 'doc' -Requires-Dist: rst.linker >=1.9 ; extra == 'doc' -Requires-Dist: furo ; extra == 'doc' -Requires-Dist: sphinx-lint ; extra == 'doc' -Requires-Dist: jaraco.tidelift >=1.4 ; extra == 'doc' -Provides-Extra: test -Requires-Dist: pytest !=8.1.*,>=6 ; extra == 'test' -Requires-Dist: pytest-checkdocs >=2.4 ; extra == 'test' -Requires-Dist: pytest-cov ; extra == 'test' -Requires-Dist: pytest-mypy ; extra == 'test' -Requires-Dist: pytest-enabler >=2.2 ; extra == 'test' -Requires-Dist: pytest-ruff >=0.2.1 ; extra == 'test' -Requires-Dist: pygments ; extra == 'test' - -.. image:: https://img.shields.io/pypi/v/inflect.svg - :target: https://pypi.org/project/inflect - -.. image:: https://img.shields.io/pypi/pyversions/inflect.svg - -.. image:: https://github.com/jaraco/inflect/actions/workflows/main.yml/badge.svg - :target: https://github.com/jaraco/inflect/actions?query=workflow%3A%22tests%22 - :alt: tests - -.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json - :target: https://github.com/astral-sh/ruff - :alt: Ruff - -.. image:: https://readthedocs.org/projects/inflect/badge/?version=latest - :target: https://inflect.readthedocs.io/en/latest/?badge=latest - -.. image:: https://img.shields.io/badge/skeleton-2024-informational - :target: https://blog.jaraco.com/skeleton - -.. image:: https://tidelift.com/badges/package/pypi/inflect - :target: https://tidelift.com/subscription/pkg/pypi-inflect?utm_source=pypi-inflect&utm_medium=readme - -NAME -==== - -inflect.py - Correctly generate plurals, singular nouns, ordinals, indefinite articles; convert numbers to words. - -SYNOPSIS -======== - -.. 
code-block:: python - - import inflect - - p = inflect.engine() - - # METHODS: - - # plural plural_noun plural_verb plural_adj singular_noun no num - # compare compare_nouns compare_nouns compare_adjs - # a an - # present_participle - # ordinal number_to_words - # join - # inflect classical gender - # defnoun defverb defadj defa defan - - - # UNCONDITIONALLY FORM THE PLURAL - - print("The plural of ", word, " is ", p.plural(word)) - - - # CONDITIONALLY FORM THE PLURAL - - print("I saw", cat_count, p.plural("cat", cat_count)) - - - # FORM PLURALS FOR SPECIFIC PARTS OF SPEECH - - print( - p.plural_noun("I", N1), - p.plural_verb("saw", N1), - p.plural_adj("my", N2), - p.plural_noun("saw", N2), - ) - - - # FORM THE SINGULAR OF PLURAL NOUNS - - print("The singular of ", word, " is ", p.singular_noun(word)) - - # SELECT THE GENDER OF SINGULAR PRONOUNS - - print(p.singular_noun("they")) # 'it' - p.gender("feminine") - print(p.singular_noun("they")) # 'she' - - - # DEAL WITH "0/1/N" -> "no/1/N" TRANSLATION: - - print("There ", p.plural_verb("was", errors), p.no(" error", errors)) - - - # USE DEFAULT COUNTS: - - print( - p.num(N1, ""), - p.plural("I"), - p.plural_verb(" saw"), - p.num(N2), - p.plural_noun(" saw"), - ) - print("There ", p.num(errors, ""), p.plural_verb("was"), p.no(" error")) - - - # COMPARE TWO WORDS "NUMBER-INSENSITIVELY": - - if p.compare(word1, word2): - print("same") - if p.compare_nouns(word1, word2): - print("same noun") - if p.compare_verbs(word1, word2): - print("same verb") - if p.compare_adjs(word1, word2): - print("same adj.") - - - # ADD CORRECT "a" OR "an" FOR A GIVEN WORD: - - print("Did you want ", p.a(thing), " or ", p.an(idea)) - - - # CONVERT NUMERALS INTO ORDINALS (i.e. 1->1st, 2->2nd, 3->3rd, etc.) - - print("It was", p.ordinal(position), " from the left\n") - - # CONVERT NUMERALS TO WORDS (i.e. 1->"one", 101->"one hundred and one", etc.) - # RETURNS A SINGLE STRING... - - words = p.number_to_words(1234) - # "one thousand, two hundred and thirty-four" - words = p.number_to_words(p.ordinal(1234)) - # "one thousand, two hundred and thirty-fourth" - - - # GET BACK A LIST OF STRINGS, ONE FOR EACH "CHUNK"... - - words = p.number_to_words(1234, wantlist=True) - # ("one thousand","two hundred and thirty-four") - - - # OPTIONAL PARAMETERS CHANGE TRANSLATION: - - words = p.number_to_words(12345, group=1) - # "one, two, three, four, five" - - words = p.number_to_words(12345, group=2) - # "twelve, thirty-four, five" - - words = p.number_to_words(12345, group=3) - # "one twenty-three, forty-five" - - words = p.number_to_words(1234, andword="") - # "one thousand, two hundred thirty-four" - - words = p.number_to_words(1234, andword=", plus") - # "one thousand, two hundred, plus thirty-four" - # TODO: I get no comma before plus: check perl - - words = p.number_to_words(555_1202, group=1, zero="oh") - # "five, five, five, one, two, oh, two" - - words = p.number_to_words(555_1202, group=1, one="unity") - # "five, five, five, unity, two, oh, two" - - words = p.number_to_words(123.456, group=1, decimal="mark") - # "one two three mark four five six" - # TODO: DOCBUG: perl gives commas here as do I - - # LITERAL STYLE ONLY NAMES NUMBERS LESS THAN A CERTAIN THRESHOLD... 
- - words = p.number_to_words(9, threshold=10) # "nine" - words = p.number_to_words(10, threshold=10) # "ten" - words = p.number_to_words(11, threshold=10) # "11" - words = p.number_to_words(1000, threshold=10) # "1,000" - - # JOIN WORDS INTO A LIST: - - mylist = p.join(("apple", "banana", "carrot")) - # "apple, banana, and carrot" - - mylist = p.join(("apple", "banana")) - # "apple and banana" - - mylist = p.join(("apple", "banana", "carrot"), final_sep="") - # "apple, banana and carrot" - - - # REQUIRE "CLASSICAL" PLURALS (EG: "focus"->"foci", "cherub"->"cherubim") - - p.classical() # USE ALL CLASSICAL PLURALS - - p.classical(all=True) # USE ALL CLASSICAL PLURALS - p.classical(all=False) # SWITCH OFF CLASSICAL MODE - - p.classical(zero=True) # "no error" INSTEAD OF "no errors" - p.classical(zero=False) # "no errors" INSTEAD OF "no error" - - p.classical(herd=True) # "2 buffalo" INSTEAD OF "2 buffalos" - p.classical(herd=False) # "2 buffalos" INSTEAD OF "2 buffalo" - - p.classical(persons=True) # "2 chairpersons" INSTEAD OF "2 chairpeople" - p.classical(persons=False) # "2 chairpeople" INSTEAD OF "2 chairpersons" - - p.classical(ancient=True) # "2 formulae" INSTEAD OF "2 formulas" - p.classical(ancient=False) # "2 formulas" INSTEAD OF "2 formulae" - - - # INTERPOLATE "plural()", "plural_noun()", "plural_verb()", "plural_adj()", "singular_noun()", - # a()", "an()", "num()" AND "ordinal()" WITHIN STRINGS: - - print(p.inflect("The plural of {0} is plural('{0}')".format(word))) - print(p.inflect("The singular of {0} is singular_noun('{0}')".format(word))) - print(p.inflect("I saw {0} plural('cat',{0})".format(cat_count))) - print( - p.inflect( - "plural('I',{0}) " - "plural_verb('saw',{0}) " - "plural('a',{1}) " - "plural_noun('saw',{1})".format(N1, N2) - ) - ) - print( - p.inflect( - "num({0}, False)plural('I') " - "plural_verb('saw') " - "num({1}, False)plural('a') " - "plural_noun('saw')".format(N1, N2) - ) - ) - print(p.inflect("I saw num({0}) plural('cat')\nnum()".format(cat_count))) - print(p.inflect("There plural_verb('was',{0}) no('error',{0})".format(errors))) - print(p.inflect("There num({0}, False)plural_verb('was') no('error')".format(errors))) - print(p.inflect("Did you want a('{0}') or an('{1}')".format(thing, idea))) - print(p.inflect("It was ordinal('{0}') from the left".format(position))) - - - # ADD USER-DEFINED INFLECTIONS (OVERRIDING INBUILT RULES): - - p.defnoun("VAX", "VAXen") # SINGULAR => PLURAL - - p.defverb( - "will", # 1ST PERSON SINGULAR - "shall", # 1ST PERSON PLURAL - "will", # 2ND PERSON SINGULAR - "will", # 2ND PERSON PLURAL - "will", # 3RD PERSON SINGULAR - "will", # 3RD PERSON PLURAL - ) - - p.defadj("hir", "their") # SINGULAR => PLURAL - - p.defa("h") # "AY HALWAYS SEZ 'HAITCH'!" - - p.defan("horrendous.*") # "AN HORRENDOUS AFFECTATION" - - -DESCRIPTION -=========== - -The methods of the class ``engine`` in module ``inflect.py`` provide plural -inflections, singular noun inflections, "a"/"an" selection for English words, -and manipulation of numbers as words. - -Plural forms of all nouns, most verbs, and some adjectives are -provided. Where appropriate, "classical" variants (for example: "brother" -> -"brethren", "dogma" -> "dogmata", etc.) are also provided. - -Single forms of nouns are also provided. The gender of singular pronouns -can be chosen (for example "they" -> "it" or "she" or "he" or "they"). - -Pronunciation-based "a"/"an" selection is provided for all English -words, and most initialisms. 
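As a quick illustration of the pronunciation-based selection just described, here is a minimal sketch against inflect's public API; it assumes a standalone ``inflect`` install, since this vendored copy is being removed:

    import inflect

    p = inflect.engine()

    # The article is chosen by sound, not by the first letter.
    print(p.a("cat"))        # 'a cat'
    print(p.an("idea"))      # 'an idea'
    print(p.a("hour"))       # 'an hour'  (silent 'h')
    print(p.a("FBI agent"))  # 'an FBI agent'  (initialism spoken "eff-bee-eye")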
- -It is also possible to inflect numerals (1,2,3) to ordinals (1st, 2nd, 3rd) -and to English words ("one", "two", "three"). - -In generating these inflections, ``inflect.py`` follows the Oxford -English Dictionary and the guidelines in Fowler's Modern English -Usage, preferring the former where the two disagree. - -The module is built around standard British spelling, but is designed -to cope with common American variants as well. Slang, jargon, and -other English dialects are *not* explicitly catered for. - -Where two or more inflected forms exist for a single word (typically a -"classical" form and a "modern" form), ``inflect.py`` prefers the -more common form (typically the "modern" one), unless "classical" -processing has been specified -(see `MODERN VS CLASSICAL INFLECTIONS`). - -FORMING PLURALS AND SINGULARS -============================= - -Inflecting Plurals and Singulars --------------------------------- - -All of the ``plural...`` plural inflection methods take the word to be -inflected as their first argument and return the corresponding inflection. -Note that all such methods expect the *singular* form of the word. The -results of passing a plural form are undefined (and unlikely to be correct). -Similarly, the ``si...`` singular inflection method expects the *plural* -form of the word. - -The ``plural...`` methods also take an optional second argument, -which indicates the grammatical "number" of the word (or of another word -with which the word being inflected must agree). If the "number" argument is -supplied and is not ``1`` (or ``"one"`` or ``"a"``, or some other adjective that -implies the singular), the plural form of the word is returned. If the -"number" argument *does* indicate singularity, the (uninflected) word -itself is returned. If the number argument is omitted, the plural form -is returned unconditionally. - -The ``si...`` method takes a second argument in a similar fashion. If it is -some form of the number ``1``, or is omitted, the singular form is returned. -Otherwise the plural is returned unaltered. - - -The various methods of ``inflect.engine`` are: - - - -``plural_noun(word, count=None)`` - - The method ``plural_noun()`` takes a *singular* English noun or - pronoun and returns its plural. Pronouns in the nominative ("I" -> - "we") and accusative ("me" -> "us") cases are handled, as are - possessive pronouns ("mine" -> "ours"). - - -``plural_verb(word, count=None)`` - - The method ``plural_verb()`` takes the *singular* form of a - conjugated verb (that is, one which is already in the correct "person" - and "mood") and returns the corresponding plural conjugation. - - -``plural_adj(word, count=None)`` - - The method ``plural_adj()`` takes the *singular* form of - certain types of adjectives and returns the corresponding plural form. - Adjectives that are correctly handled include: "numerical" adjectives - ("a" -> "some"), demonstrative adjectives ("this" -> "these", "that" -> - "those"), and possessives ("my" -> "our", "cat's" -> "cats'", "child's" - -> "childrens'", etc.) - - -``plural(word, count=None)`` - - The method ``plural()`` takes a *singular* English noun, - pronoun, verb, or adjective and returns its plural form. Where a word - has more than one inflection depending on its part of speech (for - example, the noun "thought" inflects to "thoughts", the verb "thought" - to "thought"), the (singular) noun sense is preferred to the (singular) - verb sense. 
- - Hence ``plural("knife")`` will return "knives" ("knife" having been treated - as a singular noun), whereas ``plural("knifes")`` will return "knife" - ("knifes" having been treated as a 3rd person singular verb). - - The inherent ambiguity of such cases suggests that, - where the part of speech is known, ``plural_noun``, ``plural_verb``, and - ``plural_adj`` should be used in preference to ``plural``. - - -``singular_noun(word, count=None)`` - - The method ``singular_noun()`` takes a *plural* English noun or - pronoun and returns its singular. Pronouns in the nominative ("we" -> - "I") and accusative ("us" -> "me") cases are handled, as are - possessive pronouns ("ours" -> "mine"). When third person - singular pronouns are returned they take the neuter gender by default - ("they" -> "it"), not ("they"-> "she") nor ("they" -> "he"). This can be - changed with ``gender()``. - -Note that all these methods ignore any whitespace surrounding the -word being inflected, but preserve that whitespace when the result is -returned. For example, ``plural(" cat ")`` returns " cats ". - - -``gender(genderletter)`` - - The third person plural pronoun takes the same form for the female, male and - neuter (e.g. "they"). The singular however, depends upon gender (e.g. "she", - "he", "it" and "they" -- "they" being the gender neutral form.) By default - ``singular_noun`` returns the neuter form, however, the gender can be selected with - the ``gender`` method. Pass the first letter of the gender to - ``gender`` to return the f(eminine), m(asculine), n(euter) or t(hey) - form of the singular. e.g. - gender('f') followed by singular_noun('themselves') returns 'herself'. - -Numbered plurals ----------------- - -The ``plural...`` methods return only the inflected word, not the count that -was used to inflect it. Thus, in order to produce "I saw 3 ducks", it -is necessary to use: - -.. code-block:: python - - print("I saw", N, p.plural_noun(animal, N)) - -Since the usual purpose of producing a plural is to make it agree with -a preceding count, inflect.py provides a method -(``no(word, count)``) which, given a word and a(n optional) count, returns the -count followed by the correctly inflected word. Hence the previous -example can be rewritten: - -.. code-block:: python - - print("I saw ", p.no(animal, N)) - -In addition, if the count is zero (or some other term which implies -zero, such as ``"zero"``, ``"nil"``, etc.) the count is replaced by the -word "no". Hence, if ``N`` had the value zero, the previous example -would print (the somewhat more elegant):: - - I saw no animals - -rather than:: - - I saw 0 animals - -Note that the name of the method is a pun: the method -returns either a number (a *No.*) or a ``"no"``, in front of the -inflected word. - - -Reducing the number of counts required --------------------------------------- - -In some contexts, the need to supply an explicit count to the various -``plural...`` methods makes for tiresome repetition. For example: - -.. code-block:: python - - print( - plural_adj("This", errors), - plural_noun(" error", errors), - plural_verb(" was", errors), - " fatal.", - ) - -inflect.py therefore provides a method -(``num(count=None, show=None)``) which may be used to set a persistent "default number" -value. If such a value is set, it is subsequently used whenever an -optional second "number" argument is omitted. The default value thus set -can subsequently be removed by calling ``num()`` with no arguments. -Hence we could rewrite the previous example: - -.. 
code-block:: python - - p.num(errors) - print(p.plural_adj("This"), p.plural_noun(" error"), p.plural_verb(" was"), "fatal.") - p.num() - -Normally, ``num()`` returns its first argument, so that it may also -be "inlined" in contexts like: - -.. code-block:: python - - print(p.num(errors), p.plural_noun(" error"), p.plural_verb(" was"), " detected.") - if severity > 1: - print( - p.plural_adj("This"), p.plural_noun(" error"), p.plural_verb(" was"), "fatal." - ) - -However, in certain contexts (see `INTERPOLATING INFLECTIONS IN STRINGS`) -it is preferable that ``num()`` return an empty string. Hence ``num()`` -provides an optional second argument. If that argument is supplied (that is, if -it is defined) and evaluates to false, ``num`` returns an empty string -instead of its first argument. For example: - -.. code-block:: python - - print(p.num(errors, 0), p.no("error"), p.plural_verb(" was"), " detected.") - if severity > 1: - print( - p.plural_adj("This"), p.plural_noun(" error"), p.plural_verb(" was"), "fatal." - ) - - - -Number-insensitive equality ---------------------------- - -inflect.py also provides a solution to the problem -of comparing words of differing plurality through the methods -``compare(word1, word2)``, ``compare_nouns(word1, word2)``, -``compare_verbs(word1, word2)``, and ``compare_adjs(word1, word2)``. -Each of these methods takes two strings, and compares them -using the corresponding plural-inflection method (``plural()``, ``plural_noun()``, -``plural_verb()``, and ``plural_adj()`` respectively). - -The comparison returns true if: - -- the strings are equal, or -- one string is equal to a plural form of the other, or -- the strings are two different plural forms of the one word. - - -Hence all of the following return true: - -.. code-block:: python - - p.compare("index", "index") # RETURNS "eq" - p.compare("index", "indexes") # RETURNS "s:p" - p.compare("index", "indices") # RETURNS "s:p" - p.compare("indexes", "index") # RETURNS "p:s" - p.compare("indices", "index") # RETURNS "p:s" - p.compare("indices", "indexes") # RETURNS "p:p" - p.compare("indexes", "indices") # RETURNS "p:p" - p.compare("indices", "indices") # RETURNS "eq" - -As indicated by the comments in the previous example, the actual value -returned by the various ``compare`` methods encodes which of the -three equality rules succeeded: "eq" is returned if the strings were -identical, "s:p" if the strings were singular and plural respectively, -"p:s" for plural and singular, and "p:p" for two distinct plurals. -Inequality is indicated by returning an empty string. - -It should be noted that two distinct singular words which happen to take -the same plural form are *not* considered equal, nor are cases where -one (singular) word's plural is the other (plural) word's singular. -Hence all of the following return false: - -.. code-block:: python - - p.compare("base", "basis") # ALTHOUGH BOTH -> "bases" - p.compare("syrinx", "syringe") # ALTHOUGH BOTH -> "syringes" - p.compare("she", "he") # ALTHOUGH BOTH -> "they" - - p.compare("opus", "operas") # ALTHOUGH "opus" -> "opera" -> "operas" - p.compare("taxi", "taxes") # ALTHOUGH "taxi" -> "taxis" -> "taxes" - -Note too that, although the comparison is "number-insensitive" it is *not* -case-insensitive (that is, ``plural("time","Times")`` returns false. To obtain -both number and case insensitivity, use the ``lower()`` method on both strings -(that is, ``plural("time".lower(), "Times".lower())`` returns true). 
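Those return codes can be exercised directly; a minimal sketch (again assuming a standalone ``inflect`` install) using the documented results above:

    import inflect

    p = inflect.engine()

    # Truthy results encode which equality rule fired.
    assert p.compare("index", "indices") == "s:p"    # singular vs. plural
    assert p.compare("indices", "indexes") == "p:p"  # two distinct plurals

    # Inequality is falsy, even when both words share a plural form.
    assert not p.compare("base", "basis")

    # Number- and case-insensitive comparison, per the note above:
    assert p.compare("time".lower(), "Times".lower())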
- -Related Functionality -===================== - -Shout out to these libraries that provide related functionality: - -* `WordSet `_ - parses identifiers like variable names into sets of words suitable for re-assembling - in another form. - -* `word2number `_ converts words to - a number. - - -For Enterprise -============== - -Available as part of the Tidelift Subscription. - -This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. - -`Learn more `_. diff --git a/setuptools/_vendor/inflect-7.3.1.dist-info/RECORD b/setuptools/_vendor/inflect-7.3.1.dist-info/RECORD deleted file mode 100644 index 73ff576be5..0000000000 --- a/setuptools/_vendor/inflect-7.3.1.dist-info/RECORD +++ /dev/null @@ -1,13 +0,0 @@ -inflect-7.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -inflect-7.3.1.dist-info/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 -inflect-7.3.1.dist-info/METADATA,sha256=ZgMNY0WAZRs-U8wZiV2SMfjSKqBrMngXyDMs_CAwMwg,21079 -inflect-7.3.1.dist-info/RECORD,, -inflect-7.3.1.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91 -inflect-7.3.1.dist-info/top_level.txt,sha256=m52ujdp10CqT6jh1XQxZT6kEntcnv-7Tl7UiGNTzWZA,8 -inflect/__init__.py,sha256=Jxy1HJXZiZ85kHeLAhkmvz6EMTdFqBe-duvt34R6IOc,103796 -inflect/__pycache__/__init__.cpython-312.pyc,, -inflect/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -inflect/compat/__pycache__/__init__.cpython-312.pyc,, -inflect/compat/__pycache__/py38.cpython-312.pyc,, -inflect/compat/py38.py,sha256=oObVfVnWX9_OpnOuEJn1mFbJxVhwyR5epbiTNXDDaso,160 -inflect/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/setuptools/_vendor/inflect-7.3.1.dist-info/top_level.txt b/setuptools/_vendor/inflect-7.3.1.dist-info/top_level.txt deleted file mode 100644 index 0fd75fab3e..0000000000 --- a/setuptools/_vendor/inflect-7.3.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -inflect diff --git a/setuptools/_vendor/inflect/__init__.py b/setuptools/_vendor/inflect/__init__.py deleted file mode 100644 index 3eec27f4c6..0000000000 --- a/setuptools/_vendor/inflect/__init__.py +++ /dev/null @@ -1,3986 +0,0 @@ -""" -inflect: english language inflection - - correctly generate plurals, ordinals, indefinite articles - - convert numbers to words - -Copyright (C) 2010 Paul Dyson - -Based upon the Perl module -`Lingua::EN::Inflect `_. 
- -methods: - classical inflect - plural plural_noun plural_verb plural_adj singular_noun no num a an - compare compare_nouns compare_verbs compare_adjs - present_participle - ordinal - number_to_words - join - defnoun defverb defadj defa defan - -INFLECTIONS: - classical inflect - plural plural_noun plural_verb plural_adj singular_noun compare - no num a an present_participle - -PLURALS: - classical inflect - plural plural_noun plural_verb plural_adj singular_noun no num - compare compare_nouns compare_verbs compare_adjs - -COMPARISONS: - classical - compare compare_nouns compare_verbs compare_adjs - -ARTICLES: - classical inflect num a an - -NUMERICAL: - ordinal number_to_words - -USER_DEFINED: - defnoun defverb defadj defa defan - -Exceptions: - UnknownClassicalModeError - BadNumValueError - BadChunkingOptionError - NumOutOfRangeError - BadUserDefinedPatternError - BadRcFileError - BadGenderError - -""" - -from __future__ import annotations - -import ast -import collections -import contextlib -import functools -import itertools -import re -from numbers import Number -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Dict, - Iterable, - List, - Literal, - Match, - Optional, - Sequence, - Tuple, - Union, - cast, -) - -from more_itertools import windowed_complete -from typeguard import typechecked - -from .compat.py38 import Annotated - - -class UnknownClassicalModeError(Exception): - pass - - -class BadNumValueError(Exception): - pass - - -class BadChunkingOptionError(Exception): - pass - - -class NumOutOfRangeError(Exception): - pass - - -class BadUserDefinedPatternError(Exception): - pass - - -class BadRcFileError(Exception): - pass - - -class BadGenderError(Exception): - pass - - -def enclose(s: str) -> str: - return f"(?:{s})" - - -def joinstem(cutpoint: Optional[int] = 0, words: Optional[Iterable[str]] = None) -> str: - """ - Join stem of each word in words into a string for regex. - - Each word is truncated at cutpoint. - - Cutpoint is usually negative indicating the number of letters to remove - from the end of each word. - - >>> joinstem(-2, ["ephemeris", "iris", ".*itis"]) - '(?:ephemer|ir|.*it)' - - >>> joinstem(None, ["ephemeris"]) - '(?:ephemeris)' - - >>> joinstem(5, None) - '(?:)' - """ - return enclose("|".join(w[:cutpoint] for w in words or [])) - - -def bysize(words: Iterable[str]) -> Dict[int, set]: - """ - From a list of words, return a dict of sets sorted by word length. 
- - >>> words = ['ant', 'cat', 'dog', 'pig', 'frog', 'goat', 'horse', 'elephant'] - >>> ret = bysize(words) - >>> sorted(ret[3]) - ['ant', 'cat', 'dog', 'pig'] - >>> ret[5] - {'horse'} - """ - res: Dict[int, set] = collections.defaultdict(set) - for w in words: - res[len(w)].add(w) - return res - - -def make_pl_si_lists( - lst: Iterable[str], - plending: str, - siendingsize: Optional[int], - dojoinstem: bool = True, -): - """ - given a list of singular words: lst - - an ending to append to make the plural: plending - - the number of characters to remove from the singular - before appending plending: siendingsize - - a flag whether to create a joinstem: dojoinstem - - return: - a list of pluralised words: si_list (called si because this is what you need to - look for to make the singular) - - the pluralised words as a dict of sets sorted by word length: si_bysize - the singular words as a dict of sets sorted by word length: pl_bysize - if dojoinstem is True: a regular expression that matches any of the stems: stem - """ - if siendingsize is not None: - siendingsize = -siendingsize - si_list = [w[:siendingsize] + plending for w in lst] - pl_bysize = bysize(lst) - si_bysize = bysize(si_list) - if dojoinstem: - stem = joinstem(siendingsize, lst) - return si_list, si_bysize, pl_bysize, stem - else: - return si_list, si_bysize, pl_bysize - - -# 1. PLURALS - -pl_sb_irregular_s = { - "corpus": "corpuses|corpora", - "opus": "opuses|opera", - "genus": "genera", - "mythos": "mythoi", - "penis": "penises|penes", - "testis": "testes", - "atlas": "atlases|atlantes", - "yes": "yeses", -} - -pl_sb_irregular = { - "child": "children", - "chili": "chilis|chilies", - "brother": "brothers|brethren", - "infinity": "infinities|infinity", - "loaf": "loaves", - "lore": "lores|lore", - "hoof": "hoofs|hooves", - "beef": "beefs|beeves", - "thief": "thiefs|thieves", - "money": "monies", - "mongoose": "mongooses", - "ox": "oxen", - "cow": "cows|kine", - "graffito": "graffiti", - "octopus": "octopuses|octopodes", - "genie": "genies|genii", - "ganglion": "ganglions|ganglia", - "trilby": "trilbys", - "turf": "turfs|turves", - "numen": "numina", - "atman": "atmas", - "occiput": "occiputs|occipita", - "sabretooth": "sabretooths", - "sabertooth": "sabertooths", - "lowlife": "lowlifes", - "flatfoot": "flatfoots", - "tenderfoot": "tenderfoots", - "romany": "romanies", - "jerry": "jerries", - "mary": "maries", - "talouse": "talouses", - "rom": "roma", - "carmen": "carmina", -} - -pl_sb_irregular.update(pl_sb_irregular_s) -# pl_sb_irregular_keys = enclose('|'.join(pl_sb_irregular.keys())) - -pl_sb_irregular_caps = { - "Romany": "Romanies", - "Jerry": "Jerrys", - "Mary": "Marys", - "Rom": "Roma", -} - -pl_sb_irregular_compound = {"prima donna": "prima donnas|prime donne"} - -si_sb_irregular = {v: k for (k, v) in pl_sb_irregular.items()} -for k in list(si_sb_irregular): - if "|" in k: - k1, k2 = k.split("|") - si_sb_irregular[k1] = si_sb_irregular[k2] = si_sb_irregular[k] - del si_sb_irregular[k] -si_sb_irregular_caps = {v: k for (k, v) in pl_sb_irregular_caps.items()} -si_sb_irregular_compound = {v: k for (k, v) in pl_sb_irregular_compound.items()} -for k in list(si_sb_irregular_compound): - if "|" in k: - k1, k2 = k.split("|") - si_sb_irregular_compound[k1] = si_sb_irregular_compound[k2] = ( - si_sb_irregular_compound[k] - ) - del si_sb_irregular_compound[k] - -# si_sb_irregular_keys = enclose('|'.join(si_sb_irregular.keys())) - -# Z's that don't double - -pl_sb_z_zes_list = ("quartz", "topaz") -pl_sb_z_zes_bysize = 
bysize(pl_sb_z_zes_list) - -pl_sb_ze_zes_list = ("snooze",) -pl_sb_ze_zes_bysize = bysize(pl_sb_ze_zes_list) - - -# CLASSICAL "..is" -> "..ides" - -pl_sb_C_is_ides_complete = [ - # GENERAL WORDS... - "ephemeris", - "iris", - "clitoris", - "chrysalis", - "epididymis", -] - -pl_sb_C_is_ides_endings = [ - # INFLAMATIONS... - "itis" -] - -pl_sb_C_is_ides = joinstem( - -2, pl_sb_C_is_ides_complete + [f".*{w}" for w in pl_sb_C_is_ides_endings] -) - -pl_sb_C_is_ides_list = pl_sb_C_is_ides_complete + pl_sb_C_is_ides_endings - -( - si_sb_C_is_ides_list, - si_sb_C_is_ides_bysize, - pl_sb_C_is_ides_bysize, -) = make_pl_si_lists(pl_sb_C_is_ides_list, "ides", 2, dojoinstem=False) - - -# CLASSICAL "..a" -> "..ata" - -pl_sb_C_a_ata_list = ( - "anathema", - "bema", - "carcinoma", - "charisma", - "diploma", - "dogma", - "drama", - "edema", - "enema", - "enigma", - "lemma", - "lymphoma", - "magma", - "melisma", - "miasma", - "oedema", - "sarcoma", - "schema", - "soma", - "stigma", - "stoma", - "trauma", - "gumma", - "pragma", -) - -( - si_sb_C_a_ata_list, - si_sb_C_a_ata_bysize, - pl_sb_C_a_ata_bysize, - pl_sb_C_a_ata, -) = make_pl_si_lists(pl_sb_C_a_ata_list, "ata", 1) - -# UNCONDITIONAL "..a" -> "..ae" - -pl_sb_U_a_ae_list = ( - "alumna", - "alga", - "vertebra", - "persona", - "vita", -) -( - si_sb_U_a_ae_list, - si_sb_U_a_ae_bysize, - pl_sb_U_a_ae_bysize, - pl_sb_U_a_ae, -) = make_pl_si_lists(pl_sb_U_a_ae_list, "e", None) - -# CLASSICAL "..a" -> "..ae" - -pl_sb_C_a_ae_list = ( - "amoeba", - "antenna", - "formula", - "hyperbola", - "medusa", - "nebula", - "parabola", - "abscissa", - "hydra", - "nova", - "lacuna", - "aurora", - "umbra", - "flora", - "fauna", -) -( - si_sb_C_a_ae_list, - si_sb_C_a_ae_bysize, - pl_sb_C_a_ae_bysize, - pl_sb_C_a_ae, -) = make_pl_si_lists(pl_sb_C_a_ae_list, "e", None) - - -# CLASSICAL "..en" -> "..ina" - -pl_sb_C_en_ina_list = ("stamen", "foramen", "lumen") - -( - si_sb_C_en_ina_list, - si_sb_C_en_ina_bysize, - pl_sb_C_en_ina_bysize, - pl_sb_C_en_ina, -) = make_pl_si_lists(pl_sb_C_en_ina_list, "ina", 2) - - -# UNCONDITIONAL "..um" -> "..a" - -pl_sb_U_um_a_list = ( - "bacterium", - "agendum", - "desideratum", - "erratum", - "stratum", - "datum", - "ovum", - "extremum", - "candelabrum", -) -( - si_sb_U_um_a_list, - si_sb_U_um_a_bysize, - pl_sb_U_um_a_bysize, - pl_sb_U_um_a, -) = make_pl_si_lists(pl_sb_U_um_a_list, "a", 2) - -# CLASSICAL "..um" -> "..a" - -pl_sb_C_um_a_list = ( - "maximum", - "minimum", - "momentum", - "optimum", - "quantum", - "cranium", - "curriculum", - "dictum", - "phylum", - "aquarium", - "compendium", - "emporium", - "encomium", - "gymnasium", - "honorarium", - "interregnum", - "lustrum", - "memorandum", - "millennium", - "rostrum", - "spectrum", - "speculum", - "stadium", - "trapezium", - "ultimatum", - "medium", - "vacuum", - "velum", - "consortium", - "arboretum", -) - -( - si_sb_C_um_a_list, - si_sb_C_um_a_bysize, - pl_sb_C_um_a_bysize, - pl_sb_C_um_a, -) = make_pl_si_lists(pl_sb_C_um_a_list, "a", 2) - - -# UNCONDITIONAL "..us" -> "i" - -pl_sb_U_us_i_list = ( - "alumnus", - "alveolus", - "bacillus", - "bronchus", - "locus", - "nucleus", - "stimulus", - "meniscus", - "sarcophagus", -) -( - si_sb_U_us_i_list, - si_sb_U_us_i_bysize, - pl_sb_U_us_i_bysize, - pl_sb_U_us_i, -) = make_pl_si_lists(pl_sb_U_us_i_list, "i", 2) - -# CLASSICAL "..us" -> "..i" - -pl_sb_C_us_i_list = ( - "focus", - "radius", - "genius", - "incubus", - "succubus", - "nimbus", - "fungus", - "nucleolus", - "stylus", - "torus", - "umbilicus", - "uterus", - "hippopotamus", - "cactus", -) - 
-( - si_sb_C_us_i_list, - si_sb_C_us_i_bysize, - pl_sb_C_us_i_bysize, - pl_sb_C_us_i, -) = make_pl_si_lists(pl_sb_C_us_i_list, "i", 2) - - -# CLASSICAL "..us" -> "..us" (ASSIMILATED 4TH DECLENSION LATIN NOUNS) - -pl_sb_C_us_us = ( - "status", - "apparatus", - "prospectus", - "sinus", - "hiatus", - "impetus", - "plexus", -) -pl_sb_C_us_us_bysize = bysize(pl_sb_C_us_us) - -# UNCONDITIONAL "..on" -> "a" - -pl_sb_U_on_a_list = ( - "criterion", - "perihelion", - "aphelion", - "phenomenon", - "prolegomenon", - "noumenon", - "organon", - "asyndeton", - "hyperbaton", -) -( - si_sb_U_on_a_list, - si_sb_U_on_a_bysize, - pl_sb_U_on_a_bysize, - pl_sb_U_on_a, -) = make_pl_si_lists(pl_sb_U_on_a_list, "a", 2) - -# CLASSICAL "..on" -> "..a" - -pl_sb_C_on_a_list = ("oxymoron",) - -( - si_sb_C_on_a_list, - si_sb_C_on_a_bysize, - pl_sb_C_on_a_bysize, - pl_sb_C_on_a, -) = make_pl_si_lists(pl_sb_C_on_a_list, "a", 2) - - -# CLASSICAL "..o" -> "..i" (BUT NORMALLY -> "..os") - -pl_sb_C_o_i = [ - "solo", - "soprano", - "basso", - "alto", - "contralto", - "tempo", - "piano", - "virtuoso", -] # list not tuple so can concat for pl_sb_U_o_os - -pl_sb_C_o_i_bysize = bysize(pl_sb_C_o_i) -si_sb_C_o_i_bysize = bysize([f"{w[:-1]}i" for w in pl_sb_C_o_i]) - -pl_sb_C_o_i_stems = joinstem(-1, pl_sb_C_o_i) - -# ALWAYS "..o" -> "..os" - -pl_sb_U_o_os_complete = {"ado", "ISO", "NATO", "NCO", "NGO", "oto"} -si_sb_U_o_os_complete = {f"{w}s" for w in pl_sb_U_o_os_complete} - - -pl_sb_U_o_os_endings = [ - "aficionado", - "aggro", - "albino", - "allegro", - "ammo", - "Antananarivo", - "archipelago", - "armadillo", - "auto", - "avocado", - "Bamako", - "Barquisimeto", - "bimbo", - "bingo", - "Biro", - "bolero", - "Bolzano", - "bongo", - "Boto", - "burro", - "Cairo", - "canto", - "cappuccino", - "casino", - "cello", - "Chicago", - "Chimango", - "cilantro", - "cochito", - "coco", - "Colombo", - "Colorado", - "commando", - "concertino", - "contango", - "credo", - "crescendo", - "cyano", - "demo", - "ditto", - "Draco", - "dynamo", - "embryo", - "Esperanto", - "espresso", - "euro", - "falsetto", - "Faro", - "fiasco", - "Filipino", - "flamenco", - "furioso", - "generalissimo", - "Gestapo", - "ghetto", - "gigolo", - "gizmo", - "Greensboro", - "gringo", - "Guaiabero", - "guano", - "gumbo", - "gyro", - "hairdo", - "hippo", - "Idaho", - "impetigo", - "inferno", - "info", - "intermezzo", - "intertrigo", - "Iquico", - "jumbo", - "junto", - "Kakapo", - "kilo", - "Kinkimavo", - "Kokako", - "Kosovo", - "Lesotho", - "libero", - "libido", - "libretto", - "lido", - "Lilo", - "limbo", - "limo", - "lineno", - "lingo", - "lino", - "livedo", - "loco", - "logo", - "lumbago", - "macho", - "macro", - "mafioso", - "magneto", - "magnifico", - "Majuro", - "Malabo", - "manifesto", - "Maputo", - "Maracaibo", - "medico", - "memo", - "metro", - "Mexico", - "micro", - "Milano", - "Monaco", - "mono", - "Montenegro", - "Morocco", - "Muqdisho", - "myo", - "neutrino", - "Ningbo", - "octavo", - "oregano", - "Orinoco", - "Orlando", - "Oslo", - "panto", - "Paramaribo", - "Pardusco", - "pedalo", - "photo", - "pimento", - "pinto", - "pleco", - "Pluto", - "pogo", - "polo", - "poncho", - "Porto-Novo", - "Porto", - "pro", - "psycho", - "pueblo", - "quarto", - "Quito", - "repo", - "rhino", - "risotto", - "rococo", - "rondo", - "Sacramento", - "saddo", - "sago", - "salvo", - "Santiago", - "Sapporo", - "Sarajevo", - "scherzando", - "scherzo", - "silo", - "sirocco", - "sombrero", - "staccato", - "sterno", - "stucco", - "stylo", - "sumo", - "Taiko", - "techno", - "terrazzo", - 
"testudo", - "timpano", - "tiro", - "tobacco", - "Togo", - "Tokyo", - "torero", - "Torino", - "Toronto", - "torso", - "tremolo", - "typo", - "tyro", - "ufo", - "UNESCO", - "vaquero", - "vermicello", - "verso", - "vibrato", - "violoncello", - "Virgo", - "weirdo", - "WHO", - "WTO", - "Yamoussoukro", - "yo-yo", - "zero", - "Zibo", -] + pl_sb_C_o_i - -pl_sb_U_o_os_bysize = bysize(pl_sb_U_o_os_endings) -si_sb_U_o_os_bysize = bysize([f"{w}s" for w in pl_sb_U_o_os_endings]) - - -# UNCONDITIONAL "..ch" -> "..chs" - -pl_sb_U_ch_chs_list = ("czech", "eunuch", "stomach") - -( - si_sb_U_ch_chs_list, - si_sb_U_ch_chs_bysize, - pl_sb_U_ch_chs_bysize, - pl_sb_U_ch_chs, -) = make_pl_si_lists(pl_sb_U_ch_chs_list, "s", None) - - -# UNCONDITIONAL "..[ei]x" -> "..ices" - -pl_sb_U_ex_ices_list = ("codex", "murex", "silex") -( - si_sb_U_ex_ices_list, - si_sb_U_ex_ices_bysize, - pl_sb_U_ex_ices_bysize, - pl_sb_U_ex_ices, -) = make_pl_si_lists(pl_sb_U_ex_ices_list, "ices", 2) - -pl_sb_U_ix_ices_list = ("radix", "helix") -( - si_sb_U_ix_ices_list, - si_sb_U_ix_ices_bysize, - pl_sb_U_ix_ices_bysize, - pl_sb_U_ix_ices, -) = make_pl_si_lists(pl_sb_U_ix_ices_list, "ices", 2) - -# CLASSICAL "..[ei]x" -> "..ices" - -pl_sb_C_ex_ices_list = ( - "vortex", - "vertex", - "cortex", - "latex", - "pontifex", - "apex", - "index", - "simplex", -) - -( - si_sb_C_ex_ices_list, - si_sb_C_ex_ices_bysize, - pl_sb_C_ex_ices_bysize, - pl_sb_C_ex_ices, -) = make_pl_si_lists(pl_sb_C_ex_ices_list, "ices", 2) - - -pl_sb_C_ix_ices_list = ("appendix",) - -( - si_sb_C_ix_ices_list, - si_sb_C_ix_ices_bysize, - pl_sb_C_ix_ices_bysize, - pl_sb_C_ix_ices, -) = make_pl_si_lists(pl_sb_C_ix_ices_list, "ices", 2) - - -# ARABIC: ".." -> "..i" - -pl_sb_C_i_list = ("afrit", "afreet", "efreet") - -(si_sb_C_i_list, si_sb_C_i_bysize, pl_sb_C_i_bysize, pl_sb_C_i) = make_pl_si_lists( - pl_sb_C_i_list, "i", None -) - - -# HEBREW: ".." -> "..im" - -pl_sb_C_im_list = ("goy", "seraph", "cherub") - -(si_sb_C_im_list, si_sb_C_im_bysize, pl_sb_C_im_bysize, pl_sb_C_im) = make_pl_si_lists( - pl_sb_C_im_list, "im", None -) - - -# UNCONDITIONAL "..man" -> "..mans" - -pl_sb_U_man_mans_list = """ - ataman caiman cayman ceriman - desman dolman farman harman hetman - human leman ottoman shaman talisman -""".split() -pl_sb_U_man_mans_caps_list = """ - Alabaman Bahaman Burman German - Hiroshiman Liman Nakayaman Norman Oklahoman - Panaman Roman Selman Sonaman Tacoman Yakiman - Yokohaman Yuman -""".split() - -( - si_sb_U_man_mans_list, - si_sb_U_man_mans_bysize, - pl_sb_U_man_mans_bysize, -) = make_pl_si_lists(pl_sb_U_man_mans_list, "s", None, dojoinstem=False) -( - si_sb_U_man_mans_caps_list, - si_sb_U_man_mans_caps_bysize, - pl_sb_U_man_mans_caps_bysize, -) = make_pl_si_lists(pl_sb_U_man_mans_caps_list, "s", None, dojoinstem=False) - -# UNCONDITIONAL "..louse" -> "..lice" -pl_sb_U_louse_lice_list = ("booklouse", "grapelouse", "louse", "woodlouse") - -( - si_sb_U_louse_lice_list, - si_sb_U_louse_lice_bysize, - pl_sb_U_louse_lice_bysize, -) = make_pl_si_lists(pl_sb_U_louse_lice_list, "lice", 5, dojoinstem=False) - -pl_sb_uninflected_s_complete = [ - # PAIRS OR GROUPS SUBSUMED TO A SINGULAR... - "breeches", - "britches", - "pajamas", - "pyjamas", - "clippers", - "gallows", - "hijinks", - "headquarters", - "pliers", - "scissors", - "testes", - "herpes", - "pincers", - "shears", - "proceedings", - "trousers", - # UNASSIMILATED LATIN 4th DECLENSION - "cantus", - "coitus", - "nexus", - # RECENT IMPORTS... 
- "contretemps", - "corps", - "debris", - "siemens", - # DISEASES - "mumps", - # MISCELLANEOUS OTHERS... - "diabetes", - "jackanapes", - "series", - "species", - "subspecies", - "rabies", - "chassis", - "innings", - "news", - "mews", - "haggis", -] - -pl_sb_uninflected_s_endings = [ - # RECENT IMPORTS... - "ois", - # DISEASES - "measles", -] - -pl_sb_uninflected_s = pl_sb_uninflected_s_complete + [ - f".*{w}" for w in pl_sb_uninflected_s_endings -] - -pl_sb_uninflected_herd = ( - # DON'T INFLECT IN CLASSICAL MODE, OTHERWISE NORMAL INFLECTION - "wildebeest", - "swine", - "eland", - "bison", - "buffalo", - "cattle", - "elk", - "rhinoceros", - "zucchini", - "caribou", - "dace", - "grouse", - "guinea fowl", - "guinea-fowl", - "haddock", - "hake", - "halibut", - "herring", - "mackerel", - "pickerel", - "pike", - "roe", - "seed", - "shad", - "snipe", - "teal", - "turbot", - "water fowl", - "water-fowl", -) - -pl_sb_uninflected_complete = [ - # SOME FISH AND HERD ANIMALS - "tuna", - "salmon", - "mackerel", - "trout", - "bream", - "sea-bass", - "sea bass", - "carp", - "cod", - "flounder", - "whiting", - "moose", - # OTHER ODDITIES - "graffiti", - "djinn", - "samuri", - "offspring", - "pence", - "quid", - "hertz", -] + pl_sb_uninflected_s_complete -# SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE) - -pl_sb_uninflected_caps = [ - # ALL NATIONALS ENDING IN -ese - "Portuguese", - "Amoyese", - "Borghese", - "Congoese", - "Faroese", - "Foochowese", - "Genevese", - "Genoese", - "Gilbertese", - "Hottentotese", - "Kiplingese", - "Kongoese", - "Lucchese", - "Maltese", - "Nankingese", - "Niasese", - "Pekingese", - "Piedmontese", - "Pistoiese", - "Sarawakese", - "Shavese", - "Vermontese", - "Wenchowese", - "Yengeese", -] - - -pl_sb_uninflected_endings = [ - # UNCOUNTABLE NOUNS - "butter", - "cash", - "furniture", - "information", - # SOME FISH AND HERD ANIMALS - "fish", - "deer", - "sheep", - # ALL NATIONALS ENDING IN -ese - "nese", - "rese", - "lese", - "mese", - # DISEASES - "pox", - # OTHER ODDITIES - "craft", -] + pl_sb_uninflected_s_endings -# SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE) - - -pl_sb_uninflected_bysize = bysize(pl_sb_uninflected_endings) - - -# SINGULAR WORDS ENDING IN ...s (ALL INFLECT WITH ...es) - -pl_sb_singular_s_complete = [ - "acropolis", - "aegis", - "alias", - "asbestos", - "bathos", - "bias", - "bronchitis", - "bursitis", - "caddis", - "cannabis", - "canvas", - "chaos", - "cosmos", - "dais", - "digitalis", - "epidermis", - "ethos", - "eyas", - "gas", - "glottis", - "hubris", - "ibis", - "lens", - "mantis", - "marquis", - "metropolis", - "pathos", - "pelvis", - "polis", - "rhinoceros", - "sassafras", - "trellis", -] + pl_sb_C_is_ides_complete - - -pl_sb_singular_s_endings = ["ss", "us"] + pl_sb_C_is_ides_endings - -pl_sb_singular_s_bysize = bysize(pl_sb_singular_s_endings) - -si_sb_singular_s_complete = [f"{w}es" for w in pl_sb_singular_s_complete] -si_sb_singular_s_endings = [f"{w}es" for w in pl_sb_singular_s_endings] -si_sb_singular_s_bysize = bysize(si_sb_singular_s_endings) - -pl_sb_singular_s_es = ["[A-Z].*es"] - -pl_sb_singular_s = enclose( - "|".join( - pl_sb_singular_s_complete - + [f".*{w}" for w in pl_sb_singular_s_endings] - + pl_sb_singular_s_es - ) -) - - -# PLURALS ENDING IN uses -> use - - -si_sb_ois_oi_case = ("Bolshois", "Hanois") - -si_sb_uses_use_case = ("Betelgeuses", "Duses", "Meuses", "Syracuses", "Toulouses") - -si_sb_uses_use = ( - "abuses", - "applauses", - "blouses", - "carouses", - "causes", - "chartreuses", - "clauses", - 
"contuses", - "douses", - "excuses", - "fuses", - "grouses", - "hypotenuses", - "masseuses", - "menopauses", - "misuses", - "muses", - "overuses", - "pauses", - "peruses", - "profuses", - "recluses", - "reuses", - "ruses", - "souses", - "spouses", - "suffuses", - "transfuses", - "uses", -) - -si_sb_ies_ie_case = ( - "Addies", - "Aggies", - "Allies", - "Amies", - "Angies", - "Annies", - "Annmaries", - "Archies", - "Arties", - "Aussies", - "Barbies", - "Barries", - "Basies", - "Bennies", - "Bernies", - "Berties", - "Bessies", - "Betties", - "Billies", - "Blondies", - "Bobbies", - "Bonnies", - "Bowies", - "Brandies", - "Bries", - "Brownies", - "Callies", - "Carnegies", - "Carries", - "Cassies", - "Charlies", - "Cheries", - "Christies", - "Connies", - "Curies", - "Dannies", - "Debbies", - "Dixies", - "Dollies", - "Donnies", - "Drambuies", - "Eddies", - "Effies", - "Ellies", - "Elsies", - "Eries", - "Ernies", - "Essies", - "Eugenies", - "Fannies", - "Flossies", - "Frankies", - "Freddies", - "Gillespies", - "Goldies", - "Gracies", - "Guthries", - "Hallies", - "Hatties", - "Hetties", - "Hollies", - "Jackies", - "Jamies", - "Janies", - "Jannies", - "Jeanies", - "Jeannies", - "Jennies", - "Jessies", - "Jimmies", - "Jodies", - "Johnies", - "Johnnies", - "Josies", - "Julies", - "Kalgoorlies", - "Kathies", - "Katies", - "Kellies", - "Kewpies", - "Kristies", - "Laramies", - "Lassies", - "Lauries", - "Leslies", - "Lessies", - "Lillies", - "Lizzies", - "Lonnies", - "Lories", - "Lorries", - "Lotties", - "Louies", - "Mackenzies", - "Maggies", - "Maisies", - "Mamies", - "Marcies", - "Margies", - "Maries", - "Marjories", - "Matties", - "McKenzies", - "Melanies", - "Mickies", - "Millies", - "Minnies", - "Mollies", - "Mounties", - "Nannies", - "Natalies", - "Nellies", - "Netties", - "Ollies", - "Ozzies", - "Pearlies", - "Pottawatomies", - "Reggies", - "Richies", - "Rickies", - "Robbies", - "Ronnies", - "Rosalies", - "Rosemaries", - "Rosies", - "Roxies", - "Rushdies", - "Ruthies", - "Sadies", - "Sallies", - "Sammies", - "Scotties", - "Selassies", - "Sherries", - "Sophies", - "Stacies", - "Stefanies", - "Stephanies", - "Stevies", - "Susies", - "Sylvies", - "Tammies", - "Terries", - "Tessies", - "Tommies", - "Tracies", - "Trekkies", - "Valaries", - "Valeries", - "Valkyries", - "Vickies", - "Virgies", - "Willies", - "Winnies", - "Wylies", - "Yorkies", -) - -si_sb_ies_ie = ( - "aeries", - "baggies", - "belies", - "biggies", - "birdies", - "bogies", - "bonnies", - "boogies", - "bookies", - "bourgeoisies", - "brownies", - "budgies", - "caddies", - "calories", - "camaraderies", - "cockamamies", - "collies", - "cookies", - "coolies", - "cooties", - "coteries", - "crappies", - "curies", - "cutesies", - "dogies", - "eyries", - "floozies", - "footsies", - "freebies", - "genies", - "goalies", - "groupies", - "hies", - "jalousies", - "junkies", - "kiddies", - "laddies", - "lassies", - "lies", - "lingeries", - "magpies", - "menageries", - "mommies", - "movies", - "neckties", - "newbies", - "nighties", - "oldies", - "organdies", - "overlies", - "pies", - "pinkies", - "pixies", - "potpies", - "prairies", - "quickies", - "reveries", - "rookies", - "rotisseries", - "softies", - "sorties", - "species", - "stymies", - "sweeties", - "ties", - "underlies", - "unties", - "veggies", - "vies", - "yuppies", - "zombies", -) - - -si_sb_oes_oe_case = ( - "Chloes", - "Crusoes", - "Defoes", - "Faeroes", - "Ivanhoes", - "Joes", - "McEnroes", - "Moes", - "Monroes", - "Noes", - "Poes", - "Roscoes", - "Tahoes", - "Tippecanoes", - "Zoes", -) - 
-si_sb_oes_oe = ( - "aloes", - "backhoes", - "canoes", - "does", - "floes", - "foes", - "hoes", - "mistletoes", - "oboes", - "pekoes", - "roes", - "sloes", - "throes", - "tiptoes", - "toes", - "woes", -) - -si_sb_z_zes = ("quartzes", "topazes") - -si_sb_zzes_zz = ("buzzes", "fizzes", "frizzes", "razzes") - -si_sb_ches_che_case = ( - "Andromaches", - "Apaches", - "Blanches", - "Comanches", - "Nietzsches", - "Porsches", - "Roches", -) - -si_sb_ches_che = ( - "aches", - "avalanches", - "backaches", - "bellyaches", - "caches", - "cloches", - "creches", - "douches", - "earaches", - "fiches", - "headaches", - "heartaches", - "microfiches", - "niches", - "pastiches", - "psyches", - "quiches", - "stomachaches", - "toothaches", - "tranches", -) - -si_sb_xes_xe = ("annexes", "axes", "deluxes", "pickaxes") - -si_sb_sses_sse_case = ("Hesses", "Jesses", "Larousses", "Matisses") -si_sb_sses_sse = ( - "bouillabaisses", - "crevasses", - "demitasses", - "impasses", - "mousses", - "posses", -) - -si_sb_ves_ve_case = ( - # *[nwl]ives -> [nwl]live - "Clives", - "Palmolives", -) -si_sb_ves_ve = ( - # *[^d]eaves -> eave - "interweaves", - "weaves", - # *[nwl]ives -> [nwl]live - "olives", - # *[eoa]lves -> [eoa]lve - "bivalves", - "dissolves", - "resolves", - "salves", - "twelves", - "valves", -) - - -plverb_special_s = enclose( - "|".join( - [pl_sb_singular_s] - + pl_sb_uninflected_s - + list(pl_sb_irregular_s) - + ["(.*[csx])is", "(.*)ceps", "[A-Z].*s"] - ) -) - -_pl_sb_postfix_adj_defn = ( - ("general", enclose(r"(?!major|lieutenant|brigadier|adjutant|.*star)\S+")), - ("martial", enclose("court")), - ("force", enclose("pound")), -) - -pl_sb_postfix_adj: Iterable[str] = ( - enclose(val + f"(?=(?:-|\\s+){key})") for key, val in _pl_sb_postfix_adj_defn -) - -pl_sb_postfix_adj_stems = f"({'|'.join(pl_sb_postfix_adj)})(.*)" - - -# PLURAL WORDS ENDING IS es GO TO SINGULAR is - -si_sb_es_is = ( - "amanuenses", - "amniocenteses", - "analyses", - "antitheses", - "apotheoses", - "arterioscleroses", - "atheroscleroses", - "axes", - # 'bases', # bases -> basis - "catalyses", - "catharses", - "chasses", - "cirrhoses", - "cocces", - "crises", - "diagnoses", - "dialyses", - "diereses", - "electrolyses", - "emphases", - "exegeses", - "geneses", - "halitoses", - "hydrolyses", - "hypnoses", - "hypotheses", - "hystereses", - "metamorphoses", - "metastases", - "misdiagnoses", - "mitoses", - "mononucleoses", - "narcoses", - "necroses", - "nemeses", - "neuroses", - "oases", - "osmoses", - "osteoporoses", - "paralyses", - "parentheses", - "parthenogeneses", - "periphrases", - "photosyntheses", - "probosces", - "prognoses", - "prophylaxes", - "prostheses", - "preces", - "psoriases", - "psychoanalyses", - "psychokineses", - "psychoses", - "scleroses", - "scolioses", - "sepses", - "silicoses", - "symbioses", - "synopses", - "syntheses", - "taxes", - "telekineses", - "theses", - "thromboses", - "tuberculoses", - "urinalyses", -) - -pl_prep_list = """ - about above across after among around at athwart before behind - below beneath beside besides between betwixt beyond but by - during except for from in into near of off on onto out over - since till to under until unto upon with""".split() - -pl_prep_list_da = pl_prep_list + ["de", "du", "da"] - -pl_prep_bysize = bysize(pl_prep_list_da) - -pl_prep = enclose("|".join(pl_prep_list_da)) - -pl_sb_prep_dual_compound = rf"(.*?)((?:-|\s+)(?:{pl_prep})(?:-|\s+))a(?:-|\s+)(.*)" - - -singular_pronoun_genders = { - "neuter", - "feminine", - "masculine", - "gender-neutral", - "feminine or 
masculine", - "masculine or feminine", -} - -pl_pron_nom = { - # NOMINATIVE REFLEXIVE - "i": "we", - "myself": "ourselves", - "you": "you", - "yourself": "yourselves", - "she": "they", - "herself": "themselves", - "he": "they", - "himself": "themselves", - "it": "they", - "itself": "themselves", - "they": "they", - "themself": "themselves", - # POSSESSIVE - "mine": "ours", - "yours": "yours", - "hers": "theirs", - "his": "theirs", - "its": "theirs", - "theirs": "theirs", -} - -si_pron: Dict[str, Dict[str, Union[str, Dict[str, str]]]] = { - "nom": {v: k for (k, v) in pl_pron_nom.items()} -} -si_pron["nom"]["we"] = "I" - - -pl_pron_acc = { - # ACCUSATIVE REFLEXIVE - "me": "us", - "myself": "ourselves", - "you": "you", - "yourself": "yourselves", - "her": "them", - "herself": "themselves", - "him": "them", - "himself": "themselves", - "it": "them", - "itself": "themselves", - "them": "them", - "themself": "themselves", -} - -pl_pron_acc_keys = enclose("|".join(pl_pron_acc)) -pl_pron_acc_keys_bysize = bysize(pl_pron_acc) - -si_pron["acc"] = {v: k for (k, v) in pl_pron_acc.items()} - -for _thecase, _plur, _gend, _sing in ( - ("nom", "they", "neuter", "it"), - ("nom", "they", "feminine", "she"), - ("nom", "they", "masculine", "he"), - ("nom", "they", "gender-neutral", "they"), - ("nom", "they", "feminine or masculine", "she or he"), - ("nom", "they", "masculine or feminine", "he or she"), - ("nom", "themselves", "neuter", "itself"), - ("nom", "themselves", "feminine", "herself"), - ("nom", "themselves", "masculine", "himself"), - ("nom", "themselves", "gender-neutral", "themself"), - ("nom", "themselves", "feminine or masculine", "herself or himself"), - ("nom", "themselves", "masculine or feminine", "himself or herself"), - ("nom", "theirs", "neuter", "its"), - ("nom", "theirs", "feminine", "hers"), - ("nom", "theirs", "masculine", "his"), - ("nom", "theirs", "gender-neutral", "theirs"), - ("nom", "theirs", "feminine or masculine", "hers or his"), - ("nom", "theirs", "masculine or feminine", "his or hers"), - ("acc", "them", "neuter", "it"), - ("acc", "them", "feminine", "her"), - ("acc", "them", "masculine", "him"), - ("acc", "them", "gender-neutral", "them"), - ("acc", "them", "feminine or masculine", "her or him"), - ("acc", "them", "masculine or feminine", "him or her"), - ("acc", "themselves", "neuter", "itself"), - ("acc", "themselves", "feminine", "herself"), - ("acc", "themselves", "masculine", "himself"), - ("acc", "themselves", "gender-neutral", "themself"), - ("acc", "themselves", "feminine or masculine", "herself or himself"), - ("acc", "themselves", "masculine or feminine", "himself or herself"), -): - try: - si_pron[_thecase][_plur][_gend] = _sing # type: ignore - except TypeError: - si_pron[_thecase][_plur] = {} - si_pron[_thecase][_plur][_gend] = _sing # type: ignore - - -si_pron_acc_keys = enclose("|".join(si_pron["acc"])) -si_pron_acc_keys_bysize = bysize(si_pron["acc"]) - - -def get_si_pron(thecase, word, gender) -> str: - try: - sing = si_pron[thecase][word] - except KeyError: - raise # not a pronoun - try: - return sing[gender] # has several types due to gender - except TypeError: - return cast(str, sing) # answer independent of gender - - -# These dictionaries group verbs by first, second and third person -# conjugations. 
- -plverb_irregular_pres = { - "am": "are", - "are": "are", - "is": "are", - "was": "were", - "were": "were", - "have": "have", - "has": "have", - "do": "do", - "does": "do", -} - -plverb_ambiguous_pres = { - "act": "act", - "acts": "act", - "blame": "blame", - "blames": "blame", - "can": "can", - "must": "must", - "fly": "fly", - "flies": "fly", - "copy": "copy", - "copies": "copy", - "drink": "drink", - "drinks": "drink", - "fight": "fight", - "fights": "fight", - "fire": "fire", - "fires": "fire", - "like": "like", - "likes": "like", - "look": "look", - "looks": "look", - "make": "make", - "makes": "make", - "reach": "reach", - "reaches": "reach", - "run": "run", - "runs": "run", - "sink": "sink", - "sinks": "sink", - "sleep": "sleep", - "sleeps": "sleep", - "view": "view", - "views": "view", -} - -plverb_ambiguous_pres_keys = re.compile( - rf"^({enclose('|'.join(plverb_ambiguous_pres))})((\s.*)?)$", re.IGNORECASE -) - - -plverb_irregular_non_pres = ( - "did", - "had", - "ate", - "made", - "put", - "spent", - "fought", - "sank", - "gave", - "sought", - "shall", - "could", - "ought", - "should", -) - -plverb_ambiguous_non_pres = re.compile( - r"^((?:thought|saw|bent|will|might|cut))((\s.*)?)$", re.IGNORECASE -) - -# "..oes" -> "..oe" (the rest are "..oes" -> "o") - -pl_v_oes_oe = ("canoes", "floes", "oboes", "roes", "throes", "woes") -pl_v_oes_oe_endings_size4 = ("hoes", "toes") -pl_v_oes_oe_endings_size5 = ("shoes",) - - -pl_count_zero = ("0", "no", "zero", "nil") - - -pl_count_one = ("1", "a", "an", "one", "each", "every", "this", "that") - -pl_adj_special = {"a": "some", "an": "some", "this": "these", "that": "those"} - -pl_adj_special_keys = re.compile( - rf"^({enclose('|'.join(pl_adj_special))})$", re.IGNORECASE -) - -pl_adj_poss = { - "my": "our", - "your": "your", - "its": "their", - "her": "their", - "his": "their", - "their": "their", -} - -pl_adj_poss_keys = re.compile(rf"^({enclose('|'.join(pl_adj_poss))})$", re.IGNORECASE) - - -# 2. INDEFINITE ARTICLES - -# THIS PATTERN MATCHES STRINGS OF CAPITALS STARTING WITH A "VOWEL-SOUND" -# CONSONANT FOLLOWED BY ANOTHER CONSONANT, AND WHICH ARE NOT LIKELY -# TO BE REAL WORDS (OH, ALL RIGHT THEN, IT'S JUST MAGIC!) - -A_abbrev = re.compile( - r""" -^(?! FJO | [HLMNS]Y. | RY[EO] | SQU - | ( F[LR]? | [HL] | MN? | N | RH? | S[CHKLMNPTVW]? | X(YL)?) [AEIOU]) -[FHLMNRSX][A-Z] -""", - re.VERBOSE, -) - -# THIS PATTERN CODES THE BEGINNINGS OF ALL ENGLISH WORDS BEGINING WITH A -# 'y' FOLLOWED BY A CONSONANT. ANY OTHER Y-CONSONANT PREFIX THEREFORE -# IMPLIES AN ABBREVIATION. 
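# Editor's sketch (not part of the vendored diff): the article heuristics
# above (A_abbrev, A_explicit_an, the ARTICLE_SPECIAL_* patterns) and A_y_cons
# just below are easiest to see through a()/an(). Illustrative words only,
# assuming the vendored package imports as `inflect`:
import inflect

p = inflect.engine()
assert p.an("hour") == "an hour"            # A_explicit_an: silent 'h'
assert p.a("FBI agent") == "an FBI agent"   # A_abbrev: vowel-sound initialism
assert p.a("ewe") == "a ewe"                # ARTICLE_SPECIAL_EU: 'ew' sounds like "yoo"
assert p.a("unicorn") == "a unicorn"        # ARTICLE_SPECIAL_UNIT: 'uni-' prefix
assert p.a("yttrium sample") == "an yttrium sample"  # A_y_cons: y + consonant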
- -A_y_cons = re.compile(r"^(y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt))", re.IGNORECASE) - -# EXCEPTIONS TO EXCEPTIONS - -A_explicit_a = re.compile(r"^((?:unabomber|unanimous|US))", re.IGNORECASE) - -A_explicit_an = re.compile( - r"^((?:euler|hour(?!i)|heir|honest|hono[ur]|mpeg))", re.IGNORECASE -) - -A_ordinal_an = re.compile(r"^([aefhilmnorsx]-?th)", re.IGNORECASE) - -A_ordinal_a = re.compile(r"^([bcdgjkpqtuvwyz]-?th)", re.IGNORECASE) - - -# NUMERICAL INFLECTIONS - -nth = { - 0: "th", - 1: "st", - 2: "nd", - 3: "rd", - 4: "th", - 5: "th", - 6: "th", - 7: "th", - 8: "th", - 9: "th", - 11: "th", - 12: "th", - 13: "th", -} -nth_suff = set(nth.values()) - -ordinal = dict( - ty="tieth", - one="first", - two="second", - three="third", - five="fifth", - eight="eighth", - nine="ninth", - twelve="twelfth", -) - -ordinal_suff = re.compile(rf"({'|'.join(ordinal)})\Z") - - -# NUMBERS - -unit = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] -teen = [ - "ten", - "eleven", - "twelve", - "thirteen", - "fourteen", - "fifteen", - "sixteen", - "seventeen", - "eighteen", - "nineteen", -] -ten = [ - "", - "", - "twenty", - "thirty", - "forty", - "fifty", - "sixty", - "seventy", - "eighty", - "ninety", -] -mill = [ - " ", - " thousand", - " million", - " billion", - " trillion", - " quadrillion", - " quintillion", - " sextillion", - " septillion", - " octillion", - " nonillion", - " decillion", -] - - -# SUPPORT CLASSICAL PLURALIZATIONS - -def_classical = dict( - all=False, zero=False, herd=False, names=True, persons=False, ancient=False -) - -all_classical = {k: True for k in def_classical} -no_classical = {k: False for k in def_classical} - - -# Maps strings to built-in constant types -string_to_constant = {"True": True, "False": False, "None": None} - - -# Pre-compiled regular expression objects -DOLLAR_DIGITS = re.compile(r"\$(\d+)") -FUNCTION_CALL = re.compile(r"((\w+)\([^)]*\)*)", re.IGNORECASE) -PARTITION_WORD = re.compile(r"\A(\s*)(.+?)(\s*)\Z") -PL_SB_POSTFIX_ADJ_STEMS_RE = re.compile( - rf"^(?:{pl_sb_postfix_adj_stems})$", re.IGNORECASE -) -PL_SB_PREP_DUAL_COMPOUND_RE = re.compile( - rf"^(?:{pl_sb_prep_dual_compound})$", re.IGNORECASE -) -DENOMINATOR = re.compile(r"(?P.+)( (per|a) .+)") -PLVERB_SPECIAL_S_RE = re.compile(rf"^({plverb_special_s})$") -WHITESPACE = re.compile(r"\s") -ENDS_WITH_S = re.compile(r"^(.*[^s])s$", re.IGNORECASE) -ENDS_WITH_APOSTROPHE_S = re.compile(r"^(.*)'s?$") -INDEFINITE_ARTICLE_TEST = re.compile(r"\A(\s*)(?:an?\s+)?(.+?)(\s*)\Z", re.IGNORECASE) -SPECIAL_AN = re.compile(r"^[aefhilmnorsx]$", re.IGNORECASE) -SPECIAL_A = re.compile(r"^[bcdgjkpqtuvwyz]$", re.IGNORECASE) -SPECIAL_ABBREV_AN = re.compile(r"^[aefhilmnorsx][.-]", re.IGNORECASE) -SPECIAL_ABBREV_A = re.compile(r"^[a-z][.-]", re.IGNORECASE) -CONSONANTS = re.compile(r"^[^aeiouy]", re.IGNORECASE) -ARTICLE_SPECIAL_EU = re.compile(r"^e[uw]", re.IGNORECASE) -ARTICLE_SPECIAL_ONCE = re.compile(r"^onc?e\b", re.IGNORECASE) -ARTICLE_SPECIAL_ONETIME = re.compile(r"^onetime\b", re.IGNORECASE) -ARTICLE_SPECIAL_UNIT = re.compile(r"^uni([^nmd]|mo)", re.IGNORECASE) -ARTICLE_SPECIAL_UBA = re.compile(r"^u[bcfghjkqrst][aeiou]", re.IGNORECASE) -ARTICLE_SPECIAL_UKR = re.compile(r"^ukr", re.IGNORECASE) -SPECIAL_CAPITALS = re.compile(r"^U[NK][AIEO]?") -VOWELS = re.compile(r"^[aeiou]", re.IGNORECASE) - -DIGIT_GROUP = re.compile(r"(\d)") -TWO_DIGITS = re.compile(r"(\d)(\d)") -THREE_DIGITS = re.compile(r"(\d)(\d)(\d)") -THREE_DIGITS_WORD = re.compile(r"(\d)(\d)(\d)(?=\D*\Z)") -TWO_DIGITS_WORD = 
re.compile(r"(\d)(\d)(?=\D*\Z)") -ONE_DIGIT_WORD = re.compile(r"(\d)(?=\D*\Z)") - -FOUR_DIGIT_COMMA = re.compile(r"(\d)(\d{3}(?:,|\Z))") -NON_DIGIT = re.compile(r"\D") -WHITESPACES_COMMA = re.compile(r"\s+,") -COMMA_WORD = re.compile(r", (\S+)\s+\Z") -WHITESPACES = re.compile(r"\s+") - - -PRESENT_PARTICIPLE_REPLACEMENTS = ( - (re.compile(r"ie$"), r"y"), - ( - re.compile(r"ue$"), - r"u", - ), # TODO: isn't ue$ -> u encompassed in the following rule? - (re.compile(r"([auy])e$"), r"\g<1>"), - (re.compile(r"ski$"), r"ski"), - (re.compile(r"[^b]i$"), r""), - (re.compile(r"^(are|were)$"), r"be"), - (re.compile(r"^(had)$"), r"hav"), - (re.compile(r"^(hoe)$"), r"\g<1>"), - (re.compile(r"([^e])e$"), r"\g<1>"), - (re.compile(r"er$"), r"er"), - (re.compile(r"([^aeiou][aeiouy]([bdgmnprst]))$"), r"\g<1>\g<2>"), -) - -DIGIT = re.compile(r"\d") - - -class Words(str): - lowered: str - split_: List[str] - first: str - last: str - - def __init__(self, orig) -> None: - self.lowered = self.lower() - self.split_ = self.split() - self.first = self.split_[0] - self.last = self.split_[-1] - - -Falsish = Any # ideally, falsish would only validate on bool(value) is False - - -_STATIC_TYPE_CHECKING = TYPE_CHECKING -# ^-- Workaround for typeguard AST manipulation: -# https://github.com/agronholm/typeguard/issues/353#issuecomment-1556306554 - -if _STATIC_TYPE_CHECKING: # pragma: no cover - Word = Annotated[str, "String with at least 1 character"] -else: - - class _WordMeta(type): # Too dynamic to be supported by mypy... - def __instancecheck__(self, instance: Any) -> bool: - return isinstance(instance, str) and len(instance) >= 1 - - class Word(metaclass=_WordMeta): # type: ignore[no-redef] - """String with at least 1 character""" - - -class engine: - def __init__(self) -> None: - self.classical_dict = def_classical.copy() - self.persistent_count: Optional[int] = None - self.mill_count = 0 - self.pl_sb_user_defined: List[Optional[Word]] = [] - self.pl_v_user_defined: List[Optional[Word]] = [] - self.pl_adj_user_defined: List[Optional[Word]] = [] - self.si_sb_user_defined: List[Optional[Word]] = [] - self.A_a_user_defined: List[Optional[Word]] = [] - self.thegender = "neuter" - self.__number_args: Optional[Dict[str, str]] = None - - @property - def _number_args(self): - return cast(Dict[str, str], self.__number_args) - - @_number_args.setter - def _number_args(self, val): - self.__number_args = val - - @typechecked - def defnoun(self, singular: Optional[Word], plural: Optional[Word]) -> int: - """ - Set the noun plural of singular to plural. - - """ - self.checkpat(singular) - self.checkpatplural(plural) - self.pl_sb_user_defined.extend((singular, plural)) - self.si_sb_user_defined.extend((plural, singular)) - return 1 - - @typechecked - def defverb( - self, - s1: Optional[Word], - p1: Optional[Word], - s2: Optional[Word], - p2: Optional[Word], - s3: Optional[Word], - p3: Optional[Word], - ) -> int: - """ - Set the verb plurals for s1, s2 and s3 to p1, p2 and p3 respectively. - - Where 1, 2 and 3 represent the 1st, 2nd and 3rd person forms of the verb. - - """ - self.checkpat(s1) - self.checkpat(s2) - self.checkpat(s3) - self.checkpatplural(p1) - self.checkpatplural(p2) - self.checkpatplural(p3) - self.pl_v_user_defined.extend((s1, p1, s2, p2, s3, p3)) - return 1 - - @typechecked - def defadj(self, singular: Optional[Word], plural: Optional[Word]) -> int: - """ - Set the adjective plural of singular to plural. 
- - """ - self.checkpat(singular) - self.checkpatplural(plural) - self.pl_adj_user_defined.extend((singular, plural)) - return 1 - - @typechecked - def defa(self, pattern: Optional[Word]) -> int: - """ - Define the indefinite article as 'a' for words matching pattern. - - """ - self.checkpat(pattern) - self.A_a_user_defined.extend((pattern, "a")) - return 1 - - @typechecked - def defan(self, pattern: Optional[Word]) -> int: - """ - Define the indefinite article as 'an' for words matching pattern. - - """ - self.checkpat(pattern) - self.A_a_user_defined.extend((pattern, "an")) - return 1 - - def checkpat(self, pattern: Optional[Word]) -> None: - """ - check for errors in a regex pattern - """ - if pattern is None: - return - try: - re.match(pattern, "") - except re.error as err: - raise BadUserDefinedPatternError(pattern) from err - - def checkpatplural(self, pattern: Optional[Word]) -> None: - """ - check for errors in a regex replace pattern - """ - return - - @typechecked - def ud_match(self, word: Word, wordlist: Sequence[Optional[Word]]) -> Optional[str]: - for i in range(len(wordlist) - 2, -2, -2): # backwards through even elements - mo = re.search(rf"^{wordlist[i]}$", word, re.IGNORECASE) - if mo: - if wordlist[i + 1] is None: - return None - pl = DOLLAR_DIGITS.sub( - r"\\1", cast(Word, wordlist[i + 1]) - ) # change $n to \n for expand - return mo.expand(pl) - return None - - def classical(self, **kwargs) -> None: - """ - turn classical mode on and off for various categories - - turn on all classical modes: - classical() - classical(all=True) - - turn on or off specific claassical modes: - e.g. - classical(herd=True) - classical(names=False) - - By default all classical modes are off except names. - - unknown value in args or key in kwargs raises - exception: UnknownClasicalModeError - - """ - if not kwargs: - self.classical_dict = all_classical.copy() - return - if "all" in kwargs: - if kwargs["all"]: - self.classical_dict = all_classical.copy() - else: - self.classical_dict = no_classical.copy() - - for k, v in kwargs.items(): - if k in def_classical: - self.classical_dict[k] = v - else: - raise UnknownClassicalModeError - - def num( - self, count: Optional[int] = None, show: Optional[int] = None - ) -> str: # (;$count,$show) - """ - Set the number to be used in other method calls. - - Returns count. - - Set show to False to return '' instead. - - """ - if count is not None: - try: - self.persistent_count = int(count) - except ValueError as err: - raise BadNumValueError from err - if (show is None) or show: - return str(count) - else: - self.persistent_count = None - return "" - - def gender(self, gender: str) -> None: - """ - set the gender for the singular of plural pronouns - - can be one of: - 'neuter' ('they' -> 'it') - 'feminine' ('they' -> 'she') - 'masculine' ('they' -> 'he') - 'gender-neutral' ('they' -> 'they') - 'feminine or masculine' ('they' -> 'she or he') - 'masculine or feminine' ('they' -> 'he or she') - """ - if gender in singular_pronoun_genders: - self.thegender = gender - else: - raise BadGenderError - - def _get_value_from_ast(self, obj): - """ - Return the value of the ast object. - """ - if isinstance(obj, ast.Num): - return obj.n - elif isinstance(obj, ast.Str): - return obj.s - elif isinstance(obj, ast.List): - return [self._get_value_from_ast(e) for e in obj.elts] - elif isinstance(obj, ast.Tuple): - return tuple([self._get_value_from_ast(e) for e in obj.elts]) - - # None, True and False are NameConstants in Py3.4 and above. 
- elif isinstance(obj, ast.NameConstant): - return obj.value - - # Probably passed a variable name. - # Or passed a single word without wrapping it in quotes as an argument - # ex: p.inflect("I plural(see)") instead of p.inflect("I plural('see')") - raise NameError(f"name '{obj.id}' is not defined") - - def _string_to_substitute( - self, mo: Match, methods_dict: Dict[str, Callable] - ) -> str: - """ - Return the string to be substituted for the match. - """ - matched_text, f_name = mo.groups() - # matched_text is the complete match string. e.g. plural_noun(cat) - # f_name is the function name. e.g. plural_noun - - # Return matched_text if function name is not in methods_dict - if f_name not in methods_dict: - return matched_text - - # Parse the matched text - a_tree = ast.parse(matched_text) - - # get the args and kwargs from ast objects - args_list = [ - self._get_value_from_ast(a) - for a in a_tree.body[0].value.args # type: ignore[attr-defined] - ] - kwargs_list = { - kw.arg: self._get_value_from_ast(kw.value) - for kw in a_tree.body[0].value.keywords # type: ignore[attr-defined] - } - - # Call the corresponding function - return methods_dict[f_name](*args_list, **kwargs_list) - - # 0. PERFORM GENERAL INFLECTIONS IN A STRING - - @typechecked - def inflect(self, text: Word) -> str: - """ - Perform inflections in a string. - - e.g. inflect('The plural of cat is plural(cat)') returns - 'The plural of cat is cats' - - can use plural, plural_noun, plural_verb, plural_adj, - singular_noun, a, an, no, ordinal, number_to_words, - and prespart - - """ - save_persistent_count = self.persistent_count - - # Dictionary of allowed methods - methods_dict: Dict[str, Callable] = { - "plural": self.plural, - "plural_adj": self.plural_adj, - "plural_noun": self.plural_noun, - "plural_verb": self.plural_verb, - "singular_noun": self.singular_noun, - "a": self.a, - "an": self.a, - "no": self.no, - "ordinal": self.ordinal, - "number_to_words": self.number_to_words, - "present_participle": self.present_participle, - "num": self.num, - } - - # Regular expression to find Python's function call syntax - output = FUNCTION_CALL.sub( - lambda mo: self._string_to_substitute(mo, methods_dict), text - ) - self.persistent_count = save_persistent_count - return output - - # ## PLURAL SUBROUTINES - - def postprocess(self, orig: str, inflected) -> str: - inflected = str(inflected) - if "|" in inflected: - word_options = inflected.split("|") - # When two parts of a noun need to be pluralized - if len(word_options[0].split(" ")) == len(word_options[1].split(" ")): - result = inflected.split("|")[self.classical_dict["all"]].split(" ") - # When only the last part of the noun needs to be pluralized - else: - result = inflected.split(" ") - for index, word in enumerate(result): - if "|" in word: - result[index] = word.split("|")[self.classical_dict["all"]] - else: - result = inflected.split(" ") - - # Try to fix word wise capitalization - for index, word in enumerate(orig.split(" ")): - if word == "I": - # Is this the only word for exceptions like this - # Where the original is fully capitalized - # without 'meaning' capitalization? 
- # Also this fails to handle a capitalizaion in context - continue - if word.capitalize() == word: - result[index] = result[index].capitalize() - if word == word.upper(): - result[index] = result[index].upper() - return " ".join(result) - - def partition_word(self, text: str) -> Tuple[str, str, str]: - mo = PARTITION_WORD.search(text) - if mo: - return mo.group(1), mo.group(2), mo.group(3) - else: - return "", "", "" - - @typechecked - def plural(self, text: Word, count: Optional[Union[str, int, Any]] = None) -> str: - """ - Return the plural of text. - - If count supplied, then return text if count is one of: - 1, a, an, one, each, every, this, that - - otherwise return the plural. - - Whitespace at the start and end is preserved. - - """ - pre, word, post = self.partition_word(text) - if not word: - return text - plural = self.postprocess( - word, - self._pl_special_adjective(word, count) - or self._pl_special_verb(word, count) - or self._plnoun(word, count), - ) - return f"{pre}{plural}{post}" - - @typechecked - def plural_noun( - self, text: Word, count: Optional[Union[str, int, Any]] = None - ) -> str: - """ - Return the plural of text, where text is a noun. - - If count supplied, then return text if count is one of: - 1, a, an, one, each, every, this, that - - otherwise return the plural. - - Whitespace at the start and end is preserved. - - """ - pre, word, post = self.partition_word(text) - if not word: - return text - plural = self.postprocess(word, self._plnoun(word, count)) - return f"{pre}{plural}{post}" - - @typechecked - def plural_verb( - self, text: Word, count: Optional[Union[str, int, Any]] = None - ) -> str: - """ - Return the plural of text, where text is a verb. - - If count supplied, then return text if count is one of: - 1, a, an, one, each, every, this, that - - otherwise return the plural. - - Whitespace at the start and end is preserved. - - """ - pre, word, post = self.partition_word(text) - if not word: - return text - plural = self.postprocess( - word, - self._pl_special_verb(word, count) or self._pl_general_verb(word, count), - ) - return f"{pre}{plural}{post}" - - @typechecked - def plural_adj( - self, text: Word, count: Optional[Union[str, int, Any]] = None - ) -> str: - """ - Return the plural of text, where text is an adjective. - - If count supplied, then return text if count is one of: - 1, a, an, one, each, every, this, that - - otherwise return the plural. - - Whitespace at the start and end is preserved. - - """ - pre, word, post = self.partition_word(text) - if not word: - return text - plural = self.postprocess(word, self._pl_special_adjective(word, count) or word) - return f"{pre}{plural}{post}" - - @typechecked - def compare(self, word1: Word, word2: Word) -> Union[str, bool]: - """ - compare word1 and word2 for equality regardless of plurality - - return values: - eq - the strings are equal - p:s - word1 is the plural of word2 - s:p - word2 is the plural of word1 - p:p - word1 and word2 are two different plural forms of the one word - False - otherwise - - >>> compare = engine().compare - >>> compare("egg", "eggs") - 's:p' - >>> compare('egg', 'egg') - 'eq' - - Words should not be empty. - - >>> compare('egg', '') - Traceback (most recent call last): - ... 
- typeguard.TypeCheckError:...is not an instance of inflect.Word - """ - norms = self.plural_noun, self.plural_verb, self.plural_adj - results = (self._plequal(word1, word2, norm) for norm in norms) - return next(filter(None, results), False) - - @typechecked - def compare_nouns(self, word1: Word, word2: Word) -> Union[str, bool]: - """ - compare word1 and word2 for equality regardless of plurality - word1 and word2 are to be treated as nouns - - return values: - eq - the strings are equal - p:s - word1 is the plural of word2 - s:p - word2 is the plural of word1 - p:p - word1 and word2 are two different plural forms of the one word - False - otherwise - - """ - return self._plequal(word1, word2, self.plural_noun) - - @typechecked - def compare_verbs(self, word1: Word, word2: Word) -> Union[str, bool]: - """ - compare word1 and word2 for equality regardless of plurality - word1 and word2 are to be treated as verbs - - return values: - eq - the strings are equal - p:s - word1 is the plural of word2 - s:p - word2 is the plural of word1 - p:p - word1 and word2 are two different plural forms of the one word - False - otherwise - - """ - return self._plequal(word1, word2, self.plural_verb) - - @typechecked - def compare_adjs(self, word1: Word, word2: Word) -> Union[str, bool]: - """ - compare word1 and word2 for equality regardless of plurality - word1 and word2 are to be treated as adjectives - - return values: - eq - the strings are equal - p:s - word1 is the plural of word2 - s:p - word2 is the plural of word1 - p:p - word1 and word2 are two different plural forms of the one word - False - otherwise - - """ - return self._plequal(word1, word2, self.plural_adj) - - @typechecked - def singular_noun( - self, - text: Word, - count: Optional[Union[int, str, Any]] = None, - gender: Optional[str] = None, - ) -> Union[str, Literal[False]]: - """ - Return the singular of text, where text is a plural noun. - - If count supplied, then return the singular if count is one of: - 1, a, an, one, each, every, this, that or if count is None - - otherwise return text unchanged. - - Whitespace at the start and end is preserved. - - >>> p = engine() - >>> p.singular_noun('horses') - 'horse' - >>> p.singular_noun('knights') - 'knight' - - Returns False when a singular noun is passed. 
- - >>> p.singular_noun('horse') - False - >>> p.singular_noun('knight') - False - >>> p.singular_noun('soldier') - False - - """ - pre, word, post = self.partition_word(text) - if not word: - return text - sing = self._sinoun(word, count=count, gender=gender) - if sing is not False: - plural = self.postprocess(word, sing) - return f"{pre}{plural}{post}" - return False - - def _plequal(self, word1: str, word2: str, pl) -> Union[str, bool]: # noqa: C901 - classval = self.classical_dict.copy() - self.classical_dict = all_classical.copy() - if word1 == word2: - return "eq" - if word1 == pl(word2): - return "p:s" - if pl(word1) == word2: - return "s:p" - self.classical_dict = no_classical.copy() - if word1 == pl(word2): - return "p:s" - if pl(word1) == word2: - return "s:p" - self.classical_dict = classval.copy() - - if pl == self.plural or pl == self.plural_noun: - if self._pl_check_plurals_N(word1, word2): - return "p:p" - if self._pl_check_plurals_N(word2, word1): - return "p:p" - if pl == self.plural or pl == self.plural_adj: - if self._pl_check_plurals_adj(word1, word2): - return "p:p" - return False - - def _pl_reg_plurals(self, pair: str, stems: str, end1: str, end2: str) -> bool: - pattern = rf"({stems})({end1}\|\1{end2}|{end2}\|\1{end1})" - return bool(re.search(pattern, pair)) - - def _pl_check_plurals_N(self, word1: str, word2: str) -> bool: - stem_endings = ( - (pl_sb_C_a_ata, "as", "ata"), - (pl_sb_C_is_ides, "is", "ides"), - (pl_sb_C_a_ae, "s", "e"), - (pl_sb_C_en_ina, "ens", "ina"), - (pl_sb_C_um_a, "ums", "a"), - (pl_sb_C_us_i, "uses", "i"), - (pl_sb_C_on_a, "ons", "a"), - (pl_sb_C_o_i_stems, "os", "i"), - (pl_sb_C_ex_ices, "exes", "ices"), - (pl_sb_C_ix_ices, "ixes", "ices"), - (pl_sb_C_i, "s", "i"), - (pl_sb_C_im, "s", "im"), - (".*eau", "s", "x"), - (".*ieu", "s", "x"), - (".*tri", "xes", "ces"), - (".{2,}[yia]n", "xes", "ges"), - ) - - words = map(Words, (word1, word2)) - pair = "|".join(word.last for word in words) - - return ( - pair in pl_sb_irregular_s.values() - or pair in pl_sb_irregular.values() - or pair in pl_sb_irregular_caps.values() - or any( - self._pl_reg_plurals(pair, stems, end1, end2) - for stems, end1, end2 in stem_endings - ) - ) - - def _pl_check_plurals_adj(self, word1: str, word2: str) -> bool: - word1a = word1[: word1.rfind("'")] if word1.endswith(("'s", "'")) else "" - word2a = word2[: word2.rfind("'")] if word2.endswith(("'s", "'")) else "" - - return ( - bool(word1a) - and bool(word2a) - and ( - self._pl_check_plurals_N(word1a, word2a) - or self._pl_check_plurals_N(word2a, word1a) - ) - ) - - def get_count(self, count: Optional[Union[str, int]] = None) -> Union[str, int]: - if count is None and self.persistent_count is not None: - count = self.persistent_count - - if count is not None: - count = ( - 1 - if ( - (str(count) in pl_count_one) - or ( - self.classical_dict["zero"] - and str(count).lower() in pl_count_zero - ) - ) - else 2 - ) - else: - count = "" - return count - - # @profile - def _plnoun( # noqa: C901 - self, word: str, count: Optional[Union[str, int]] = None - ) -> str: - count = self.get_count(count) - - # DEFAULT TO PLURAL - - if count == 1: - return word - - # HANDLE USER-DEFINED NOUNS - - value = self.ud_match(word, self.pl_sb_user_defined) - if value is not None: - return value - - # HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS - - if word == "": - return word - - word = Words(word) - - if word.last.lower() in pl_sb_uninflected_complete: - if len(word.split_) >= 3: - return self._handle_long_compounds(word, count=2) 
or word - return word - - if word in pl_sb_uninflected_caps: - return word - - for k, v in pl_sb_uninflected_bysize.items(): - if word.lowered[-k:] in v: - return word - - if self.classical_dict["herd"] and word.last.lower() in pl_sb_uninflected_herd: - return word - - # HANDLE COMPOUNDS ("Governor General", "mother-in-law", "aide-de-camp", ETC.) - - mo = PL_SB_POSTFIX_ADJ_STEMS_RE.search(word) - if mo and mo.group(2) != "": - return f"{self._plnoun(mo.group(1), 2)}{mo.group(2)}" - - if " a " in word.lowered or "-a-" in word.lowered: - mo = PL_SB_PREP_DUAL_COMPOUND_RE.search(word) - if mo and mo.group(2) != "" and mo.group(3) != "": - return ( - f"{self._plnoun(mo.group(1), 2)}" - f"{mo.group(2)}" - f"{self._plnoun(mo.group(3))}" - ) - - if len(word.split_) >= 3: - handled_words = self._handle_long_compounds(word, count=2) - if handled_words is not None: - return handled_words - - # only pluralize denominators in units - mo = DENOMINATOR.search(word.lowered) - if mo: - index = len(mo.group("denominator")) - return f"{self._plnoun(word[:index])}{word[index:]}" - - # handle units given in degrees (only accept if - # there is no more than one word following) - # degree Celsius => degrees Celsius but degree - # fahrenheit hour => degree fahrenheit hours - if len(word.split_) >= 2 and word.split_[-2] == "degree": - return " ".join([self._plnoun(word.first)] + word.split_[1:]) - - with contextlib.suppress(ValueError): - return self._handle_prepositional_phrase( - word.lowered, - functools.partial(self._plnoun, count=2), - '-', - ) - - # HANDLE PRONOUNS - - for k, v in pl_pron_acc_keys_bysize.items(): - if word.lowered[-k:] in v: # ends with accusative pronoun - for pk, pv in pl_prep_bysize.items(): - if word.lowered[:pk] in pv: # starts with a prep - if word.lowered.split() == [ - word.lowered[:pk], - word.lowered[-k:], - ]: - # only whitespace in between - return word.lowered[:-k] + pl_pron_acc[word.lowered[-k:]] - - try: - return pl_pron_nom[word.lowered] - except KeyError: - pass - - try: - return pl_pron_acc[word.lowered] - except KeyError: - pass - - # HANDLE ISOLATED IRREGULAR PLURALS - - if word.last in pl_sb_irregular_caps: - llen = len(word.last) - return f"{word[:-llen]}{pl_sb_irregular_caps[word.last]}" - - lowered_last = word.last.lower() - if lowered_last in pl_sb_irregular: - llen = len(lowered_last) - return f"{word[:-llen]}{pl_sb_irregular[lowered_last]}" - - dash_split = word.lowered.split('-') - if (" ".join(dash_split[-2:])).lower() in pl_sb_irregular_compound: - llen = len( - " ".join(dash_split[-2:]) - ) # TODO: what if 2 spaces between these words? 
- return ( - f"{word[:-llen]}" - f"{pl_sb_irregular_compound[(' '.join(dash_split[-2:])).lower()]}" - ) - - if word.lowered[-3:] == "quy": - return f"{word[:-1]}ies" - - if word.lowered[-6:] == "person": - if self.classical_dict["persons"]: - return f"{word}s" - else: - return f"{word[:-4]}ople" - - # HANDLE FAMILIES OF IRREGULAR PLURALS - - if word.lowered[-3:] == "man": - for k, v in pl_sb_U_man_mans_bysize.items(): - if word.lowered[-k:] in v: - return f"{word}s" - for k, v in pl_sb_U_man_mans_caps_bysize.items(): - if word[-k:] in v: - return f"{word}s" - return f"{word[:-3]}men" - if word.lowered[-5:] == "mouse": - return f"{word[:-5]}mice" - if word.lowered[-5:] == "louse": - v = pl_sb_U_louse_lice_bysize.get(len(word)) - if v and word.lowered in v: - return f"{word[:-5]}lice" - return f"{word}s" - if word.lowered[-5:] == "goose": - return f"{word[:-5]}geese" - if word.lowered[-5:] == "tooth": - return f"{word[:-5]}teeth" - if word.lowered[-4:] == "foot": - return f"{word[:-4]}feet" - if word.lowered[-4:] == "taco": - return f"{word[:-5]}tacos" - - if word.lowered == "die": - return "dice" - - # HANDLE UNASSIMILATED IMPORTS - - if word.lowered[-4:] == "ceps": - return word - if word.lowered[-4:] == "zoon": - return f"{word[:-2]}a" - if word.lowered[-3:] in ("cis", "sis", "xis"): - return f"{word[:-2]}es" - - for lastlet, d, numend, post in ( - ("h", pl_sb_U_ch_chs_bysize, None, "s"), - ("x", pl_sb_U_ex_ices_bysize, -2, "ices"), - ("x", pl_sb_U_ix_ices_bysize, -2, "ices"), - ("m", pl_sb_U_um_a_bysize, -2, "a"), - ("s", pl_sb_U_us_i_bysize, -2, "i"), - ("n", pl_sb_U_on_a_bysize, -2, "a"), - ("a", pl_sb_U_a_ae_bysize, None, "e"), - ): - if word.lowered[-1] == lastlet: # this test to add speed - for k, v in d.items(): - if word.lowered[-k:] in v: - return word[:numend] + post - - # HANDLE INCOMPLETELY ASSIMILATED IMPORTS - - if self.classical_dict["ancient"]: - if word.lowered[-4:] == "trix": - return f"{word[:-1]}ces" - if word.lowered[-3:] in ("eau", "ieu"): - return f"{word}x" - if word.lowered[-3:] in ("ynx", "inx", "anx") and len(word) > 4: - return f"{word[:-1]}ges" - - for lastlet, d, numend, post in ( - ("n", pl_sb_C_en_ina_bysize, -2, "ina"), - ("x", pl_sb_C_ex_ices_bysize, -2, "ices"), - ("x", pl_sb_C_ix_ices_bysize, -2, "ices"), - ("m", pl_sb_C_um_a_bysize, -2, "a"), - ("s", pl_sb_C_us_i_bysize, -2, "i"), - ("s", pl_sb_C_us_us_bysize, None, ""), - ("a", pl_sb_C_a_ae_bysize, None, "e"), - ("a", pl_sb_C_a_ata_bysize, None, "ta"), - ("s", pl_sb_C_is_ides_bysize, -1, "des"), - ("o", pl_sb_C_o_i_bysize, -1, "i"), - ("n", pl_sb_C_on_a_bysize, -2, "a"), - ): - if word.lowered[-1] == lastlet: # this test to add speed - for k, v in d.items(): - if word.lowered[-k:] in v: - return word[:numend] + post - - for d, numend, post in ( - (pl_sb_C_i_bysize, None, "i"), - (pl_sb_C_im_bysize, None, "im"), - ): - for k, v in d.items(): - if word.lowered[-k:] in v: - return word[:numend] + post - - # HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SILIBANTS - - if lowered_last in pl_sb_singular_s_complete: - return f"{word}es" - - for k, v in pl_sb_singular_s_bysize.items(): - if word.lowered[-k:] in v: - return f"{word}es" - - if word.lowered[-2:] == "es" and word[0] == word[0].upper(): - return f"{word}es" - - if word.lowered[-1] == "z": - for k, v in pl_sb_z_zes_bysize.items(): - if word.lowered[-k:] in v: - return f"{word}es" - - if word.lowered[-2:-1] != "z": - return f"{word}zes" - - if word.lowered[-2:] == "ze": - for k, v in pl_sb_ze_zes_bysize.items(): - if word.lowered[-k:] in v: - return 
f"{word}s" - - if word.lowered[-2:] in ("ch", "sh", "zz", "ss") or word.lowered[-1] == "x": - return f"{word}es" - - # HANDLE ...f -> ...ves - - if word.lowered[-3:] in ("elf", "alf", "olf"): - return f"{word[:-1]}ves" - if word.lowered[-3:] == "eaf" and word.lowered[-4:-3] != "d": - return f"{word[:-1]}ves" - if word.lowered[-4:] in ("nife", "life", "wife"): - return f"{word[:-2]}ves" - if word.lowered[-3:] == "arf": - return f"{word[:-1]}ves" - - # HANDLE ...y - - if word.lowered[-1] == "y": - if word.lowered[-2:-1] in "aeiou" or len(word) == 1: - return f"{word}s" - - if self.classical_dict["names"]: - if word.lowered[-1] == "y" and word[0] == word[0].upper(): - return f"{word}s" - - return f"{word[:-1]}ies" - - # HANDLE ...o - - if lowered_last in pl_sb_U_o_os_complete: - return f"{word}s" - - for k, v in pl_sb_U_o_os_bysize.items(): - if word.lowered[-k:] in v: - return f"{word}s" - - if word.lowered[-2:] in ("ao", "eo", "io", "oo", "uo"): - return f"{word}s" - - if word.lowered[-1] == "o": - return f"{word}es" - - # OTHERWISE JUST ADD ...s - - return f"{word}s" - - @classmethod - def _handle_prepositional_phrase(cls, phrase, transform, sep): - """ - Given a word or phrase possibly separated by sep, parse out - the prepositional phrase and apply the transform to the word - preceding the prepositional phrase. - - Raise ValueError if the pivot is not found or if at least two - separators are not found. - - >>> engine._handle_prepositional_phrase("man-of-war", str.upper, '-') - 'MAN-of-war' - >>> engine._handle_prepositional_phrase("man of war", str.upper, ' ') - 'MAN of war' - """ - parts = phrase.split(sep) - if len(parts) < 3: - raise ValueError("Cannot handle words with fewer than two separators") - - pivot = cls._find_pivot(parts, pl_prep_list_da) - - transformed = transform(parts[pivot - 1]) or parts[pivot - 1] - return " ".join( - parts[: pivot - 1] + [sep.join([transformed, parts[pivot], ''])] - ) + " ".join(parts[(pivot + 1) :]) - - def _handle_long_compounds(self, word: Words, count: int) -> Union[str, None]: - """ - Handles the plural and singular for compound `Words` that - have three or more words, based on the given count. 
- - >>> engine()._handle_long_compounds(Words("pair of scissors"), 2) - 'pairs of scissors' - >>> engine()._handle_long_compounds(Words("men beyond hills"), 1) - 'man beyond hills' - """ - inflection = self._sinoun if count == 1 else self._plnoun - solutions = ( # type: ignore - " ".join( - itertools.chain( - leader, - [inflection(cand, count), prep], # type: ignore - trailer, - ) - ) - for leader, (cand, prep), trailer in windowed_complete(word.split_, 2) - if prep in pl_prep_list_da # type: ignore - ) - return next(solutions, None) - - @staticmethod - def _find_pivot(words, candidates): - pivots = ( - index for index in range(1, len(words) - 1) if words[index] in candidates - ) - try: - return next(pivots) - except StopIteration: - raise ValueError("No pivot found") from None - - def _pl_special_verb( # noqa: C901 - self, word: str, count: Optional[Union[str, int]] = None - ) -> Union[str, bool]: - if self.classical_dict["zero"] and str(count).lower() in pl_count_zero: - return False - count = self.get_count(count) - - if count == 1: - return word - - # HANDLE USER-DEFINED VERBS - - value = self.ud_match(word, self.pl_v_user_defined) - if value is not None: - return value - - # HANDLE IRREGULAR PRESENT TENSE (SIMPLE AND COMPOUND) - - try: - words = Words(word) - except IndexError: - return False # word is '' - - if words.first in plverb_irregular_pres: - return f"{plverb_irregular_pres[words.first]}{words[len(words.first) :]}" - - # HANDLE IRREGULAR FUTURE, PRETERITE AND PERFECT TENSES - - if words.first in plverb_irregular_non_pres: - return word - - # HANDLE PRESENT NEGATIONS (SIMPLE AND COMPOUND) - - if words.first.endswith("n't") and words.first[:-3] in plverb_irregular_pres: - return ( - f"{plverb_irregular_pres[words.first[:-3]]}n't" - f"{words[len(words.first) :]}" - ) - - if words.first.endswith("n't"): - return word - - # HANDLE SPECIAL CASES - - mo = PLVERB_SPECIAL_S_RE.search(word) - if mo: - return False - if WHITESPACE.search(word): - return False - - if words.lowered == "quizzes": - return "quiz" - - # HANDLE STANDARD 3RD PERSON (CHOP THE ...(e)s OFF SINGLE WORDS) - - if ( - words.lowered[-4:] in ("ches", "shes", "zzes", "sses") - or words.lowered[-3:] == "xes" - ): - return words[:-2] - - if words.lowered[-3:] == "ies" and len(words) > 3: - return words.lowered[:-3] + "y" - - if ( - words.last.lower() in pl_v_oes_oe - or words.lowered[-4:] in pl_v_oes_oe_endings_size4 - or words.lowered[-5:] in pl_v_oes_oe_endings_size5 - ): - return words[:-1] - - if words.lowered.endswith("oes") and len(words) > 3: - return words.lowered[:-2] - - mo = ENDS_WITH_S.search(words) - if mo: - return mo.group(1) - - # OTHERWISE, A REGULAR VERB (HANDLE ELSEWHERE) - - return False - - def _pl_general_verb( - self, word: str, count: Optional[Union[str, int]] = None - ) -> str: - count = self.get_count(count) - - if count == 1: - return word - - # HANDLE AMBIGUOUS PRESENT TENSES (SIMPLE AND COMPOUND) - - mo = plverb_ambiguous_pres_keys.search(word) - if mo: - return f"{plverb_ambiguous_pres[mo.group(1).lower()]}{mo.group(2)}" - - # HANDLE AMBIGUOUS PRETERITE AND PERFECT TENSES - - mo = plverb_ambiguous_non_pres.search(word) - if mo: - return word - - # OTHERWISE, 1st OR 2ND PERSON IS UNINFLECTED - - return word - - def _pl_special_adjective( - self, word: str, count: Optional[Union[str, int]] = None - ) -> Union[str, bool]: - count = self.get_count(count) - - if count == 1: - return word - - # HANDLE USER-DEFINED ADJECTIVES - - value = self.ud_match(word, self.pl_adj_user_defined) - if value is 
not None: - return value - - # HANDLE KNOWN CASES - - mo = pl_adj_special_keys.search(word) - if mo: - return pl_adj_special[mo.group(1).lower()] - - # HANDLE POSSESSIVES - - mo = pl_adj_poss_keys.search(word) - if mo: - return pl_adj_poss[mo.group(1).lower()] - - mo = ENDS_WITH_APOSTROPHE_S.search(word) - if mo: - pl = self.plural_noun(mo.group(1)) - trailing_s = "" if pl[-1] == "s" else "s" - return f"{pl}'{trailing_s}" - - # OTHERWISE, NO IDEA - - return False - - # @profile - def _sinoun( # noqa: C901 - self, - word: str, - count: Optional[Union[str, int]] = None, - gender: Optional[str] = None, - ) -> Union[str, bool]: - count = self.get_count(count) - - # DEFAULT TO PLURAL - - if count == 2: - return word - - # SET THE GENDER - - try: - if gender is None: - gender = self.thegender - elif gender not in singular_pronoun_genders: - raise BadGenderError - except (TypeError, IndexError) as err: - raise BadGenderError from err - - # HANDLE USER-DEFINED NOUNS - - value = self.ud_match(word, self.si_sb_user_defined) - if value is not None: - return value - - # HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS - - if word == "": - return word - - if word in si_sb_ois_oi_case: - return word[:-1] - - words = Words(word) - - if words.last.lower() in pl_sb_uninflected_complete: - if len(words.split_) >= 3: - return self._handle_long_compounds(words, count=1) or word - return word - - if word in pl_sb_uninflected_caps: - return word - - for k, v in pl_sb_uninflected_bysize.items(): - if words.lowered[-k:] in v: - return word - - if self.classical_dict["herd"] and words.last.lower() in pl_sb_uninflected_herd: - return word - - if words.last.lower() in pl_sb_C_us_us: - return word if self.classical_dict["ancient"] else False - - # HANDLE COMPOUNDS ("Governor General", "mother-in-law", "aide-de-camp", ETC.) - - mo = PL_SB_POSTFIX_ADJ_STEMS_RE.search(word) - if mo and mo.group(2) != "": - return f"{self._sinoun(mo.group(1), 1, gender=gender)}{mo.group(2)}" - - with contextlib.suppress(ValueError): - return self._handle_prepositional_phrase( - words.lowered, - functools.partial(self._sinoun, count=1, gender=gender), - ' ', - ) - - with contextlib.suppress(ValueError): - return self._handle_prepositional_phrase( - words.lowered, - functools.partial(self._sinoun, count=1, gender=gender), - '-', - ) - - # HANDLE PRONOUNS - - for k, v in si_pron_acc_keys_bysize.items(): - if words.lowered[-k:] in v: # ends with accusative pronoun - for pk, pv in pl_prep_bysize.items(): - if words.lowered[:pk] in pv: # starts with a prep - if words.lowered.split() == [ - words.lowered[:pk], - words.lowered[-k:], - ]: - # only whitespace in between - return words.lowered[:-k] + get_si_pron( - "acc", words.lowered[-k:], gender - ) - - try: - return get_si_pron("nom", words.lowered, gender) - except KeyError: - pass - - try: - return get_si_pron("acc", words.lowered, gender) - except KeyError: - pass - - # HANDLE ISOLATED IRREGULAR PLURALS - - if words.last in si_sb_irregular_caps: - llen = len(words.last) - return f"{word[:-llen]}{si_sb_irregular_caps[words.last]}" - - if words.last.lower() in si_sb_irregular: - llen = len(words.last.lower()) - return f"{word[:-llen]}{si_sb_irregular[words.last.lower()]}" - - dash_split = words.lowered.split("-") - if (" ".join(dash_split[-2:])).lower() in si_sb_irregular_compound: - llen = len( - " ".join(dash_split[-2:]) - ) # TODO: what if 2 spaces between these words? 
- return "{}{}".format( - word[:-llen], - si_sb_irregular_compound[(" ".join(dash_split[-2:])).lower()], - ) - - if words.lowered[-5:] == "quies": - return word[:-3] + "y" - - if words.lowered[-7:] == "persons": - return word[:-1] - if words.lowered[-6:] == "people": - return word[:-4] + "rson" - - # HANDLE FAMILIES OF IRREGULAR PLURALS - - if words.lowered[-4:] == "mans": - for k, v in si_sb_U_man_mans_bysize.items(): - if words.lowered[-k:] in v: - return word[:-1] - for k, v in si_sb_U_man_mans_caps_bysize.items(): - if word[-k:] in v: - return word[:-1] - if words.lowered[-3:] == "men": - return word[:-3] + "man" - if words.lowered[-4:] == "mice": - return word[:-4] + "mouse" - if words.lowered[-4:] == "lice": - v = si_sb_U_louse_lice_bysize.get(len(word)) - if v and words.lowered in v: - return word[:-4] + "louse" - if words.lowered[-5:] == "geese": - return word[:-5] + "goose" - if words.lowered[-5:] == "teeth": - return word[:-5] + "tooth" - if words.lowered[-4:] == "feet": - return word[:-4] + "foot" - - if words.lowered == "dice": - return "die" - - # HANDLE UNASSIMILATED IMPORTS - - if words.lowered[-4:] == "ceps": - return word - if words.lowered[-3:] == "zoa": - return word[:-1] + "on" - - for lastlet, d, unass_numend, post in ( - ("s", si_sb_U_ch_chs_bysize, -1, ""), - ("s", si_sb_U_ex_ices_bysize, -4, "ex"), - ("s", si_sb_U_ix_ices_bysize, -4, "ix"), - ("a", si_sb_U_um_a_bysize, -1, "um"), - ("i", si_sb_U_us_i_bysize, -1, "us"), - ("a", si_sb_U_on_a_bysize, -1, "on"), - ("e", si_sb_U_a_ae_bysize, -1, ""), - ): - if words.lowered[-1] == lastlet: # this test to add speed - for k, v in d.items(): - if words.lowered[-k:] in v: - return word[:unass_numend] + post - - # HANDLE INCOMPLETELY ASSIMILATED IMPORTS - - if self.classical_dict["ancient"]: - if words.lowered[-6:] == "trices": - return word[:-3] + "x" - if words.lowered[-4:] in ("eaux", "ieux"): - return word[:-1] - if words.lowered[-5:] in ("ynges", "inges", "anges") and len(word) > 6: - return word[:-3] + "x" - - for lastlet, d, class_numend, post in ( - ("a", si_sb_C_en_ina_bysize, -3, "en"), - ("s", si_sb_C_ex_ices_bysize, -4, "ex"), - ("s", si_sb_C_ix_ices_bysize, -4, "ix"), - ("a", si_sb_C_um_a_bysize, -1, "um"), - ("i", si_sb_C_us_i_bysize, -1, "us"), - ("s", pl_sb_C_us_us_bysize, None, ""), - ("e", si_sb_C_a_ae_bysize, -1, ""), - ("a", si_sb_C_a_ata_bysize, -2, ""), - ("s", si_sb_C_is_ides_bysize, -3, "s"), - ("i", si_sb_C_o_i_bysize, -1, "o"), - ("a", si_sb_C_on_a_bysize, -1, "on"), - ("m", si_sb_C_im_bysize, -2, ""), - ("i", si_sb_C_i_bysize, -1, ""), - ): - if words.lowered[-1] == lastlet: # this test to add speed - for k, v in d.items(): - if words.lowered[-k:] in v: - return word[:class_numend] + post - - # HANDLE PLURLS ENDING IN uses -> use - - if ( - words.lowered[-6:] == "houses" - or word in si_sb_uses_use_case - or words.last.lower() in si_sb_uses_use - ): - return word[:-1] - - # HANDLE PLURLS ENDING IN ies -> ie - - if word in si_sb_ies_ie_case or words.last.lower() in si_sb_ies_ie: - return word[:-1] - - # HANDLE PLURLS ENDING IN oes -> oe - - if ( - words.lowered[-5:] == "shoes" - or word in si_sb_oes_oe_case - or words.last.lower() in si_sb_oes_oe - ): - return word[:-1] - - # HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SILIBANTS - - if word in si_sb_sses_sse_case or words.last.lower() in si_sb_sses_sse: - return word[:-1] - - if words.last.lower() in si_sb_singular_s_complete: - return word[:-2] - - for k, v in si_sb_singular_s_bysize.items(): - if words.lowered[-k:] in v: - return word[:-2] - - if 
words.lowered[-4:] == "eses" and word[0] == word[0].upper(): - return word[:-2] - - if words.last.lower() in si_sb_z_zes: - return word[:-2] - - if words.last.lower() in si_sb_zzes_zz: - return word[:-2] - - if words.lowered[-4:] == "zzes": - return word[:-3] - - if word in si_sb_ches_che_case or words.last.lower() in si_sb_ches_che: - return word[:-1] - - if words.lowered[-4:] in ("ches", "shes"): - return word[:-2] - - if words.last.lower() in si_sb_xes_xe: - return word[:-1] - - if words.lowered[-3:] == "xes": - return word[:-2] - - # HANDLE ...f -> ...ves - - if word in si_sb_ves_ve_case or words.last.lower() in si_sb_ves_ve: - return word[:-1] - - if words.lowered[-3:] == "ves": - if words.lowered[-5:-3] in ("el", "al", "ol"): - return word[:-3] + "f" - if words.lowered[-5:-3] == "ea" and word[-6:-5] != "d": - return word[:-3] + "f" - if words.lowered[-5:-3] in ("ni", "li", "wi"): - return word[:-3] + "fe" - if words.lowered[-5:-3] == "ar": - return word[:-3] + "f" - - # HANDLE ...y - - if words.lowered[-2:] == "ys": - if len(words.lowered) > 2 and words.lowered[-3] in "aeiou": - return word[:-1] - - if self.classical_dict["names"]: - if words.lowered[-2:] == "ys" and word[0] == word[0].upper(): - return word[:-1] - - if words.lowered[-3:] == "ies": - return word[:-3] + "y" - - # HANDLE ...o - - if words.lowered[-2:] == "os": - if words.last.lower() in si_sb_U_o_os_complete: - return word[:-1] - - for k, v in si_sb_U_o_os_bysize.items(): - if words.lowered[-k:] in v: - return word[:-1] - - if words.lowered[-3:] in ("aos", "eos", "ios", "oos", "uos"): - return word[:-1] - - if words.lowered[-3:] == "oes": - return word[:-2] - - # UNASSIMILATED IMPORTS FINAL RULE - - if word in si_sb_es_is: - return word[:-2] + "is" - - # OTHERWISE JUST REMOVE ...s - - if words.lowered[-1] == "s": - return word[:-1] - - # COULD NOT FIND SINGULAR - - return False - - # ADJECTIVES - - @typechecked - def a(self, text: Word, count: Optional[Union[int, str, Any]] = 1) -> str: - """ - Return the appropriate indefinite article followed by text. - - The indefinite article is either 'a' or 'an'. - - If count is not one, then return count followed by text - instead of 'a' or 'an'. - - Whitespace at the start and end is preserved. - - """ - mo = INDEFINITE_ARTICLE_TEST.search(text) - if mo: - word = mo.group(2) - if not word: - return text - pre = mo.group(1) - post = mo.group(3) - result = self._indef_article(word, count) - return f"{pre}{result}{post}" - return "" - - an = a - - _indef_article_cases = ( - # HANDLE ORDINAL FORMS - (A_ordinal_a, "a"), - (A_ordinal_an, "an"), - # HANDLE SPECIAL CASES - (A_explicit_an, "an"), - (SPECIAL_AN, "an"), - (SPECIAL_A, "a"), - # HANDLE ABBREVIATIONS - (A_abbrev, "an"), - (SPECIAL_ABBREV_AN, "an"), - (SPECIAL_ABBREV_A, "a"), - # HANDLE CONSONANTS - (CONSONANTS, "a"), - # HANDLE SPECIAL VOWEL-FORMS - (ARTICLE_SPECIAL_EU, "a"), - (ARTICLE_SPECIAL_ONCE, "a"), - (ARTICLE_SPECIAL_ONETIME, "a"), - (ARTICLE_SPECIAL_UNIT, "a"), - (ARTICLE_SPECIAL_UBA, "a"), - (ARTICLE_SPECIAL_UKR, "a"), - (A_explicit_a, "a"), - # HANDLE SPECIAL CAPITALS - (SPECIAL_CAPITALS, "a"), - # HANDLE VOWELS - (VOWELS, "an"), - # HANDLE y... - # (BEFORE CERTAIN CONSONANTS IMPLIES (UNNATURALIZED) "i.." 
SOUND) - (A_y_cons, "an"), - ) - - def _indef_article(self, word: str, count: Union[int, str, Any]) -> str: - mycount = self.get_count(count) - - if mycount != 1: - return f"{count} {word}" - - # HANDLE USER-DEFINED VARIANTS - - value = self.ud_match(word, self.A_a_user_defined) - if value is not None: - return f"{value} {word}" - - matches = ( - f'{article} {word}' - for regexen, article in self._indef_article_cases - if regexen.search(word) - ) - - # OTHERWISE, GUESS "a" - fallback = f'a {word}' - return next(matches, fallback) - - # 2. TRANSLATE ZERO-QUANTIFIED $word TO "no plural($word)" - - @typechecked - def no(self, text: Word, count: Optional[Union[int, str]] = None) -> str: - """ - If count is 0, no, zero or nil, return 'no' followed by the plural - of text. - - If count is one of: - 1, a, an, one, each, every, this, that - return count followed by text. - - Otherwise return count follow by the plural of text. - - In the return value count is always followed by a space. - - Whitespace at the start and end is preserved. - - """ - if count is None and self.persistent_count is not None: - count = self.persistent_count - - if count is None: - count = 0 - mo = PARTITION_WORD.search(text) - if mo: - pre = mo.group(1) - word = mo.group(2) - post = mo.group(3) - else: - pre = "" - word = "" - post = "" - - if str(count).lower() in pl_count_zero: - count = 'no' - return f"{pre}{count} {self.plural(word, count)}{post}" - - # PARTICIPLES - - @typechecked - def present_participle(self, word: Word) -> str: - """ - Return the present participle for word. - - word is the 3rd person singular verb. - - """ - plv = self.plural_verb(word, 2) - ans = plv - - for regexen, repl in PRESENT_PARTICIPLE_REPLACEMENTS: - ans, num = regexen.subn(repl, plv) - if num: - return f"{ans}ing" - return f"{ans}ing" - - # NUMERICAL INFLECTIONS - - @typechecked - def ordinal(self, num: Union[Number, Word]) -> str: - """ - Return the ordinal of num. - - >>> ordinal = engine().ordinal - >>> ordinal(1) - '1st' - >>> ordinal('one') - 'first' - """ - if DIGIT.match(str(num)): - if isinstance(num, (float, int)) and int(num) == num: - n = int(num) - else: - if "." 
in str(num): - try: - # numbers after decimal, - # so only need last one for ordinal - n = int(str(num)[-1]) - - except ValueError: # ends with '.', so need to use whole string - n = int(str(num)[:-1]) - else: - n = int(num) # type: ignore - try: - post = nth[n % 100] - except KeyError: - post = nth[n % 10] - return f"{num}{post}" - else: - return self._sub_ord(num) - - def millfn(self, ind: int = 0) -> str: - if ind > len(mill) - 1: - raise NumOutOfRangeError - return mill[ind] - - def unitfn(self, units: int, mindex: int = 0) -> str: - return f"{unit[units]}{self.millfn(mindex)}" - - def tenfn(self, tens, units, mindex=0) -> str: - if tens != 1: - tens_part = ten[tens] - if tens and units: - hyphen = "-" - else: - hyphen = "" - unit_part = unit[units] - mill_part = self.millfn(mindex) - return f"{tens_part}{hyphen}{unit_part}{mill_part}" - return f"{teen[units]}{mill[mindex]}" - - def hundfn(self, hundreds: int, tens: int, units: int, mindex: int) -> str: - if hundreds: - andword = f" {self._number_args['andword']} " if tens or units else "" - # use unit not unitfn as simpler - return ( - f"{unit[hundreds]} hundred{andword}" - f"{self.tenfn(tens, units)}{self.millfn(mindex)}, " - ) - if tens or units: - return f"{self.tenfn(tens, units)}{self.millfn(mindex)}, " - return "" - - def group1sub(self, mo: Match) -> str: - units = int(mo.group(1)) - if units == 1: - return f" {self._number_args['one']}, " - elif units: - return f"{unit[units]}, " - else: - return f" {self._number_args['zero']}, " - - def group1bsub(self, mo: Match) -> str: - units = int(mo.group(1)) - if units: - return f"{unit[units]}, " - else: - return f" {self._number_args['zero']}, " - - def group2sub(self, mo: Match) -> str: - tens = int(mo.group(1)) - units = int(mo.group(2)) - if tens: - return f"{self.tenfn(tens, units)}, " - if units: - return f" {self._number_args['zero']} {unit[units]}, " - return f" {self._number_args['zero']} {self._number_args['zero']}, " - - def group3sub(self, mo: Match) -> str: - hundreds = int(mo.group(1)) - tens = int(mo.group(2)) - units = int(mo.group(3)) - if hundreds == 1: - hunword = f" {self._number_args['one']}" - elif hundreds: - hunword = str(unit[hundreds]) - else: - hunword = f" {self._number_args['zero']}" - if tens: - tenword = self.tenfn(tens, units) - elif units: - tenword = f" {self._number_args['zero']} {unit[units]}" - else: - tenword = f" {self._number_args['zero']} {self._number_args['zero']}" - return f"{hunword} {tenword}, " - - def hundsub(self, mo: Match) -> str: - ret = self.hundfn( - int(mo.group(1)), int(mo.group(2)), int(mo.group(3)), self.mill_count - ) - self.mill_count += 1 - return ret - - def tensub(self, mo: Match) -> str: - return f"{self.tenfn(int(mo.group(1)), int(mo.group(2)), self.mill_count)}, " - - def unitsub(self, mo: Match) -> str: - return f"{self.unitfn(int(mo.group(1)), self.mill_count)}, " - - def enword(self, num: str, group: int) -> str: - # import pdb - # pdb.set_trace() - - if group == 1: - num = DIGIT_GROUP.sub(self.group1sub, num) - elif group == 2: - num = TWO_DIGITS.sub(self.group2sub, num) - num = DIGIT_GROUP.sub(self.group1bsub, num, 1) - elif group == 3: - num = THREE_DIGITS.sub(self.group3sub, num) - num = TWO_DIGITS.sub(self.group2sub, num, 1) - num = DIGIT_GROUP.sub(self.group1sub, num, 1) - elif int(num) == 0: - num = self._number_args["zero"] - elif int(num) == 1: - num = self._number_args["one"] - else: - num = num.lstrip().lstrip("0") - self.mill_count = 0 - # surely there's a better way to do the next bit - mo = 
THREE_DIGITS_WORD.search(num) - while mo: - num = THREE_DIGITS_WORD.sub(self.hundsub, num, 1) - mo = THREE_DIGITS_WORD.search(num) - num = TWO_DIGITS_WORD.sub(self.tensub, num, 1) - num = ONE_DIGIT_WORD.sub(self.unitsub, num, 1) - return num - - @staticmethod - def _sub_ord(val): - new = ordinal_suff.sub(lambda match: ordinal[match.group(1)], val) - return new + "th" * (new == val) - - @classmethod - def _chunk_num(cls, num, decimal, group): - if decimal: - max_split = -1 if group != 0 else 1 - chunks = num.split(".", max_split) - else: - chunks = [num] - return cls._remove_last_blank(chunks) - - @staticmethod - def _remove_last_blank(chunks): - """ - Remove the last item from chunks if it's a blank string. - - Return the resultant chunks and whether the last item was removed. - """ - removed = chunks[-1] == "" - result = chunks[:-1] if removed else chunks - return result, removed - - @staticmethod - def _get_sign(num): - return {'+': 'plus', '-': 'minus'}.get(num.lstrip()[0], '') - - @typechecked - def number_to_words( # noqa: C901 - self, - num: Union[Number, Word], - wantlist: bool = False, - group: int = 0, - comma: Union[Falsish, str] = ",", - andword: str = "and", - zero: str = "zero", - one: str = "one", - decimal: Union[Falsish, str] = "point", - threshold: Optional[int] = None, - ) -> Union[str, List[str]]: - """ - Return a number in words. - - group = 1, 2 or 3 to group numbers before turning into words - comma: define comma - - andword: - word for 'and'. Can be set to ''. - e.g. "one hundred and one" vs "one hundred one" - - zero: word for '0' - one: word for '1' - decimal: word for decimal point - threshold: numbers above threshold not turned into words - - parameters not remembered from last call. Departure from Perl version. - """ - self._number_args = {"andword": andword, "zero": zero, "one": one} - num = str(num) - - # Handle "stylistic" conversions (up to a given threshold)... 
- if threshold is not None and float(num) > threshold: - spnum = num.split(".", 1) - while comma: - (spnum[0], n) = FOUR_DIGIT_COMMA.subn(r"\1,\2", spnum[0]) - if n == 0: - break - try: - return f"{spnum[0]}.{spnum[1]}" - except IndexError: - return str(spnum[0]) - - if group < 0 or group > 3: - raise BadChunkingOptionError - - sign = self._get_sign(num) - - if num in nth_suff: - num = zero - - myord = num[-2:] in nth_suff - if myord: - num = num[:-2] - - chunks, finalpoint = self._chunk_num(num, decimal, group) - - loopstart = chunks[0] == "" - first: bool | None = not loopstart - - def _handle_chunk(chunk): - nonlocal first - - # remove all non numeric \D - chunk = NON_DIGIT.sub("", chunk) - if chunk == "": - chunk = "0" - - if group == 0 and not first: - chunk = self.enword(chunk, 1) - else: - chunk = self.enword(chunk, group) - - if chunk[-2:] == ", ": - chunk = chunk[:-2] - chunk = WHITESPACES_COMMA.sub(",", chunk) - - if group == 0 and first: - chunk = COMMA_WORD.sub(f" {andword} \\1", chunk) - chunk = WHITESPACES.sub(" ", chunk) - # chunk = re.sub(r"(\A\s|\s\Z)", self.blankfn, chunk) - chunk = chunk.strip() - if first: - first = None - return chunk - - chunks[loopstart:] = map(_handle_chunk, chunks[loopstart:]) - - numchunks = [] - if first != 0: - numchunks = chunks[0].split(f"{comma} ") - - if myord and numchunks: - numchunks[-1] = self._sub_ord(numchunks[-1]) - - for chunk in chunks[1:]: - numchunks.append(decimal) - numchunks.extend(chunk.split(f"{comma} ")) - - if finalpoint: - numchunks.append(decimal) - - if wantlist: - return [sign] * bool(sign) + numchunks - - signout = f"{sign} " if sign else "" - valout = ( - ', '.join(numchunks) - if group - else ''.join(self._render(numchunks, decimal, comma)) - ) - return signout + valout - - @staticmethod - def _render(chunks, decimal, comma): - first_item = chunks.pop(0) - yield first_item - first = decimal is None or not first_item.endswith(decimal) - for nc in chunks: - if nc == decimal: - first = False - elif first: - yield comma - yield f" {nc}" - - @typechecked - def join( - self, - words: Optional[Sequence[Word]], - sep: Optional[str] = None, - sep_spaced: bool = True, - final_sep: Optional[str] = None, - conj: str = "and", - conj_spaced: bool = True, - ) -> str: - """ - Join words into a list. - - e.g. join(['ant', 'bee', 'fly']) returns 'ant, bee, and fly' - - options: - conj: replacement for 'and' - sep: separator. default ',', unless ',' is in the list then ';' - final_sep: final separator. default ',', unless ',' is in the list then ';' - conj_spaced: boolean. 
Should conj have spaces around it - - """ - if not words: - return "" - if len(words) == 1: - return words[0] - - if conj_spaced: - if conj == "": - conj = " " - else: - conj = f" {conj} " - - if len(words) == 2: - return f"{words[0]}{conj}{words[1]}" - - if sep is None: - if "," in "".join(words): - sep = ";" - else: - sep = "," - if final_sep is None: - final_sep = sep - - final_sep = f"{final_sep}{conj}" - - if sep_spaced: - sep += " " - - return f"{sep.join(words[0:-1])}{final_sep}{words[-1]}" diff --git a/setuptools/_vendor/inflect/compat/py38.py b/setuptools/_vendor/inflect/compat/py38.py deleted file mode 100644 index a2d01bd98f..0000000000 --- a/setuptools/_vendor/inflect/compat/py38.py +++ /dev/null @@ -1,7 +0,0 @@ -import sys - - -if sys.version_info > (3, 9): - from typing import Annotated -else: # pragma: no cover - from typing_extensions import Annotated # noqa: F401 diff --git a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/INSTALLER b/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/LICENSE b/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/LICENSE deleted file mode 100644 index 1bb5a44356..0000000000 --- a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. diff --git a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/METADATA b/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/METADATA deleted file mode 100644 index fe6ca5ad88..0000000000 --- a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/METADATA +++ /dev/null @@ -1,85 +0,0 @@ -Metadata-Version: 2.1 -Name: jaraco.collections -Version: 5.1.0 -Summary: Collection objects similar to those in stdlib by jaraco -Author-email: "Jason R. 
Coombs" -Project-URL: Source, https://github.com/jaraco/jaraco.collections -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3 :: Only -Requires-Python: >=3.8 -Description-Content-Type: text/x-rst -License-File: LICENSE -Requires-Dist: jaraco.text -Provides-Extra: check -Requires-Dist: pytest-checkdocs >=2.4 ; extra == 'check' -Requires-Dist: pytest-ruff >=0.2.1 ; (sys_platform != "cygwin") and extra == 'check' -Provides-Extra: cover -Requires-Dist: pytest-cov ; extra == 'cover' -Provides-Extra: doc -Requires-Dist: sphinx >=3.5 ; extra == 'doc' -Requires-Dist: jaraco.packaging >=9.3 ; extra == 'doc' -Requires-Dist: rst.linker >=1.9 ; extra == 'doc' -Requires-Dist: furo ; extra == 'doc' -Requires-Dist: sphinx-lint ; extra == 'doc' -Requires-Dist: jaraco.tidelift >=1.4 ; extra == 'doc' -Provides-Extra: enabler -Requires-Dist: pytest-enabler >=2.2 ; extra == 'enabler' -Provides-Extra: test -Requires-Dist: pytest !=8.1.*,>=6 ; extra == 'test' -Provides-Extra: type -Requires-Dist: pytest-mypy ; extra == 'type' - -.. image:: https://img.shields.io/pypi/v/jaraco.collections.svg - :target: https://pypi.org/project/jaraco.collections - -.. image:: https://img.shields.io/pypi/pyversions/jaraco.collections.svg - -.. image:: https://github.com/jaraco/jaraco.collections/actions/workflows/main.yml/badge.svg - :target: https://github.com/jaraco/jaraco.collections/actions?query=workflow%3A%22tests%22 - :alt: tests - -.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json - :target: https://github.com/astral-sh/ruff - :alt: Ruff - -.. image:: https://readthedocs.org/projects/jaracocollections/badge/?version=latest - :target: https://jaracocollections.readthedocs.io/en/latest/?badge=latest - -.. image:: https://img.shields.io/badge/skeleton-2024-informational - :target: https://blog.jaraco.com/skeleton - -.. image:: https://tidelift.com/badges/package/pypi/jaraco.collections - :target: https://tidelift.com/subscription/pkg/pypi-jaraco.collections?utm_source=pypi-jaraco.collections&utm_medium=readme - -Models and classes to supplement the stdlib 'collections' module. - -See the docs, linked above, for descriptions and usage examples. - -Highlights include: - -- RangeMap: A mapping that accepts a range of values for keys. -- Projection: A subset over an existing mapping. -- KeyTransformingDict: Generalized mapping with keys transformed by a function. -- FoldedCaseKeyedDict: A dict whose string keys are case-insensitive. -- BijectiveMap: A map where keys map to values and values back to their keys. -- ItemsAsAttributes: A mapping mix-in exposing items as attributes. -- IdentityOverrideMap: A map whose keys map by default to themselves unless overridden. -- FrozenDict: A hashable, immutable map. -- Enumeration: An object whose keys are enumerated. -- Everything: A container that contains all things. -- Least, Greatest: Objects that are always less than or greater than any other. -- pop_all: Return all items from the mutable sequence and remove them from that sequence. -- DictStack: A stack of dicts, great for sharing scopes. -- WeightedLookup: A specialized RangeMap for selecting an item by weights. - -For Enterprise -============== - -Available as part of the Tidelift Subscription. 
- -This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. - -`Learn more `_. diff --git a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/RECORD b/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/RECORD deleted file mode 100644 index 48b957ec88..0000000000 --- a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/RECORD +++ /dev/null @@ -1,10 +0,0 @@ -jaraco.collections-5.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -jaraco.collections-5.1.0.dist-info/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 -jaraco.collections-5.1.0.dist-info/METADATA,sha256=IMUaliNsA5X1Ox9MXUWOagch5R4Wwb_3M7erp29dBtg,3933 -jaraco.collections-5.1.0.dist-info/RECORD,, -jaraco.collections-5.1.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -jaraco.collections-5.1.0.dist-info/WHEEL,sha256=Mdi9PDNwEZptOjTlUcAth7XJDFtKrHYaQMPulZeBCiQ,91 -jaraco.collections-5.1.0.dist-info/top_level.txt,sha256=0JnN3LfXH4LIRfXL-QFOGCJzQWZO3ELx4R1d_louoQM,7 -jaraco/collections/__init__.py,sha256=Pc1-SqjWm81ad1P0-GttpkwO_LWlnaY6gUq8gcKh2v0,26640 -jaraco/collections/__pycache__/__init__.cpython-312.pyc,, -jaraco/collections/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/setuptools/_vendor/jaraco.context-5.3.0.dist-info/INSTALLER b/setuptools/_vendor/jaraco.context-5.3.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/jaraco.context-5.3.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/jaraco.context-5.3.0.dist-info/LICENSE b/setuptools/_vendor/jaraco.context-5.3.0.dist-info/LICENSE deleted file mode 100644 index 1bb5a44356..0000000000 --- a/setuptools/_vendor/jaraco.context-5.3.0.dist-info/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. 
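The jaraco.collections code removed here is the same code published on PyPI, so the helpers named in the deleted METADATA remain importable. A minimal sketch of two of them, RangeMap and FoldedCaseKeyedDict, assuming the PyPI jaraco.collections package is installed rather than this vendored copy; the behavior shown mirrors the docstrings in the deleted source further below:

from jaraco.collections import FoldedCaseKeyedDict, RangeMap

# RangeMap: each key is the (by default inclusive) upper bound of a range,
# so a lookup resolves to the first sorted key the query compares <= to.
grades = RangeMap({59: 'F', 69: 'D', 79: 'C', 89: 'B', 100: 'A'})
assert grades[85] == 'B'
assert grades[60] == 'D'

# FoldedCaseKeyedDict: string keys compare case-insensitively,
# while the originally-supplied spelling is retained.
headers = FoldedCaseKeyedDict()
headers['Content-Type'] = 'text/plain'
assert headers['content-type'] == 'text/plain'
assert list(headers) == ['Content-Type']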
diff --git a/setuptools/_vendor/jaraco.context-5.3.0.dist-info/RECORD b/setuptools/_vendor/jaraco.context-5.3.0.dist-info/RECORD deleted file mode 100644 index 09d191f214..0000000000 --- a/setuptools/_vendor/jaraco.context-5.3.0.dist-info/RECORD +++ /dev/null @@ -1,8 +0,0 @@ -jaraco.context-5.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -jaraco.context-5.3.0.dist-info/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 -jaraco.context-5.3.0.dist-info/METADATA,sha256=xDtguJej0tN9iEXCUvxEJh2a7xceIRVBEakBLSr__tY,4020 -jaraco.context-5.3.0.dist-info/RECORD,, -jaraco.context-5.3.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 -jaraco.context-5.3.0.dist-info/top_level.txt,sha256=0JnN3LfXH4LIRfXL-QFOGCJzQWZO3ELx4R1d_louoQM,7 -jaraco/__pycache__/context.cpython-312.pyc,, -jaraco/context.py,sha256=REoLIxDkO5MfEYowt_WoupNCRoxBS5v7YX2PbW8lIcs,9552 diff --git a/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/INSTALLER b/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/LICENSE b/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/LICENSE deleted file mode 100644 index 1bb5a44356..0000000000 --- a/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. 
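Every RECORD touched in this diff uses the wheel RECORD convention: path, then sha256= plus the urlsafe-base64 SHA-256 digest with '=' padding stripped, then the size in bytes. That is why the pip-to-uv INSTALLER flip changes the first hash in each rewritten RECORD: the old rows end ...,4 for the four bytes of 'pip' plus the trailing newline the size implies, while the new rows end 5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 for the two bytes 'uv' with no newline (hence the '\ No newline at end of file' markers). A sketch of the derivation; record_entry is a hypothetical helper for illustration:

import base64
import hashlib

def record_entry(path: str, data: bytes) -> str:
    # urlsafe base64 of the SHA-256 digest, '=' padding stripped,
    # as wheel RECORD files store it
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b'=')
    return f"{path},sha256={digest.decode()},{len(data)}"

# Reproduce the old (pip) and new (uv) INSTALLER rows seen in this diff:
print(record_entry('jaraco.context-5.3.0.dist-info/INSTALLER', b'pip\n'))
print(record_entry('jaraco.context-5.3.0.dist-info/INSTALLER', b'uv'))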
diff --git a/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/RECORD b/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/RECORD deleted file mode 100644 index ef3bc21e92..0000000000 --- a/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/RECORD +++ /dev/null @@ -1,10 +0,0 @@ -jaraco.functools-4.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -jaraco.functools-4.0.1.dist-info/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 -jaraco.functools-4.0.1.dist-info/METADATA,sha256=i4aUaQDX-jjdEQK5wevhegyx8JyLfin2HyvaSk3FHso,2891 -jaraco.functools-4.0.1.dist-info/RECORD,, -jaraco.functools-4.0.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 -jaraco.functools-4.0.1.dist-info/top_level.txt,sha256=0JnN3LfXH4LIRfXL-QFOGCJzQWZO3ELx4R1d_louoQM,7 -jaraco/functools/__init__.py,sha256=hEAJaS2uSZRuF_JY4CxCHIYh79ZpxaPp9OiHyr9EJ1w,16642 -jaraco/functools/__init__.pyi,sha256=gk3dsgHzo5F_U74HzAvpNivFAPCkPJ1b2-yCd62dfnw,3878 -jaraco/functools/__pycache__/__init__.cpython-312.pyc,, -jaraco/functools/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/WHEEL b/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/WHEEL deleted file mode 100644 index bab98d6758..0000000000 --- a/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.43.0) -Root-Is-Purelib: true -Tag: py3-none-any - diff --git a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/INSTALLER b/setuptools/_vendor/jaraco.text-3.12.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/LICENSE b/setuptools/_vendor/jaraco.text-3.12.1.dist-info/LICENSE deleted file mode 100644 index 1bb5a44356..0000000000 --- a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. 
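The inflect engine deleted earlier in this diff is the counterpart of the jaraco.text bump just below: jaraco.text 4.0.0 demotes inflect from a hard dependency to an extra, which is what lets the vendored tree drop it. A usage sketch against the PyPI inflect package, mirroring the docstrings in the deleted source:

import inflect

p = inflect.engine()

# number_to_words: andword defaults to 'and' and may be blanked,
# per the deleted docstring's "one hundred and one" example.
assert p.number_to_words(101) == 'one hundred and one'
assert p.number_to_words(101, andword='') == 'one hundred one'

# join: serial-comma list joining, the deleted docstring's own example.
assert p.join(['ant', 'bee', 'fly']) == 'ant, bee, and fly'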
diff --git a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/RECORD b/setuptools/_vendor/jaraco.text-3.12.1.dist-info/RECORD deleted file mode 100644 index 19e2d8402a..0000000000 --- a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/RECORD +++ /dev/null @@ -1,20 +0,0 @@ -jaraco.text-3.12.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -jaraco.text-3.12.1.dist-info/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 -jaraco.text-3.12.1.dist-info/METADATA,sha256=AzWdm6ViMfDOPoQMfLWn2zgBQSGJScyqeN29TcuWXVI,3658 -jaraco.text-3.12.1.dist-info/RECORD,, -jaraco.text-3.12.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -jaraco.text-3.12.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 -jaraco.text-3.12.1.dist-info/top_level.txt,sha256=0JnN3LfXH4LIRfXL-QFOGCJzQWZO3ELx4R1d_louoQM,7 -jaraco/text/Lorem ipsum.txt,sha256=N_7c_79zxOufBY9HZ3yzMgOkNv-TkOTTio4BydrSjgs,1335 -jaraco/text/__init__.py,sha256=Y2YUqXR_orUoDaY4SkPRe6ZZhb5HUHB_Ah9RCNsVyho,16250 -jaraco/text/__pycache__/__init__.cpython-312.pyc,, -jaraco/text/__pycache__/layouts.cpython-312.pyc,, -jaraco/text/__pycache__/show-newlines.cpython-312.pyc,, -jaraco/text/__pycache__/strip-prefix.cpython-312.pyc,, -jaraco/text/__pycache__/to-dvorak.cpython-312.pyc,, -jaraco/text/__pycache__/to-qwerty.cpython-312.pyc,, -jaraco/text/layouts.py,sha256=HTC8aSTLZ7uXipyOXapRMC158juecjK6RVwitfmZ9_w,643 -jaraco/text/show-newlines.py,sha256=WGQa65e8lyhb92LUOLqVn6KaCtoeVgVws6WtSRmLk6w,904 -jaraco/text/strip-prefix.py,sha256=NfVXV8JVNo6nqcuYASfMV7_y4Eo8zMQqlCOGvAnRIVw,412 -jaraco/text/to-dvorak.py,sha256=1SNcbSsvISpXXg-LnybIHHY-RUFOQr36zcHkY1pWFqw,119 -jaraco/text/to-qwerty.py,sha256=s4UMQUnPwFn_dB5uZC27BurHOQcYondBfzIpVL5pEzw,119 diff --git a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/WHEEL b/setuptools/_vendor/jaraco.text-3.12.1.dist-info/WHEEL deleted file mode 100644 index bab98d6758..0000000000 --- a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.43.0) -Root-Is-Purelib: true -Tag: py3-none-any - diff --git a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/top_level.txt b/setuptools/_vendor/jaraco.text-3.12.1.dist-info/top_level.txt deleted file mode 100644 index f6205a5f19..0000000000 --- a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -jaraco diff --git a/setuptools/_vendor/jaraco.text-4.0.0.dist-info/INSTALLER b/setuptools/_vendor/jaraco.text-4.0.0.dist-info/INSTALLER new file mode 100644 index 0000000000..5c69047b2e --- /dev/null +++ b/setuptools/_vendor/jaraco.text-4.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/setuptools/_vendor/inflect-7.3.1.dist-info/LICENSE b/setuptools/_vendor/jaraco.text-4.0.0.dist-info/LICENSE similarity index 100% rename from setuptools/_vendor/inflect-7.3.1.dist-info/LICENSE rename to setuptools/_vendor/jaraco.text-4.0.0.dist-info/LICENSE diff --git a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/METADATA b/setuptools/_vendor/jaraco.text-4.0.0.dist-info/METADATA similarity index 93% rename from setuptools/_vendor/jaraco.text-3.12.1.dist-info/METADATA rename to setuptools/_vendor/jaraco.text-4.0.0.dist-info/METADATA index 0258a380f4..797b9da733 100644 --- a/setuptools/_vendor/jaraco.text-3.12.1.dist-info/METADATA +++ b/setuptools/_vendor/jaraco.text-4.0.0.dist-info/METADATA @@ -1,9 +1,9 @@ Metadata-Version: 2.1 Name: jaraco.text -Version: 3.12.1 +Version: 4.0.0 Summary: Module 
for text manipulation Author-email: "Jason R. Coombs" -Project-URL: Homepage, https://github.com/jaraco/jaraco.text +Project-URL: Source, https://github.com/jaraco/jaraco.text Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License @@ -15,7 +15,6 @@ License-File: LICENSE Requires-Dist: jaraco.functools Requires-Dist: jaraco.context >=4.1 Requires-Dist: autocommand -Requires-Dist: inflect Requires-Dist: more-itertools Requires-Dist: importlib-resources ; python_version < "3.9" Provides-Extra: doc @@ -25,14 +24,16 @@ Requires-Dist: rst.linker >=1.9 ; extra == 'doc' Requires-Dist: furo ; extra == 'doc' Requires-Dist: sphinx-lint ; extra == 'doc' Requires-Dist: jaraco.tidelift >=1.4 ; extra == 'doc' +Provides-Extra: inflect +Requires-Dist: inflect ; extra == 'inflect' Provides-Extra: test Requires-Dist: pytest !=8.1.*,>=6 ; extra == 'test' Requires-Dist: pytest-checkdocs >=2.4 ; extra == 'test' Requires-Dist: pytest-cov ; extra == 'test' Requires-Dist: pytest-mypy ; extra == 'test' Requires-Dist: pytest-enabler >=2.2 ; extra == 'test' -Requires-Dist: pytest-ruff >=0.2.1 ; extra == 'test' Requires-Dist: pathlib2 ; (python_version < "3.10") and extra == 'test' +Requires-Dist: pytest-ruff >=0.2.1 ; (sys_platform != "cygwin") and extra == 'test' .. image:: https://img.shields.io/pypi/v/jaraco.text.svg :target: https://pypi.org/project/jaraco.text diff --git a/setuptools/_vendor/jaraco.text-4.0.0.dist-info/RECORD b/setuptools/_vendor/jaraco.text-4.0.0.dist-info/RECORD new file mode 100644 index 0000000000..af65a9daa8 --- /dev/null +++ b/setuptools/_vendor/jaraco.text-4.0.0.dist-info/RECORD @@ -0,0 +1,14 @@ +jaraco.text-4.0.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +jaraco.text-4.0.0.dist-info/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 +jaraco.text-4.0.0.dist-info/METADATA,sha256=XC_QkBLJVPE5sQYkl41TNaZUw0AUzQb29GbKaD28nFY,3731 +jaraco.text-4.0.0.dist-info/RECORD,, +jaraco.text-4.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jaraco.text-4.0.0.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91 +jaraco.text-4.0.0.dist-info/top_level.txt,sha256=0JnN3LfXH4LIRfXL-QFOGCJzQWZO3ELx4R1d_louoQM,7 +jaraco/text/Lorem ipsum.txt,sha256=N_7c_79zxOufBY9HZ3yzMgOkNv-TkOTTio4BydrSjgs,1335 +jaraco/text/__init__.py,sha256=lazNYXo8IhOR1bFigLAyGiiQao6jtO3KGWh8bZZPx3c,16762 +jaraco/text/layouts.py,sha256=HTC8aSTLZ7uXipyOXapRMC158juecjK6RVwitfmZ9_w,643 +jaraco/text/show-newlines.py,sha256=jT0vp4gLhG20hX2lTB-zKo_i3NgKzj79yRAdz4eMzIM,903 +jaraco/text/strip-prefix.py,sha256=NfVXV8JVNo6nqcuYASfMV7_y4Eo8zMQqlCOGvAnRIVw,412 +jaraco/text/to-dvorak.py,sha256=36nPPsiifwv6RfpAb--3zpgbIx8ohnnI1aR29IJTO9s,118 +jaraco/text/to-qwerty.py,sha256=IQoFY9v7vLTEybcput4KBYm_5GR35pmtgZ_xyrmdTgI,118 diff --git a/setuptools/_vendor/more_itertools-10.3.0.dist-info/REQUESTED b/setuptools/_vendor/jaraco.text-4.0.0.dist-info/REQUESTED similarity index 100% rename from setuptools/_vendor/more_itertools-10.3.0.dist-info/REQUESTED rename to setuptools/_vendor/jaraco.text-4.0.0.dist-info/REQUESTED diff --git a/setuptools/_vendor/inflect-7.3.1.dist-info/WHEEL b/setuptools/_vendor/jaraco.text-4.0.0.dist-info/WHEEL similarity index 65% rename from setuptools/_vendor/inflect-7.3.1.dist-info/WHEEL rename to setuptools/_vendor/jaraco.text-4.0.0.dist-info/WHEEL index 564c6724e4..ecaf39f3c3 100644 --- a/setuptools/_vendor/inflect-7.3.1.dist-info/WHEEL +++ 
b/setuptools/_vendor/jaraco.text-4.0.0.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: setuptools (70.2.0) +Generator: setuptools (71.1.0) Root-Is-Purelib: true Tag: py3-none-any diff --git a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/top_level.txt b/setuptools/_vendor/jaraco.text-4.0.0.dist-info/top_level.txt similarity index 100% rename from setuptools/_vendor/jaraco.collections-5.1.0.dist-info/top_level.txt rename to setuptools/_vendor/jaraco.text-4.0.0.dist-info/top_level.txt diff --git a/setuptools/_vendor/jaraco/collections/__init__.py b/setuptools/_vendor/jaraco/collections/__init__.py deleted file mode 100644 index 0d501cf9e9..0000000000 --- a/setuptools/_vendor/jaraco/collections/__init__.py +++ /dev/null @@ -1,1091 +0,0 @@ -from __future__ import annotations - -import collections.abc -import copy -import functools -import itertools -import operator -import random -import re -from collections.abc import Container, Iterable, Mapping -from typing import TYPE_CHECKING, Any, Callable, Dict, TypeVar, Union, overload - -import jaraco.text - -if TYPE_CHECKING: - from _operator import _SupportsComparison - - from _typeshed import SupportsKeysAndGetItem - from typing_extensions import Self - - _RangeMapKT = TypeVar('_RangeMapKT', bound=_SupportsComparison) -else: - # _SupportsComparison doesn't exist at runtime, - # but _RangeMapKT is used in RangeMap's superclass' type parameters - _RangeMapKT = TypeVar('_RangeMapKT') - -_T = TypeVar('_T') -_VT = TypeVar('_VT') - -_Matchable = Union[Callable, Container, Iterable, re.Pattern] - - -def _dispatch(obj: _Matchable) -> Callable: - # can't rely on singledispatch for Union[Container, Iterable] - # due to ambiguity - # (https://peps.python.org/pep-0443/#abstract-base-classes). - if isinstance(obj, re.Pattern): - return obj.fullmatch - # mypy issue: https://github.com/python/mypy/issues/11071 - if not isinstance(obj, Callable): # type: ignore[arg-type] - if not isinstance(obj, Container): - obj = set(obj) # type: ignore[arg-type] - obj = obj.__contains__ - return obj # type: ignore[return-value] - - -class Projection(collections.abc.Mapping): - """ - Project a set of keys over a mapping - - >>> sample = {'a': 1, 'b': 2, 'c': 3} - >>> prj = Projection(['a', 'c', 'd'], sample) - >>> dict(prj) - {'a': 1, 'c': 3} - - Projection also accepts an iterable or callable or pattern. - - >>> iter_prj = Projection(iter('acd'), sample) - >>> call_prj = Projection(lambda k: ord(k) in (97, 99, 100), sample) - >>> pat_prj = Projection(re.compile(r'[acd]'), sample) - >>> prj == iter_prj == call_prj == pat_prj - True - - Keys should only appear if they were specified and exist in the space. - Order is retained. - - >>> list(prj) - ['a', 'c'] - - Attempting to access a key not in the projection - results in a KeyError. - - >>> prj['b'] - Traceback (most recent call last): - ... - KeyError: 'b' - - Use the projection to update another dict. - - >>> target = {'a': 2, 'b': 2} - >>> target.update(prj) - >>> target - {'a': 1, 'b': 2, 'c': 3} - - Projection keeps a reference to the original dict, so - modifying the original dict may modify the Projection. 
- - >>> del sample['a'] - >>> dict(prj) - {'c': 3} - """ - - def __init__(self, keys: _Matchable, space: Mapping): - self._match = _dispatch(keys) - self._space = space - - def __getitem__(self, key): - if not self._match(key): - raise KeyError(key) - return self._space[key] - - def _keys_resolved(self): - return filter(self._match, self._space) - - def __iter__(self): - return self._keys_resolved() - - def __len__(self): - return len(tuple(self._keys_resolved())) - - -class Mask(Projection): - """ - The inverse of a :class:`Projection`, masking out keys. - - >>> sample = {'a': 1, 'b': 2, 'c': 3} - >>> msk = Mask(['a', 'c', 'd'], sample) - >>> dict(msk) - {'b': 2} - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - # self._match = compose(operator.not_, self._match) - self._match = lambda key, orig=self._match: not orig(key) - - -def dict_map(function, dictionary): - """ - Return a new dict with function applied to values of dictionary. - - >>> dict_map(lambda x: x+1, dict(a=1, b=2)) - {'a': 2, 'b': 3} - """ - return dict((key, function(value)) for key, value in dictionary.items()) - - -class RangeMap(Dict[_RangeMapKT, _VT]): - """ - A dictionary-like object that uses the keys as bounds for a range. - Inclusion of the value for that range is determined by the - key_match_comparator, which defaults to less-than-or-equal. - A value is returned for a key if it is the first key that matches in - the sorted list of keys. - - One may supply keyword parameters to be passed to the sort function used - to sort keys (i.e. key, reverse) as sort_params. - - Create a map that maps 1-3 -> 'a', 4-6 -> 'b' - - >>> r = RangeMap({3: 'a', 6: 'b'}) # boy, that was easy - >>> r[1], r[2], r[3], r[4], r[5], r[6] - ('a', 'a', 'a', 'b', 'b', 'b') - - Even float values should work so long as the comparison operator - supports it. - - >>> r[4.5] - 'b' - - Notice that the way rangemap is defined, it must be open-ended - on one side. - - >>> r[0] - 'a' - >>> r[-1] - 'a' - - One can close the open-end of the RangeMap by using undefined_value - - >>> r = RangeMap({0: RangeMap.undefined_value, 3: 'a', 6: 'b'}) - >>> r[0] - Traceback (most recent call last): - ... - KeyError: 0 - - One can get the first or last elements in the range by using RangeMap.Item - - >>> last_item = RangeMap.Item(-1) - >>> r[last_item] - 'b' - - .last_item is a shortcut for Item(-1) - - >>> r[RangeMap.last_item] - 'b' - - Sometimes it's useful to find the bounds for a RangeMap - - >>> r.bounds() - (0, 6) - - RangeMap supports .get(key, default) - - >>> r.get(0, 'not found') - 'not found' - - >>> r.get(7, 'not found') - 'not found' - - One often wishes to define the ranges by their left-most values, - which requires use of sort params and a key_match_comparator. - - >>> r = RangeMap({1: 'a', 4: 'b'}, - ... sort_params=dict(reverse=True), - ... 
key_match_comparator=operator.ge) - >>> r[1], r[2], r[3], r[4], r[5], r[6] - ('a', 'a', 'a', 'b', 'b', 'b') - - That wasn't nearly as easy as before, so an alternate constructor - is provided: - - >>> r = RangeMap.left({1: 'a', 4: 'b', 7: RangeMap.undefined_value}) - >>> r[1], r[2], r[3], r[4], r[5], r[6] - ('a', 'a', 'a', 'b', 'b', 'b') - - """ - - def __init__( - self, - source: ( - SupportsKeysAndGetItem[_RangeMapKT, _VT] | Iterable[tuple[_RangeMapKT, _VT]] - ), - sort_params: Mapping[str, Any] = {}, - key_match_comparator: Callable[[_RangeMapKT, _RangeMapKT], bool] = operator.le, - ): - dict.__init__(self, source) - self.sort_params = sort_params - self.match = key_match_comparator - - @classmethod - def left( - cls, - source: ( - SupportsKeysAndGetItem[_RangeMapKT, _VT] | Iterable[tuple[_RangeMapKT, _VT]] - ), - ) -> Self: - return cls( - source, sort_params=dict(reverse=True), key_match_comparator=operator.ge - ) - - def __getitem__(self, item: _RangeMapKT) -> _VT: - sorted_keys = sorted(self.keys(), **self.sort_params) - if isinstance(item, RangeMap.Item): - result = self.__getitem__(sorted_keys[item]) - else: - key = self._find_first_match_(sorted_keys, item) - result = dict.__getitem__(self, key) - if result is RangeMap.undefined_value: - raise KeyError(key) - return result - - @overload # type: ignore[override] # Signature simplified over dict and Mapping - def get(self, key: _RangeMapKT, default: _T) -> _VT | _T: ... - @overload - def get(self, key: _RangeMapKT, default: None = None) -> _VT | None: ... - def get(self, key: _RangeMapKT, default: _T | None = None) -> _VT | _T | None: - """ - Return the value for key if key is in the dictionary, else default. - If default is not given, it defaults to None, so that this method - never raises a KeyError. - """ - try: - return self[key] - except KeyError: - return default - - def _find_first_match_( - self, keys: Iterable[_RangeMapKT], item: _RangeMapKT - ) -> _RangeMapKT: - is_match = functools.partial(self.match, item) - matches = filter(is_match, keys) - try: - return next(matches) - except StopIteration: - raise KeyError(item) from None - - def bounds(self) -> tuple[_RangeMapKT, _RangeMapKT]: - sorted_keys = sorted(self.keys(), **self.sort_params) - return (sorted_keys[RangeMap.first_item], sorted_keys[RangeMap.last_item]) - - # some special values for the RangeMap - undefined_value = type('RangeValueUndefined', (), {})() - - class Item(int): - """RangeMap Item""" - - first_item = Item(0) - last_item = Item(-1) - - -def __identity(x): - return x - - -def sorted_items(d, key=__identity, reverse=False): - """ - Return the items of the dictionary sorted by the keys. - - >>> sample = dict(foo=20, bar=42, baz=10) - >>> tuple(sorted_items(sample)) - (('bar', 42), ('baz', 10), ('foo', 20)) - - >>> reverse_string = lambda s: ''.join(reversed(s)) - >>> tuple(sorted_items(sample, key=reverse_string)) - (('foo', 20), ('bar', 42), ('baz', 10)) - - >>> tuple(sorted_items(sample, reverse=True)) - (('foo', 20), ('baz', 10), ('bar', 42)) - """ - - # wrap the key func so it operates on the first element of each item - def pairkey_key(item): - return key(item[0]) - - return sorted(d.items(), key=pairkey_key, reverse=reverse) - - -class KeyTransformingDict(dict): - """ - A dict subclass that transforms the keys before they're used. - Subclasses may override the default transform_key to customize behavior. 
- """ - - @staticmethod - def transform_key(key): # pragma: nocover - return key - - def __init__(self, *args, **kargs): - super().__init__() - # build a dictionary using the default constructs - d = dict(*args, **kargs) - # build this dictionary using transformed keys. - for item in d.items(): - self.__setitem__(*item) - - def __setitem__(self, key, val): - key = self.transform_key(key) - super().__setitem__(key, val) - - def __getitem__(self, key): - key = self.transform_key(key) - return super().__getitem__(key) - - def __contains__(self, key): - key = self.transform_key(key) - return super().__contains__(key) - - def __delitem__(self, key): - key = self.transform_key(key) - return super().__delitem__(key) - - def get(self, key, *args, **kwargs): - key = self.transform_key(key) - return super().get(key, *args, **kwargs) - - def setdefault(self, key, *args, **kwargs): - key = self.transform_key(key) - return super().setdefault(key, *args, **kwargs) - - def pop(self, key, *args, **kwargs): - key = self.transform_key(key) - return super().pop(key, *args, **kwargs) - - def matching_key_for(self, key): - """ - Given a key, return the actual key stored in self that matches. - Raise KeyError if the key isn't found. - """ - try: - return next(e_key for e_key in self.keys() if e_key == key) - except StopIteration as err: - raise KeyError(key) from err - - -class FoldedCaseKeyedDict(KeyTransformingDict): - """ - A case-insensitive dictionary (keys are compared as insensitive - if they are strings). - - >>> d = FoldedCaseKeyedDict() - >>> d['heLlo'] = 'world' - >>> list(d.keys()) == ['heLlo'] - True - >>> list(d.values()) == ['world'] - True - >>> d['hello'] == 'world' - True - >>> 'hello' in d - True - >>> 'HELLO' in d - True - >>> print(repr(FoldedCaseKeyedDict({'heLlo': 'world'}))) - {'heLlo': 'world'} - >>> d = FoldedCaseKeyedDict({'heLlo': 'world'}) - >>> print(d['hello']) - world - >>> print(d['Hello']) - world - >>> list(d.keys()) - ['heLlo'] - >>> d = FoldedCaseKeyedDict({'heLlo': 'world', 'Hello': 'world'}) - >>> list(d.values()) - ['world'] - >>> key, = d.keys() - >>> key in ['heLlo', 'Hello'] - True - >>> del d['HELLO'] - >>> d - {} - - get should work - - >>> d['Sumthin'] = 'else' - >>> d.get('SUMTHIN') - 'else' - >>> d.get('OTHER', 'thing') - 'thing' - >>> del d['sumthin'] - - setdefault should also work - - >>> d['This'] = 'that' - >>> print(d.setdefault('this', 'other')) - that - >>> len(d) - 1 - >>> print(d['this']) - that - >>> print(d.setdefault('That', 'other')) - other - >>> print(d['THAT']) - other - - Make it pop! - - >>> print(d.pop('THAT')) - other - - To retrieve the key in its originally-supplied form, use matching_key_for - - >>> print(d.matching_key_for('this')) - This - - >>> d.matching_key_for('missing') - Traceback (most recent call last): - ... - KeyError: 'missing' - """ - - @staticmethod - def transform_key(key): - return jaraco.text.FoldedCase(key) - - -class DictAdapter: - """ - Provide a getitem interface for attributes of an object. - - Let's say you want to get at the string.lowercase property in a formatted - string. It's easy with DictAdapter. - - >>> import string - >>> print("lowercase is %(ascii_lowercase)s" % DictAdapter(string)) - lowercase is abcdefghijklmnopqrstuvwxyz - """ - - def __init__(self, wrapped_ob): - self.object = wrapped_ob - - def __getitem__(self, name): - return getattr(self.object, name) - - -class ItemsAsAttributes: - """ - Mix-in class to enable a mapping object to provide items as - attributes. 
- - >>> C = type('C', (dict, ItemsAsAttributes), dict()) - >>> i = C() - >>> i['foo'] = 'bar' - >>> i.foo - 'bar' - - Natural attribute access takes precedence - - >>> i.foo = 'henry' - >>> i.foo - 'henry' - - But as you might expect, the mapping functionality is preserved. - - >>> i['foo'] - 'bar' - - A normal attribute error should be raised if an attribute is - requested that doesn't exist. - - >>> i.missing - Traceback (most recent call last): - ... - AttributeError: 'C' object has no attribute 'missing' - - It also works on dicts that customize __getitem__ - - >>> missing_func = lambda self, key: 'missing item' - >>> C = type( - ... 'C', - ... (dict, ItemsAsAttributes), - ... dict(__missing__ = missing_func), - ... ) - >>> i = C() - >>> i.missing - 'missing item' - >>> i.foo - 'missing item' - """ - - def __getattr__(self, key): - try: - return getattr(super(), key) - except AttributeError as e: - # attempt to get the value from the mapping (return self[key]) - # but be careful not to lose the original exception context. - noval = object() - - def _safe_getitem(cont, key, missing_result): - try: - return cont[key] - except KeyError: - return missing_result - - result = _safe_getitem(self, key, noval) - if result is not noval: - return result - # raise the original exception, but use the original class - # name, not 'super'. - (message,) = e.args - message = message.replace('super', self.__class__.__name__, 1) - e.args = (message,) - raise - - -def invert_map(map): - """ - Given a dictionary, return another dictionary with keys and values - switched. If any of the values resolve to the same key, raises - a ValueError. - - >>> numbers = dict(a=1, b=2, c=3) - >>> letters = invert_map(numbers) - >>> letters[1] - 'a' - >>> numbers['d'] = 3 - >>> invert_map(numbers) - Traceback (most recent call last): - ... - ValueError: Key conflict in inverted mapping - """ - res = dict((v, k) for k, v in map.items()) - if not len(res) == len(map): - raise ValueError('Key conflict in inverted mapping') - return res - - -class IdentityOverrideMap(dict): - """ - A dictionary that by default maps each key to itself, but otherwise - acts like a normal dictionary. - - >>> d = IdentityOverrideMap() - >>> d[42] - 42 - >>> d['speed'] = 'speedo' - >>> print(d['speed']) - speedo - """ - - def __missing__(self, key): - return key - - -class DictStack(list, collections.abc.MutableMapping): - """ - A stack of dictionaries that behaves as a view on those dictionaries, - giving preference to the last. 
- - >>> stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)]) - >>> stack['a'] - 2 - >>> stack['b'] - 2 - >>> stack['c'] - 2 - >>> len(stack) - 3 - >>> stack.push(dict(a=3)) - >>> stack['a'] - 3 - >>> stack['a'] = 4 - >>> set(stack.keys()) == set(['a', 'b', 'c']) - True - >>> set(stack.items()) == set([('a', 4), ('b', 2), ('c', 2)]) - True - >>> dict(**stack) == dict(stack) == dict(a=4, c=2, b=2) - True - >>> d = stack.pop() - >>> stack['a'] - 2 - >>> d = stack.pop() - >>> stack['a'] - 1 - >>> stack.get('b', None) - >>> 'c' in stack - True - >>> del stack['c'] - >>> dict(stack) - {'a': 1} - """ - - def __iter__(self): - dicts = list.__iter__(self) - return iter(set(itertools.chain.from_iterable(c.keys() for c in dicts))) - - def __getitem__(self, key): - for scope in reversed(tuple(list.__iter__(self))): - if key in scope: - return scope[key] - raise KeyError(key) - - push = list.append - - def __contains__(self, other): - return collections.abc.Mapping.__contains__(self, other) - - def __len__(self): - return len(list(iter(self))) - - def __setitem__(self, key, item): - last = list.__getitem__(self, -1) - return last.__setitem__(key, item) - - def __delitem__(self, key): - last = list.__getitem__(self, -1) - return last.__delitem__(key) - - # workaround for mypy confusion - def pop(self, *args, **kwargs): - return list.pop(self, *args, **kwargs) - - -class BijectiveMap(dict): - """ - A Bijective Map (two-way mapping). - - Implemented as a simple dictionary of 2x the size, mapping values back - to keys. - - Note, this implementation may be incomplete. If there's not a test for - your use case below, it's likely to fail, so please test and send pull - requests or patches for additional functionality needed. - - - >>> m = BijectiveMap() - >>> m['a'] = 'b' - >>> m == {'a': 'b', 'b': 'a'} - True - >>> print(m['b']) - a - - >>> m['c'] = 'd' - >>> len(m) - 2 - - Some weird things happen if you map an item to itself or overwrite a - single key of a pair, so it's disallowed. - - >>> m['e'] = 'e' - Traceback (most recent call last): - ValueError: Key cannot map to itself - - >>> m['d'] = 'e' - Traceback (most recent call last): - ValueError: Key/Value pairs may not overlap - - >>> m['e'] = 'd' - Traceback (most recent call last): - ValueError: Key/Value pairs may not overlap - - >>> print(m.pop('d')) - c - - >>> 'c' in m - False - - >>> m = BijectiveMap(dict(a='b')) - >>> len(m) - 1 - >>> print(m['b']) - a - - >>> m = BijectiveMap() - >>> m.update(a='b') - >>> m['b'] - 'a' - - >>> del m['b'] - >>> len(m) - 0 - >>> 'a' in m - False - """ - - def __init__(self, *args, **kwargs): - super().__init__() - self.update(*args, **kwargs) - - def __setitem__(self, item, value): - if item == value: - raise ValueError("Key cannot map to itself") - overlap = ( - item in self - and self[item] != value - or value in self - and self[value] != item - ) - if overlap: - raise ValueError("Key/Value pairs may not overlap") - super().__setitem__(item, value) - super().__setitem__(value, item) - - def __delitem__(self, item): - self.pop(item) - - def __len__(self): - return super().__len__() // 2 - - def pop(self, key, *args, **kwargs): - mirror = self[key] - super().__delitem__(mirror) - return super().pop(key, *args, **kwargs) - - def update(self, *args, **kwargs): - # build a dictionary using the default constructs - d = dict(*args, **kwargs) - # build this dictionary using transformed keys. 
- for item in d.items(): - self.__setitem__(*item) - - -class FrozenDict(collections.abc.Mapping, collections.abc.Hashable): - """ - An immutable mapping. - - >>> a = FrozenDict(a=1, b=2) - >>> b = FrozenDict(a=1, b=2) - >>> a == b - True - - >>> a == dict(a=1, b=2) - True - >>> dict(a=1, b=2) == a - True - >>> 'a' in a - True - >>> type(hash(a)) is type(0) - True - >>> set(iter(a)) == {'a', 'b'} - True - >>> len(a) - 2 - >>> a['a'] == a.get('a') == 1 - True - - >>> a['c'] = 3 - Traceback (most recent call last): - ... - TypeError: 'FrozenDict' object does not support item assignment - - >>> a.update(y=3) - Traceback (most recent call last): - ... - AttributeError: 'FrozenDict' object has no attribute 'update' - - Copies should compare equal - - >>> copy.copy(a) == a - True - - Copies should be the same type - - >>> isinstance(copy.copy(a), FrozenDict) - True - - FrozenDict supplies .copy(), even though - collections.abc.Mapping doesn't demand it. - - >>> a.copy() == a - True - >>> a.copy() is not a - True - """ - - __slots__ = ['__data'] - - def __new__(cls, *args, **kwargs): - self = super().__new__(cls) - self.__data = dict(*args, **kwargs) - return self - - # Container - def __contains__(self, key): - return key in self.__data - - # Hashable - def __hash__(self): - return hash(tuple(sorted(self.__data.items()))) - - # Mapping - def __iter__(self): - return iter(self.__data) - - def __len__(self): - return len(self.__data) - - def __getitem__(self, key): - return self.__data[key] - - # override get for efficiency provided by dict - def get(self, *args, **kwargs): - return self.__data.get(*args, **kwargs) - - # override eq to recognize underlying implementation - def __eq__(self, other): - if isinstance(other, FrozenDict): - other = other.__data - return self.__data.__eq__(other) - - def copy(self): - "Return a shallow copy of self" - return copy.copy(self) - - -class Enumeration(ItemsAsAttributes, BijectiveMap): - """ - A convenient way to provide enumerated values - - >>> e = Enumeration('a b c') - >>> e['a'] - 0 - - >>> e.a - 0 - - >>> e[1] - 'b' - - >>> set(e.names) == set('abc') - True - - >>> set(e.codes) == set(range(3)) - True - - >>> e.get('d') is None - True - - Codes need not start with 0 - - >>> e = Enumeration('a b c', range(1, 4)) - >>> e['a'] - 1 - - >>> e[3] - 'c' - """ - - def __init__(self, names, codes=None): - if isinstance(names, str): - names = names.split() - if codes is None: - codes = itertools.count() - super().__init__(zip(names, codes)) - - @property - def names(self): - return (key for key in self if isinstance(key, str)) - - @property - def codes(self): - return (self[name] for name in self.names) - - -class Everything: - """ - A collection "containing" every possible thing. - - >>> 'foo' in Everything() - True - - >>> import random - >>> random.randint(1, 999) in Everything() - True - - >>> random.choice([None, 'foo', 42, ('a', 'b', 'c')]) in Everything() - True - """ - - def __contains__(self, other): - return True - - -class InstrumentedDict(collections.UserDict): - """ - Instrument an existing dictionary with additional - functionality, but always reference and mutate - the original dictionary. 
- - >>> orig = {'a': 1, 'b': 2} - >>> inst = InstrumentedDict(orig) - >>> inst['a'] - 1 - >>> inst['c'] = 3 - >>> orig['c'] - 3 - >>> inst.keys() == orig.keys() - True - """ - - def __init__(self, data): - super().__init__() - self.data = data - - -class Least: - """ - A value that is always lesser than any other - - >>> least = Least() - >>> 3 < least - False - >>> 3 > least - True - >>> least < 3 - True - >>> least <= 3 - True - >>> least > 3 - False - >>> 'x' > least - True - >>> None > least - True - """ - - def __le__(self, other): - return True - - __lt__ = __le__ - - def __ge__(self, other): - return False - - __gt__ = __ge__ - - -class Greatest: - """ - A value that is always greater than any other - - >>> greatest = Greatest() - >>> 3 < greatest - True - >>> 3 > greatest - False - >>> greatest < 3 - False - >>> greatest > 3 - True - >>> greatest >= 3 - True - >>> 'x' > greatest - False - >>> None > greatest - False - """ - - def __ge__(self, other): - return True - - __gt__ = __ge__ - - def __le__(self, other): - return False - - __lt__ = __le__ - - -def pop_all(items): - """ - Clear items in place and return a copy of items. - - >>> items = [1, 2, 3] - >>> popped = pop_all(items) - >>> popped is items - False - >>> popped - [1, 2, 3] - >>> items - [] - """ - result, items[:] = items[:], [] - return result - - -class FreezableDefaultDict(collections.defaultdict): - """ - Often it is desirable to prevent the mutation of - a default dict after its initial construction, such - as to prevent mutation during iteration. - - >>> dd = FreezableDefaultDict(list) - >>> dd[0].append('1') - >>> dd.freeze() - >>> dd[1] - [] - >>> len(dd) - 1 - """ - - def __missing__(self, key): - return getattr(self, '_frozen', super().__missing__)(key) - - def freeze(self): - self._frozen = lambda key: self.default_factory() - - -class Accumulator: - def __init__(self, initial=0): - self.val = initial - - def __call__(self, val): - self.val += val - return self.val - - -class WeightedLookup(RangeMap): - """ - Given parameters suitable for a dict representing keys - and a weighted proportion, return a RangeMap representing - spans of values proportial to the weights: - - >>> even = WeightedLookup(a=1, b=1) - - [0, 1) -> a - [1, 2) -> b - - >>> lk = WeightedLookup(a=1, b=2) - - [0, 1) -> a - [1, 3) -> b - - >>> lk[.5] - 'a' - >>> lk[1.5] - 'b' - - Adds ``.random()`` to select a random weighted value: - - >>> lk.random() in ['a', 'b'] - True - - >>> choices = [lk.random() for x in range(1000)] - - Statistically speaking, choices should be .5 a:b - >>> ratio = choices.count('a') / choices.count('b') - >>> .4 < ratio < .6 - True - """ - - def __init__(self, *args, **kwargs): - raw = dict(*args, **kwargs) - - # allocate keys by weight - indexes = map(Accumulator(), raw.values()) - super().__init__(zip(indexes, raw.keys()), key_match_comparator=operator.lt) - - def random(self): - lower, upper = self.bounds() - selector = random.random() * upper - return self[selector] diff --git a/setuptools/_vendor/jaraco/context.py b/setuptools/_vendor/jaraco/context/__init__.py similarity index 73% rename from setuptools/_vendor/jaraco/context.py rename to setuptools/_vendor/jaraco/context/__init__.py index 61b27135df..41ad609edd 100644 --- a/setuptools/_vendor/jaraco/context.py +++ b/setuptools/_vendor/jaraco/context/__init__.py @@ -1,17 +1,18 @@ from __future__ import annotations import contextlib +import errno import functools import operator import os +import platform import shutil +import stat import subprocess import sys 
import tempfile import urllib.request -import warnings -from typing import Iterator - +from collections.abc import Iterator if sys.version_info < (3, 12): from backports import tarfile @@ -41,7 +42,16 @@ def tarball( url, target_dir: str | os.PathLike | None = None ) -> Iterator[str | os.PathLike]: """ - Get a tarball, extract it, yield, then clean up. + Get a URL to a tarball, download, extract, yield, then clean up. + + Assumes everything in the tarball is prefixed with a common + directory. That common path is stripped and the contents + are extracted to ``target_dir``, similar to passing + ``-C {target} --strip-components 1`` to the ``tar`` command. + + Uses the streaming protocol to extract the contents from a + stream in a single pass without loading the whole file into + memory. >>> import urllib.request >>> url = getfixture('tarfile_served') @@ -51,23 +61,35 @@ def tarball( >>> with tb as extracted: ... contents = pathlib.Path(extracted, 'contents.txt').read_text(encoding='utf-8') >>> assert not os.path.exists(extracted) + + If the target is not specified, contents are extracted to a + directory relative to the current working directory named after + the name of the file as extracted from the URL. + + >>> target = getfixture('tmp_path') + >>> with pushd(target), tarball(url): + ... target.joinpath('served').is_dir() + True """ if target_dir is None: target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '') - # In the tar command, use --strip-components=1 to strip the first path and - # then - # use -C to cause the files to be extracted to {target_dir}. This ensures - # that we always know where the files were extracted. os.mkdir(target_dir) try: req = urllib.request.urlopen(url) with tarfile.open(fileobj=req, mode='r|*') as tf: - tf.extractall(path=target_dir, filter=strip_first_component) + tf.extractall(path=target_dir, filter=_default_filter) yield target_dir finally: shutil.rmtree(target_dir) +def _compose_tarfile_filters(*filters): + def compose_two(f1, f2): + return lambda member, path: f1(f2(member, path), path) + + return functools.reduce(compose_two, filters, lambda member, path: member) + + def strip_first_component( member: tarfile.TarInfo, path, @@ -76,6 +98,9 @@ def strip_first_component( return member +_default_filter = _compose_tarfile_filters(tarfile.data_filter, strip_first_component) + + def _compose(*cmgrs): """ Compose any number of dependent context managers into a single one. @@ -107,43 +132,31 @@ def composed(*args, **kwargs): tarball_cwd = _compose(pushd, tarball) +""" +A tarball context with the current working directory pointing to the contents. +""" -@contextlib.contextmanager -def tarball_context(*args, **kwargs): - warnings.warn( - "tarball_context is deprecated. Use tarball or tarball_cwd instead.", - DeprecationWarning, - stacklevel=2, - ) - pushd_ctx = kwargs.pop('pushd', pushd) - with tarball(*args, **kwargs) as tball, pushd_ctx(tball) as dir: - yield dir - - -def infer_compression(url): +def remove_readonly(func, path, exc_info): """ - Given a URL or filename, infer the compression code for tar. - - >>> infer_compression('http://foo/bar.tar.gz') - 'z' - >>> infer_compression('http://foo/bar.tgz') - 'z' - >>> infer_compression('file.bz') - 'j' - >>> infer_compression('file.xz') - 'J' + Add support for removing read-only files on Windows. 
""" - warnings.warn( - "infer_compression is deprecated with no replacement", - DeprecationWarning, - stacklevel=2, + _, exc, _ = exc_info + if func in (os.rmdir, os.remove, os.unlink) and exc.errno == errno.EACCES: + # change the file to be readable,writable,executable: 0777 + os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + # retry + func(path) + else: + raise + + +def robust_remover(): + return ( + functools.partial(shutil.rmtree, onerror=remove_readonly) + if platform.system() == 'Windows' + else shutil.rmtree ) - # cheat and just assume it's the last two characters - compression_indicator = url[-2:] - mapping = dict(gz='z', bz='j', xz='J') - # Assume 'z' (gzip) if no match - return mapping.get(compression_indicator, 'z') @contextlib.contextmanager @@ -155,7 +168,6 @@ def temp_dir(remover=shutil.rmtree): >>> import pathlib >>> with temp_dir() as the_dir: ... assert os.path.isdir(the_dir) - ... _ = pathlib.Path(the_dir).joinpath('somefile').write_text('contents', encoding='utf-8') >>> assert not os.path.exists(the_dir) """ temp_dir = tempfile.mkdtemp() @@ -165,44 +177,36 @@ def temp_dir(remover=shutil.rmtree): remover(temp_dir) +robust_temp_dir = functools.partial(temp_dir, remover=robust_remover()) + + @contextlib.contextmanager -def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir): +def repo_context( + url, branch: str | None = None, quiet: bool = True, dest_ctx=robust_temp_dir +): """ Check out the repo indicated by url. If dest_ctx is supplied, it should be a context manager to yield the target directory for the check out. + + >>> getfixture('ensure_git') + >>> getfixture('needs_internet') + >>> repo = repo_context('https://github.com/jaraco/jaraco.context') + >>> with repo as dest: + ... listing = os.listdir(dest) + >>> 'README.rst' in listing + True """ exe = 'git' if 'git' in url else 'hg' with dest_ctx() as repo_dir: cmd = [exe, 'clone', url, repo_dir] - if branch: - cmd.extend(['--branch', branch]) - devnull = open(os.path.devnull, 'w') - stdout = devnull if quiet else None - subprocess.check_call(cmd, stdout=stdout) + cmd.extend(['--branch', branch] * bool(branch)) + stream = subprocess.DEVNULL if quiet else None + subprocess.check_call(cmd, stdout=stream, stderr=stream) yield repo_dir -def null(): - """ - A null context suitable to stand in for a meaningful context. - - >>> with null() as value: - ... assert value is None - - This context is most useful when dealing with two or more code - branches but only some need a context. Wrap the others in a null - context to provide symmetry across all options. - """ - warnings.warn( - "null is deprecated. Use contextlib.nullcontext", - DeprecationWarning, - stacklevel=2, - ) - return contextlib.nullcontext() - - class ExceptionTrap: """ A context manager that will catch certain exceptions and provide an @@ -329,7 +333,9 @@ class suppress(contextlib.suppress, contextlib.ContextDecorator): class on_interrupt(contextlib.ContextDecorator): """ - Replace a KeyboardInterrupt with SystemExit(1) + Replace a KeyboardInterrupt with SystemExit(1). + + Useful in conjunction with console entry point functions. >>> def do_interrupt(): ... 
raise KeyboardInterrupt() diff --git a/setuptools/_vendor/inflect/py.typed b/setuptools/_vendor/jaraco/context/py.typed similarity index 100% rename from setuptools/_vendor/inflect/py.typed rename to setuptools/_vendor/jaraco/context/py.typed diff --git a/setuptools/_vendor/jaraco/functools/__init__.py b/setuptools/_vendor/jaraco/functools/__init__.py index ca6c22fa9b..df32e2e924 100644 --- a/setuptools/_vendor/jaraco/functools/__init__.py +++ b/setuptools/_vendor/jaraco/functools/__init__.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import collections.abc import functools import inspect @@ -6,6 +8,7 @@ import time import types import warnings +from typing import Callable, TypeVar import more_itertools @@ -14,6 +17,14 @@ def compose(*funcs): """ Compose any number of unary functions into a single unary function. + Comparable to + `function composition `_ + in mathematics: + + ``h = g ∘ f`` implies ``h(x) = g(f(x))``. + + In Python, ``h = compose(g, f)``. + >>> import textwrap >>> expected = str.strip(textwrap.dedent(compose.__doc__)) >>> strip_and_dedent = compose(str.strip, textwrap.dedent) @@ -279,6 +290,26 @@ def invoke(f, /, *args, **kwargs): return f +_T = TypeVar('_T') + + +def passthrough(func: Callable[..., object]) -> Callable[[_T], _T]: + """ + Wrap the function to always return the first parameter. + + >>> passthrough(print)('3') + 3 + '3' + """ + + @functools.wraps(func) + def wrapper(first: _T, *args, **kwargs) -> _T: + func(first, *args, **kwargs) + return first + + return wrapper + + class Throttler: """Rate-limit a function (or other callable).""" @@ -410,6 +441,16 @@ def wrapper(param, /, *args, **kwargs): return wrapper +def none_as(value, replacement=None): + """ + >>> none_as(None, 'foo') + 'foo' + >>> none_as('bar', 'foo') + 'bar' + """ + return replacement if value is None else value + + def assign_params(func, namespace): """ Assign parameters from namespace where func solicits. @@ -476,7 +517,7 @@ def save_method_args(method): >>> my_ob._saved_method.args () """ - args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') + args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') # noqa: PYI024 # Internal; stubs used for typing @functools.wraps(method) def wrapper(self, /, *args, **kwargs): @@ -631,3 +672,51 @@ def splat(func): {'msg': 'unknown', 'code': 0} """ return functools.wraps(func)(functools.partial(_splat_inner, func=func)) + + +_T = TypeVar('_T') + + +def chainable(method: Callable[[_T, ...], None]) -> Callable[[_T, ...], _T]: + """ + Wrap an instance method to always return self. + + + >>> class Dingus: + ... @chainable + ... def set_attr(self, name, val): + ... setattr(self, name, val) + >>> d = Dingus().set_attr('a', 'eh!') + >>> d.a + 'eh!' + >>> d2 = Dingus().set_attr('a', 'eh!').set_attr('b', 'bee!') + >>> d2.a + d2.b + 'eh!bee!' + + Enforces that the return value is null. + + >>> class BorkedDingus: + ... @chainable + ... def set_attr(self, name, val): + ... setattr(self, name, val) + ... return len(name) + >>> BorkedDingus().set_attr('a', 'eh!') + Traceback (most recent call last): + ... + AssertionError + """ + + @functools.wraps(method) + def wrapper(self, *args, **kwargs): + assert method(self, *args, **kwargs) is None + return self + + return wrapper + + +def noop(*args, **kwargs): + """ + A no-operation function that does nothing. 
+ + >>> noop(1, 2, three=3) + """ diff --git a/setuptools/_vendor/jaraco/functools/__init__.pyi b/setuptools/_vendor/jaraco/functools/__init__.pyi index 19191bf93e..6f834bf06d 100644 --- a/setuptools/_vendor/jaraco/functools/__init__.pyi +++ b/setuptools/_vendor/jaraco/functools/__init__.pyi @@ -1,7 +1,6 @@ from collections.abc import Callable, Hashable, Iterator from functools import partial from operator import methodcaller -import sys from typing import ( Any, Generic, @@ -10,14 +9,12 @@ from typing import ( overload, ) -if sys.version_info >= (3, 10): - from typing import Concatenate, ParamSpec -else: - from typing_extensions import Concatenate, ParamSpec +from typing_extensions import Concatenate, ParamSpec, TypeVarTuple, Unpack _P = ParamSpec('_P') _R = TypeVar('_R') _T = TypeVar('_T') +_Ts = TypeVarTuple('_Ts') _R1 = TypeVar('_R1') _R2 = TypeVar('_R2') _V = TypeVar('_V') @@ -66,10 +63,10 @@ def method_cache( cache_wrapper: Callable[[Callable[..., _R]], _MethodCacheWrapper[_R]] = ..., ) -> _MethodCacheWrapper[_R] | _ProxyMethodCacheWrapper[_R]: ... def apply( - transform: Callable[[_R], _T] + transform: Callable[[_R], _T], ) -> Callable[[Callable[_P, _R]], Callable[_P, _T]]: ... def result_invoke( - action: Callable[[_R], Any] + action: Callable[[_R], Any], ) -> Callable[[Callable[_P, _R]], Callable[_P, _R]]: ... def invoke( f: Callable[_P, _R], /, *args: _P.args, **kwargs: _P.kwargs @@ -95,23 +92,23 @@ method_caller: Callable[..., methodcaller] def retry_call( func: Callable[..., _R], cleanup: Callable[..., None] = ..., - retries: int | float = ..., + retries: float = ..., trap: type[BaseException] | tuple[type[BaseException], ...] = ..., ) -> _R: ... def retry( cleanup: Callable[..., None] = ..., - retries: int | float = ..., + retries: float = ..., trap: type[BaseException] | tuple[type[BaseException], ...] = ..., ) -> Callable[[Callable[..., _R]], Callable[..., _R]]: ... def print_yielded(func: Callable[_P, Iterator[Any]]) -> Callable[_P, None]: ... def pass_none( - func: Callable[Concatenate[_T, _P], _R] + func: Callable[Concatenate[_T, _P], _R], ) -> Callable[Concatenate[_T, _P], _R]: ... def assign_params( func: Callable[..., _R], namespace: dict[str, Any] ) -> partial[_R]: ... def save_method_args( - method: Callable[Concatenate[_S, _P], _R] + method: Callable[Concatenate[_S, _P], _R], ) -> Callable[Concatenate[_S, _P], _R]: ... def except_( *exceptions: type[BaseException], replace: Any = ..., use: Any = ... @@ -123,3 +120,4 @@ def bypass_when( def bypass_unless( check: Any, ) -> Callable[[Callable[[_T], _R]], Callable[[_T], _T | _R]]: ... +def splat(func: Callable[[Unpack[_Ts]], _R]) -> Callable[[tuple[Unpack[_Ts]]], _R]: ... 
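The new ``splat`` stub above is the reason ``TypeVarTuple``/``Unpack`` were pulled in: at runtime, ``splat(func)`` turns an n-argument callable into one that accepts a single n-tuple, and ``Callable[[Unpack[_Ts]], _R] -> Callable[[tuple[Unpack[_Ts]]], _R]`` lets a type checker track each element's type through the wrapping. A self-contained sketch of that behavior (``splat`` is reimplemented inline here; the vendored version routes through a ``functools.partial`` of ``_splat_inner``, but the semantics are the same)::

    import functools

    def splat(func):
        # splat(f)((a, b)) == f(a, b): unpack a single tuple argument.
        @functools.wraps(func)
        def wrapper(args):
            return func(*args)

        return wrapper

    pairs = [(1, 2), (3, 4)]
    assert list(map(splat(lambda a, b: a + b), pairs)) == [3, 7]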
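Back in the jaraco.context hunk, ``_default_filter`` is built by ``_compose_tarfile_filters(tarfile.data_filter, strip_first_component)``; the reduction applies the rightmost filter first, so each member is re-rooted before the stdlib safety filter vets it. A minimal runnable sketch of that composition, assuming Python 3.12+ for ``tarfile.data_filter`` (older interpreters get it from ``backports.tarfile``, per the import hunk) and reconstructing the body of ``strip_first_component``, which the hunk elides::

    import functools
    import tarfile

    def compose_tarfile_filters(*filters):
        # Same fold as the vendored helper: rightmost filter runs first.
        def compose_two(f1, f2):
            return lambda member, path: f1(f2(member, path), path)

        return functools.reduce(compose_two, filters, lambda member, path: member)

    def strip_first_component(member, path):
        # Reconstructed body: drop the leading common directory component.
        _, _, member.name = member.name.partition('/')
        return member

    default_filter = compose_tarfile_filters(tarfile.data_filter, strip_first_component)

    member = tarfile.TarInfo('project-1.0/src/app.py')
    print(default_filter(member, '.').name)  # src/app.py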
diff --git a/setuptools/_vendor/jaraco/text/__init__.py b/setuptools/_vendor/jaraco/text/__init__.py index 0fabd0c3f0..8567200ad7 100644 --- a/setuptools/_vendor/jaraco/text/__init__.py +++ b/setuptools/_vendor/jaraco/text/__init__.py @@ -1,15 +1,17 @@ -import re +import functools import itertools +import re import textwrap -import functools + +from typing import Iterable try: from importlib.resources import files # type: ignore except ImportError: # pragma: nocover from importlib_resources import files # type: ignore -from jaraco.functools import compose, method_cache from jaraco.context import ExceptionTrap +from jaraco.functools import compose, method_cache def substitution(old, new): @@ -554,7 +556,14 @@ def yield_lines(iterable): @yield_lines.register(str) def _(text): - return filter(_nonblank, map(str.strip, text.splitlines())) + return clean(text.splitlines()) + + +def clean(lines: Iterable[str]): + """ + Yield non-blank, non-comment elements from lines. + """ + return filter(_nonblank, map(str.strip, lines)) def drop_comment(line): @@ -622,3 +631,17 @@ def read_newlines(filename, limit=1024): with open(filename, encoding='utf-8') as fp: fp.read(limit) return fp.newlines + + +def lines_from(input): + """ + Generate lines from a :class:`importlib.resources.abc.Traversable` path. + + >>> lines = lines_from(files(__name__).joinpath('Lorem ipsum.txt')) + >>> next(lines) + 'Lorem ipsum...' + >>> next(lines) + 'Curabitur pretium...' + """ + with input.open(encoding='utf-8') as stream: + yield from stream diff --git a/setuptools/_vendor/jaraco/text/show-newlines.py b/setuptools/_vendor/jaraco/text/show-newlines.py index e11d1ba428..ef4cc54c9e 100644 --- a/setuptools/_vendor/jaraco/text/show-newlines.py +++ b/setuptools/_vendor/jaraco/text/show-newlines.py @@ -1,6 +1,5 @@ import autocommand import inflect - from more_itertools import always_iterable import jaraco.text diff --git a/setuptools/_vendor/jaraco/text/to-dvorak.py b/setuptools/_vendor/jaraco/text/to-dvorak.py index a6d5da80b3..14c8981e44 100644 --- a/setuptools/_vendor/jaraco/text/to-dvorak.py +++ b/setuptools/_vendor/jaraco/text/to-dvorak.py @@ -2,5 +2,4 @@ from . import layouts - __name__ == '__main__' and layouts._translate_stream(sys.stdin, layouts.to_dvorak) diff --git a/setuptools/_vendor/jaraco/text/to-qwerty.py b/setuptools/_vendor/jaraco/text/to-qwerty.py index abe2728662..23596fda93 100644 --- a/setuptools/_vendor/jaraco/text/to-qwerty.py +++ b/setuptools/_vendor/jaraco/text/to-qwerty.py @@ -2,5 +2,4 @@ from . 
import layouts - __name__ == '__main__' and layouts._translate_stream(sys.stdin, layouts.to_qwerty) diff --git a/setuptools/_vendor/jaraco_context-6.1.0.dist-info/INSTALLER b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/INSTALLER new file mode 100644 index 0000000000..5c69047b2e --- /dev/null +++ b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/setuptools/_vendor/jaraco.context-5.3.0.dist-info/METADATA b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/METADATA similarity index 68% rename from setuptools/_vendor/jaraco.context-5.3.0.dist-info/METADATA rename to setuptools/_vendor/jaraco_context-6.1.0.dist-info/METADATA index a36f7c5e82..8fb5e53da3 100644 --- a/setuptools/_vendor/jaraco.context-5.3.0.dist-info/METADATA +++ b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/METADATA @@ -1,33 +1,40 @@ -Metadata-Version: 2.1 +Metadata-Version: 2.4 Name: jaraco.context -Version: 5.3.0 +Version: 6.1.0 Summary: Useful decorators and context managers -Home-page: https://github.com/jaraco/jaraco.context -Author: Jason R. Coombs -Author-email: jaraco@jaraco.com +Author-email: "Jason R. Coombs" +License-Expression: MIT +Project-URL: Source, https://github.com/jaraco/jaraco.context Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3 :: Only -Requires-Python: >=3.8 +Requires-Python: >=3.9 +Description-Content-Type: text/x-rst License-File: LICENSE -Requires-Dist: backports.tarfile ; python_version < "3.12" -Provides-Extra: docs -Requires-Dist: sphinx >=3.5 ; extra == 'docs' -Requires-Dist: jaraco.packaging >=9.3 ; extra == 'docs' -Requires-Dist: rst.linker >=1.9 ; extra == 'docs' -Requires-Dist: furo ; extra == 'docs' -Requires-Dist: sphinx-lint ; extra == 'docs' -Requires-Dist: jaraco.tidelift >=1.4 ; extra == 'docs' -Provides-Extra: testing -Requires-Dist: pytest !=8.1.1,>=6 ; extra == 'testing' -Requires-Dist: pytest-checkdocs >=2.4 ; extra == 'testing' -Requires-Dist: pytest-cov ; extra == 'testing' -Requires-Dist: pytest-mypy ; extra == 'testing' -Requires-Dist: pytest-enabler >=2.2 ; extra == 'testing' -Requires-Dist: pytest-ruff >=0.2.1 ; extra == 'testing' -Requires-Dist: portend ; extra == 'testing' +Requires-Dist: backports.tarfile; python_version < "3.12" +Provides-Extra: test +Requires-Dist: pytest!=8.1.*,>=6; extra == "test" +Requires-Dist: jaraco.test>=5.6.0; extra == "test" +Requires-Dist: portend; extra == "test" +Provides-Extra: doc +Requires-Dist: sphinx>=3.5; extra == "doc" +Requires-Dist: jaraco.packaging>=9.3; extra == "doc" +Requires-Dist: rst.linker>=1.9; extra == "doc" +Requires-Dist: furo; extra == "doc" +Requires-Dist: sphinx-lint; extra == "doc" +Requires-Dist: jaraco.tidelift>=1.4; extra == "doc" +Provides-Extra: check +Requires-Dist: pytest-checkdocs>=2.4; extra == "check" +Requires-Dist: pytest-ruff>=0.2.1; sys_platform != "cygwin" and extra == "check" +Provides-Extra: cover +Requires-Dist: pytest-cov; extra == "cover" +Provides-Extra: enabler +Requires-Dist: pytest-enabler>=3.4; extra == "enabler" +Provides-Extra: type +Requires-Dist: pytest-mypy>=1.0.1; extra == "type" +Requires-Dist: mypy<1.19; platform_python_implementation == "PyPy" and extra == "type" +Dynamic: license-file .. 
image:: https://img.shields.io/pypi/v/jaraco.context.svg :target: https://pypi.org/project/jaraco.context @@ -38,14 +45,14 @@ Requires-Dist: portend ; extra == 'testing' :target: https://github.com/jaraco/jaraco.context/actions?query=workflow%3A%22tests%22 :alt: tests -.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json +.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json :target: https://github.com/astral-sh/ruff :alt: Ruff .. image:: https://readthedocs.org/projects/jaracocontext/badge/?version=latest :target: https://jaracocontext.readthedocs.io/en/latest/?badge=latest -.. image:: https://img.shields.io/badge/skeleton-2024-informational +.. image:: https://img.shields.io/badge/skeleton-2025-informational :target: https://blog.jaraco.com/skeleton .. image:: https://tidelift.com/badges/package/pypi/jaraco.context diff --git a/setuptools/_vendor/jaraco_context-6.1.0.dist-info/RECORD b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/RECORD new file mode 100644 index 0000000000..e5169f6659 --- /dev/null +++ b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/RECORD @@ -0,0 +1,9 @@ +jaraco/context/__init__.py,sha256=br1ydYGo1Xr_Pu1anuEdd-QrjUiz_EY5L_5E4C03L4w,9809 +jaraco/context/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jaraco_context-6.1.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +jaraco_context-6.1.0.dist-info/METADATA,sha256=BDXr_FIFXFqZdO0gwXG2RUOD6vnbsVCIFLp62XxZ1xI,4270 +jaraco_context-6.1.0.dist-info/RECORD,, +jaraco_context-6.1.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jaraco_context-6.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +jaraco_context-6.1.0.dist-info/licenses/LICENSE,sha256=l1WhhRlmbl8PTK49qtPXASvK5IpgCzEjfXXp_hNOZoM,1076 +jaraco_context-6.1.0.dist-info/top_level.txt,sha256=0JnN3LfXH4LIRfXL-QFOGCJzQWZO3ELx4R1d_louoQM,7 diff --git a/setuptools/_vendor/packaging-24.2.dist-info/REQUESTED b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/REQUESTED similarity index 100% rename from setuptools/_vendor/packaging-24.2.dist-info/REQUESTED rename to setuptools/_vendor/jaraco_context-6.1.0.dist-info/REQUESTED diff --git a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/WHEEL b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/WHEEL similarity index 65% rename from setuptools/_vendor/jaraco.collections-5.1.0.dist-info/WHEEL rename to setuptools/_vendor/jaraco_context-6.1.0.dist-info/WHEEL index 50e1e84e4a..e7fa31b6f3 100644 --- a/setuptools/_vendor/jaraco.collections-5.1.0.dist-info/WHEEL +++ b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: setuptools (73.0.1) +Generator: setuptools (80.9.0) Root-Is-Purelib: true Tag: py3-none-any diff --git a/setuptools/_vendor/jaraco_context-6.1.0.dist-info/licenses/LICENSE b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000..c891f411dc --- /dev/null +++ b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/licenses/LICENSE @@ -0,0 +1,18 @@ +MIT License + +Copyright (c) 2026 + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 
sell +copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the +following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO +EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/setuptools/_vendor/jaraco.context-5.3.0.dist-info/top_level.txt b/setuptools/_vendor/jaraco_context-6.1.0.dist-info/top_level.txt similarity index 100% rename from setuptools/_vendor/jaraco.context-5.3.0.dist-info/top_level.txt rename to setuptools/_vendor/jaraco_context-6.1.0.dist-info/top_level.txt diff --git a/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/INSTALLER b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/INSTALLER new file mode 100644 index 0000000000..5c69047b2e --- /dev/null +++ b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/METADATA b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/METADATA similarity index 59% rename from setuptools/_vendor/jaraco.functools-4.0.1.dist-info/METADATA rename to setuptools/_vendor/jaraco_functools-4.4.0.dist-info/METADATA index c865140ab2..f2150dd88b 100644 --- a/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/METADATA +++ b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/METADATA @@ -1,34 +1,39 @@ -Metadata-Version: 2.1 +Metadata-Version: 2.4 Name: jaraco.functools -Version: 4.0.1 +Version: 4.4.0 Summary: Functools like those found in stdlib Author-email: "Jason R. 
Coombs" -Project-URL: Homepage, https://github.com/jaraco/jaraco.functools +License-Expression: MIT +Project-URL: Source, https://github.com/jaraco/jaraco.functools Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3 :: Only -Requires-Python: >=3.8 +Requires-Python: >=3.9 Description-Content-Type: text/x-rst License-File: LICENSE -Requires-Dist: more-itertools -Provides-Extra: docs -Requires-Dist: sphinx >=3.5 ; extra == 'docs' -Requires-Dist: sphinx <7.2.5 ; extra == 'docs' -Requires-Dist: jaraco.packaging >=9.3 ; extra == 'docs' -Requires-Dist: rst.linker >=1.9 ; extra == 'docs' -Requires-Dist: furo ; extra == 'docs' -Requires-Dist: sphinx-lint ; extra == 'docs' -Requires-Dist: jaraco.tidelift >=1.4 ; extra == 'docs' -Provides-Extra: testing -Requires-Dist: pytest >=6 ; extra == 'testing' -Requires-Dist: pytest-checkdocs >=2.4 ; extra == 'testing' -Requires-Dist: pytest-cov ; extra == 'testing' -Requires-Dist: pytest-enabler >=2.2 ; extra == 'testing' -Requires-Dist: pytest-ruff >=0.2.1 ; extra == 'testing' -Requires-Dist: jaraco.classes ; extra == 'testing' -Requires-Dist: pytest-mypy ; (platform_python_implementation != "PyPy") and extra == 'testing' +Requires-Dist: more_itertools +Provides-Extra: test +Requires-Dist: pytest!=8.1.*,>=6; extra == "test" +Requires-Dist: jaraco.classes; extra == "test" +Provides-Extra: doc +Requires-Dist: sphinx>=3.5; extra == "doc" +Requires-Dist: jaraco.packaging>=9.3; extra == "doc" +Requires-Dist: rst.linker>=1.9; extra == "doc" +Requires-Dist: furo; extra == "doc" +Requires-Dist: sphinx-lint; extra == "doc" +Requires-Dist: jaraco.tidelift>=1.4; extra == "doc" +Provides-Extra: check +Requires-Dist: pytest-checkdocs>=2.4; extra == "check" +Requires-Dist: pytest-ruff>=0.2.1; sys_platform != "cygwin" and extra == "check" +Provides-Extra: cover +Requires-Dist: pytest-cov; extra == "cover" +Provides-Extra: enabler +Requires-Dist: pytest-enabler>=3.4; extra == "enabler" +Provides-Extra: type +Requires-Dist: pytest-mypy>=1.0.1; extra == "type" +Requires-Dist: mypy<1.19; platform_python_implementation == "PyPy" and extra == "type" +Dynamic: license-file .. image:: https://img.shields.io/pypi/v/jaraco.functools.svg :target: https://pypi.org/project/jaraco.functools @@ -39,14 +44,14 @@ Requires-Dist: pytest-mypy ; (platform_python_implementation != "PyPy") and extr :target: https://github.com/jaraco/jaraco.functools/actions?query=workflow%3A%22tests%22 :alt: tests -.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json +.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json :target: https://github.com/astral-sh/ruff :alt: Ruff .. image:: https://readthedocs.org/projects/jaracofunctools/badge/?version=latest :target: https://jaracofunctools.readthedocs.io/en/latest/?badge=latest -.. image:: https://img.shields.io/badge/skeleton-2024-informational +.. image:: https://img.shields.io/badge/skeleton-2025-informational :target: https://blog.jaraco.com/skeleton .. 
image:: https://tidelift.com/badges/package/pypi/jaraco.functools diff --git a/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/RECORD b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/RECORD new file mode 100644 index 0000000000..3516c9f039 --- /dev/null +++ b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/RECORD @@ -0,0 +1,10 @@ +jaraco/functools/__init__.py,sha256=ZJx9cMs2Nvk2xGUl8OjVGkpjdOaNlSzJrN4dGglgX2g,18599 +jaraco/functools/__init__.pyi,sha256=K4DcbnYIHE5QlMxqf9-cVp-WhycrhuTao4J7O7TMq4Y,3907 +jaraco/functools/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jaraco_functools-4.4.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +jaraco_functools-4.4.0.dist-info/METADATA,sha256=LnnajcNGmSSr46yLIqP-tWkqeb-fR7vIa2U11hhkGEk,2960 +jaraco_functools-4.4.0.dist-info/RECORD,, +jaraco_functools-4.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jaraco_functools-4.4.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +jaraco_functools-4.4.0.dist-info/licenses/LICENSE,sha256=WlfLTbheKi3YjCkGKJCK3VfjRRRJ4KmnH9-zh3b9dZ0,1076 +jaraco_functools-4.4.0.dist-info/top_level.txt,sha256=0JnN3LfXH4LIRfXL-QFOGCJzQWZO3ELx4R1d_louoQM,7 diff --git a/setuptools/_vendor/platformdirs-4.2.2.dist-info/REQUESTED b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/REQUESTED similarity index 100% rename from setuptools/_vendor/platformdirs-4.2.2.dist-info/REQUESTED rename to setuptools/_vendor/jaraco_functools-4.4.0.dist-info/REQUESTED diff --git a/setuptools/_vendor/jaraco.context-5.3.0.dist-info/WHEEL b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/WHEEL similarity index 65% rename from setuptools/_vendor/jaraco.context-5.3.0.dist-info/WHEEL rename to setuptools/_vendor/jaraco_functools-4.4.0.dist-info/WHEEL index bab98d6758..e7fa31b6f3 100644 --- a/setuptools/_vendor/jaraco.context-5.3.0.dist-info/WHEEL +++ b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.43.0) +Generator: setuptools (80.9.0) Root-Is-Purelib: true Tag: py3-none-any diff --git a/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/licenses/LICENSE b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000..f60bd57201 --- /dev/null +++ b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/licenses/LICENSE @@ -0,0 +1,18 @@ +MIT License + +Copyright (c) 2025 + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the +following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO +EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +USE OR OTHER DEALINGS IN THE SOFTWARE. 
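One behavioral note on the jaraco.text hunk above: the ``str`` registration of ``yield_lines`` now routes through the new ``clean`` helper rather than filtering inline. A standalone sketch of that pipeline; ``_nonblank`` is not shown in the hunk, so the predicate below is an assumption consistent with its use (non-empty after stripping, not a ``#`` comment)::

    def _nonblank(line):
        # Assumed predicate; the vendored body is elided from the hunk.
        return bool(line) and not line.startswith('#')

    def clean(lines):
        """Yield non-blank, non-comment elements from lines."""
        return filter(_nonblank, map(str.strip, lines))

    text = '\n# a comment\nfirst\n\n  second  \n'
    assert list(clean(text.splitlines())) == ['first', 'second']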
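Most of the dist-info churn above is mechanical, but two metadata changes are load-bearing: ``Requires-Python`` moves from ``>=3.8`` to ``>=3.9``, and the classifier-based license gives way to an SPDX ``License-Expression`` under Metadata-Version 2.4. To sanity-check a refresh like this against a normally installed (non-vendored) copy, ``importlib.metadata`` exposes both fields::

    from importlib.metadata import metadata

    md = metadata('jaraco.context')  # assumes the wheel is installed on sys.path
    print(md['Requires-Python'])     # >=3.9, per the METADATA hunk above
    print(md['License-Expression'])  # MIT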
diff --git a/setuptools/_vendor/jaraco.functools-4.0.1.dist-info/top_level.txt b/setuptools/_vendor/jaraco_functools-4.4.0.dist-info/top_level.txt similarity index 100% rename from setuptools/_vendor/jaraco.functools-4.0.1.dist-info/top_level.txt rename to setuptools/_vendor/jaraco_functools-4.4.0.dist-info/top_level.txt diff --git a/setuptools/_vendor/more_itertools-10.3.0.dist-info/INSTALLER b/setuptools/_vendor/more_itertools-10.3.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/more_itertools-10.3.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/more_itertools-10.3.0.dist-info/RECORD b/setuptools/_vendor/more_itertools-10.3.0.dist-info/RECORD deleted file mode 100644 index f15f3fcdc5..0000000000 --- a/setuptools/_vendor/more_itertools-10.3.0.dist-info/RECORD +++ /dev/null @@ -1,16 +0,0 @@ -more_itertools-10.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -more_itertools-10.3.0.dist-info/LICENSE,sha256=CfHIyelBrz5YTVlkHqm4fYPAyw_QB-te85Gn4mQ8GkY,1053 -more_itertools-10.3.0.dist-info/METADATA,sha256=BFO90O-fLNiVQMpj7oIS5ztzgJUUQZ3TA32P5HH3N-A,36293 -more_itertools-10.3.0.dist-info/RECORD,, -more_itertools-10.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -more_itertools-10.3.0.dist-info/WHEEL,sha256=rSgq_JpHF9fHR1lx53qwg_1-2LypZE_qmcuXbVUq948,81 -more_itertools/__init__.py,sha256=dtAbGjTDmn_ghiU5YXfhyDy0phAlXVdt5klZA5fUa-Q,149 -more_itertools/__init__.pyi,sha256=5B3eTzON1BBuOLob1vCflyEb2lSd6usXQQ-Cv-hXkeA,43 -more_itertools/__pycache__/__init__.cpython-312.pyc,, -more_itertools/__pycache__/more.cpython-312.pyc,, -more_itertools/__pycache__/recipes.cpython-312.pyc,, -more_itertools/more.py,sha256=1E5kzFncRKTDw0cYv1yRXMgDdunstLQd1QStcnL6U90,148370 -more_itertools/more.pyi,sha256=iXXeqt48Nxe8VGmIWpkVXuKpR2FYNuu2DU8nQLWCCu0,21484 -more_itertools/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -more_itertools/recipes.py,sha256=WedhhfhGVgr6zii8fIbGJVmRTw0ZKRiLKnYBDGJv4nY,28591 -more_itertools/recipes.pyi,sha256=T_mdGpcFdfrP3JSWbwzYP9JyNV-Go-7RPfpxfftAWlA,4617 diff --git a/setuptools/_vendor/more_itertools-10.3.0.dist-info/WHEEL b/setuptools/_vendor/more_itertools-10.3.0.dist-info/WHEEL deleted file mode 100644 index db4a255f3a..0000000000 --- a/setuptools/_vendor/more_itertools-10.3.0.dist-info/WHEEL +++ /dev/null @@ -1,4 +0,0 @@ -Wheel-Version: 1.0 -Generator: flit 3.8.0 -Root-Is-Purelib: true -Tag: py3-none-any diff --git a/setuptools/_vendor/more_itertools-10.8.0.dist-info/INSTALLER b/setuptools/_vendor/more_itertools-10.8.0.dist-info/INSTALLER new file mode 100644 index 0000000000..5c69047b2e --- /dev/null +++ b/setuptools/_vendor/more_itertools-10.8.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/setuptools/_vendor/more_itertools-10.3.0.dist-info/METADATA b/setuptools/_vendor/more_itertools-10.8.0.dist-info/METADATA similarity index 89% rename from setuptools/_vendor/more_itertools-10.3.0.dist-info/METADATA rename to setuptools/_vendor/more_itertools-10.8.0.dist-info/METADATA index fb41b0cfe6..bb7a3db109 100644 --- a/setuptools/_vendor/more_itertools-10.3.0.dist-info/METADATA +++ b/setuptools/_vendor/more_itertools-10.8.0.dist-info/METADATA @@ -1,25 +1,27 @@ -Metadata-Version: 2.1 +Metadata-Version: 2.4 Name: more-itertools -Version: 10.3.0 +Version: 10.8.0 Summary: More routines for operating on iterables, beyond itertools Keywords: itertools,iterator,iteration,filter,peek,peekable,chunk,chunked 
Author-email: Erik Rose -Requires-Python: >=3.8 +Requires-Python: >=3.9 Description-Content-Type: text/x-rst +License-Expression: MIT Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Natural Language :: English -Classifier: License :: OSI Approved :: MIT License Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Software Development :: Libraries +License-File: LICENSE +Project-URL: Documentation, https://more-itertools.readthedocs.io/en/stable/ Project-URL: Homepage, https://github.com/more-itertools/more-itertools ============== @@ -84,6 +86,7 @@ Python iterables. | | `interleave `_, | | | `interleave_longest `_, | | | `interleave_evenly `_, | +| | `interleave_randomly `_, | | | `zip_offset `_, | | | `zip_equal `_, | | | `zip_broadcast `_, | @@ -104,6 +107,8 @@ Python iterables. | | `is_sorted `_, | | | `all_equal `_, | | | `all_unique `_, | +| | `argmin `_, | +| | `argmax `_, | | | `minmax `_, | | | `first_true `_, | | | `quantify `_, | @@ -123,6 +128,7 @@ Python iterables. | | `filter_map `_, | | | `iter_suppress `_, | | | `nth_or_last `_, | +| | `extract `_, | | | `unique_in_window `_, | | | `before_and_after `_, | | | `nth `_, | @@ -141,36 +147,46 @@ Python iterables. | | `idft `_, | | | `convolve `_, | | | `dotproduct `_, | -| | `factor `_, | | | `matmul `_, | | | `polynomial_from_roots `_, | | | `polynomial_derivative `_, | | | `polynomial_eval `_, | -| | `sieve `_, | | | `sum_of_squares `_, | +| | `running_median `_, | | | `totient `_ | +------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| Combinatorics | `distinct_permutations `_, | -| | `distinct_combinations `_, | -| | `circular_shifts `_, | +| Integer math | `factor `_, | +| | `is_prime `_, | +| | `multinomial `_, | +| | `nth_prime `_, | +| | `sieve `_ | ++------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| Combinatorics | `circular_shifts `_, | +| | `derangements `_, | +| | `gray_product `_, | +| | `outer_product `_, | | | `partitions `_, | | | `set_partitions `_, | -| | `product_index `_, | +| | `powerset `_, | +| | `powerset_of_sets `_ | +| +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| | `distinct_combinations `_, | +| | `distinct_permutations `_ | +| +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | `combination_index `_, | -| | `permutation_index `_, | | | `combination_with_replacement_index `_, | -| | `gray_product `_, | -| | `outer_product `_, | -| | `powerset `_, | -| | `powerset_of_sets `_, | 
-| | `random_product `_, | -| | `random_permutation `_, | +| | `permutation_index `_, | +| | `product_index `_ | +| +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ +| | `nth_combination `_, | +| | `nth_combination_with_replacement `_, | +| | `nth_permutation `_, | +| | `nth_product `_ | +| +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | `random_combination `_, | | | `random_combination_with_replacement `_, | -| | `nth_product `_, | -| | `nth_permutation `_, | -| | `nth_combination `_, | -| | `nth_combination_with_replacement `_ | +| | `random_permutation `_, | +| | `random_product `_ | +------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Wrapping | `always_iterable `_, | | | `always_reversible `_, | @@ -185,6 +201,7 @@ Python iterables. | | `numeric_range `_, | | | `side_effect `_, | | | `iterate `_, | +| | `loops `_, | | | `difference `_, | | | `make_decorator `_, | | | `SequenceView `_, | @@ -194,7 +211,7 @@ Python iterables. | | `consume `_, | | | `tabulate `_, | | | `repeatfunc `_, | -| | `reshape `_ | +| | `reshape `_, | | | `doublestarmap `_ | +------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -247,7 +264,7 @@ Blog posts about ``more-itertools``: * `Yo, I heard you like decorators `__ * `Tour of Python Itertools `__ (`Alternate `__) -* `Real-World Python More Itertools `_ +* `Real-World Python More Itertools `_ Development diff --git a/setuptools/_vendor/more_itertools-10.8.0.dist-info/RECORD b/setuptools/_vendor/more_itertools-10.8.0.dist-info/RECORD new file mode 100644 index 0000000000..61ef7d6dd0 --- /dev/null +++ b/setuptools/_vendor/more_itertools-10.8.0.dist-info/RECORD @@ -0,0 +1,13 @@ +more_itertools-10.8.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +more_itertools-10.8.0.dist-info/METADATA,sha256=arNRUUWr5YsGfwh8hnYxz0z11lP-2BuWQu4SCGw5BLg,39413 +more_itertools-10.8.0.dist-info/RECORD,, +more_itertools-10.8.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +more_itertools-10.8.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 +more_itertools-10.8.0.dist-info/licenses/LICENSE,sha256=CfHIyelBrz5YTVlkHqm4fYPAyw_QB-te85Gn4mQ8GkY,1053 +more_itertools/__init__.py,sha256=5F7E_zpoGcEBW_T_3WE0WYYt8j-gJodIuiBcOJxrOv8,149 +more_itertools/__init__.pyi,sha256=5B3eTzON1BBuOLob1vCflyEb2lSd6usXQQ-Cv-hXkeA,43 +more_itertools/more.py,sha256=mNPKKu5UI7lRL460vgm0QTCWFiGMVCMosSPxVSdibos,163690 +more_itertools/more.pyi,sha256=fpEgNX3O66wY5cnT-s5VYDKNUpAcaCyU3iP84It3OOM,27119 +more_itertools/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +more_itertools/recipes.py,sha256=Ma-kuBNZDFhaQDbIJgRmnrG86WzaupbOyUV3v8je3xw,41811 +more_itertools/recipes.pyi,sha256=LNRwN-OL3nkMfQAqx-PPc1fBaetUObb_Z6mdePyzh1c,6226 diff --git a/setuptools/_vendor/tomli-2.0.1.dist-info/REQUESTED b/setuptools/_vendor/more_itertools-10.8.0.dist-info/REQUESTED similarity index 100% rename from setuptools/_vendor/tomli-2.0.1.dist-info/REQUESTED rename to 
setuptools/_vendor/more_itertools-10.8.0.dist-info/REQUESTED diff --git a/setuptools/_vendor/packaging-24.2.dist-info/WHEEL b/setuptools/_vendor/more_itertools-10.8.0.dist-info/WHEEL similarity index 71% rename from setuptools/_vendor/packaging-24.2.dist-info/WHEEL rename to setuptools/_vendor/more_itertools-10.8.0.dist-info/WHEEL index e3c6feefa2..d8b9936dad 100644 --- a/setuptools/_vendor/packaging-24.2.dist-info/WHEEL +++ b/setuptools/_vendor/more_itertools-10.8.0.dist-info/WHEEL @@ -1,4 +1,4 @@ Wheel-Version: 1.0 -Generator: flit 3.10.1 +Generator: flit 3.12.0 Root-Is-Purelib: true Tag: py3-none-any diff --git a/setuptools/_vendor/more_itertools-10.3.0.dist-info/LICENSE b/setuptools/_vendor/more_itertools-10.8.0.dist-info/licenses/LICENSE similarity index 100% rename from setuptools/_vendor/more_itertools-10.3.0.dist-info/LICENSE rename to setuptools/_vendor/more_itertools-10.8.0.dist-info/licenses/LICENSE diff --git a/setuptools/_vendor/more_itertools/__init__.py b/setuptools/_vendor/more_itertools/__init__.py index 9c4662fc31..24216c5c1f 100644 --- a/setuptools/_vendor/more_itertools/__init__.py +++ b/setuptools/_vendor/more_itertools/__init__.py @@ -3,4 +3,4 @@ from .more import * # noqa from .recipes import * # noqa -__version__ = '10.3.0' +__version__ = '10.8.0' diff --git a/setuptools/_vendor/more_itertools/more.py b/setuptools/_vendor/more_itertools/more.py index 7b481907da..bf501956ae 100755 --- a/setuptools/_vendor/more_itertools/more.py +++ b/setuptools/_vendor/more_itertools/more.py @@ -3,8 +3,9 @@ from collections import Counter, defaultdict, deque, abc from collections.abc import Sequence +from contextlib import suppress from functools import cached_property, partial, reduce, wraps -from heapq import heapify, heapreplace, heappop +from heapq import heapify, heapreplace from itertools import ( chain, combinations, @@ -14,6 +15,7 @@ dropwhile, groupby, islice, + permutations, repeat, starmap, takewhile, @@ -21,10 +23,20 @@ zip_longest, product, ) -from math import comb, e, exp, factorial, floor, fsum, log, perm, tau +from math import comb, e, exp, factorial, floor, fsum, log, log1p, perm, tau +from math import ceil from queue import Empty, Queue -from random import random, randrange, uniform -from operator import itemgetter, mul, sub, gt, lt, ge, le +from random import random, randrange, shuffle, uniform +from operator import ( + attrgetter, + is_not, + itemgetter, + lt, + mul, + neg, + sub, + gt, +) from sys import hexversion, maxsize from time import monotonic @@ -33,9 +45,12 @@ _zip_equal, UnequalIterablesError, consume, + first_true, flatten, - pairwise, + is_prime, + nth, powerset, + sieve, take, unique_everseen, all_equal, @@ -50,6 +65,8 @@ 'all_unique', 'always_iterable', 'always_reversible', + 'argmax', + 'argmin', 'bucket', 'callback_iter', 'chunked', @@ -63,6 +80,7 @@ 'consumer', 'count_cycle', 'countable', + 'derangements', 'dft', 'difference', 'distinct_combinations', @@ -74,6 +92,7 @@ 'duplicates_justseen', 'classify_unique', 'exactly_n', + 'extract', 'filter_except', 'filter_map', 'first', @@ -86,6 +105,7 @@ 'interleave', 'interleave_evenly', 'interleave_longest', + 'interleave_randomly', 'intersperse', 'is_sorted', 'islice_extended', @@ -104,6 +124,7 @@ 'minmax', 'nth_or_last', 'nth_permutation', + 'nth_prime', 'nth_product', 'nth_combination_with_replacement', 'numeric_range', @@ -156,7 +177,34 @@ ] # math.sumprod is available for Python 3.12+ -_fsumprod = getattr(math, 'sumprod', lambda x, y: fsum(map(mul, x, y))) +try: + from math import sumprod as 
_fsumprod + +except ImportError: # pragma: no cover + # Extended precision algorithms from T. J. Dekker, + # "A Floating-Point Technique for Extending the Available Precision" + # https://csclub.uwaterloo.ca/~pbarfuss/dekker1971.pdf + # Formulas: (5.5) (5.6) and (5.8). Code: mul12() + + def dl_split(x: float): + "Split a float into two half-precision components." + t = x * 134217729.0 # Veltkamp constant = 2.0 ** 27 + 1 + hi = t - (t - x) + lo = x - hi + return hi, lo + + def dl_mul(x, y): + "Lossless multiplication." + xx_hi, xx_lo = dl_split(x) + yy_hi, yy_lo = dl_split(y) + p = xx_hi * yy_hi + q = xx_hi * yy_lo + xx_lo * yy_hi + z = p + q + zz = p - z + q + xx_lo * yy_lo + return z, zz + + def _fsumprod(p, q): + return fsum(chain.from_iterable(map(dl_mul, p, q))) def chunked(iterable, n, strict=False): @@ -189,7 +237,7 @@ def ret(): raise ValueError('iterable is not divisible by n.') yield chunk - return iter(ret()) + return ret() else: return iterator @@ -215,8 +263,8 @@ def first(iterable, default=_marker): return item if default is _marker: raise ValueError( - 'first() was called on an empty iterable, and no ' - 'default value was provided.' + 'first() was called on an empty iterable, ' + 'and no default value was provided.' ) return default @@ -237,15 +285,14 @@ def last(iterable, default=_marker): if isinstance(iterable, Sequence): return iterable[-1] # Work around https://bugs.python.org/issue38525 - elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0): + if getattr(iterable, '__reversed__', None): return next(reversed(iterable)) - else: - return deque(iterable, maxlen=1)[-1] + return deque(iterable, maxlen=1)[-1] except (IndexError, TypeError, StopIteration): if default is _marker: raise ValueError( - 'last() was called on an empty iterable, and no default was ' - 'provided.' + 'last() was called on an empty iterable, ' + 'and no default value was provided.' ) return default @@ -467,34 +514,46 @@ def wrapper(*args, **kwargs): def ilen(iterable): """Return the number of items in *iterable*. - >>> ilen(x for x in range(1000000) if x % 3 == 0) - 333334 + For example, there are 168 prime numbers below 1,000: + + >>> ilen(sieve(1000)) + 168 + + Equivalent to, but faster than:: + + def ilen(iterable): + count = 0 + for _ in iterable: + count += 1 + return count - This consumes the iterable, so handle with care. + This fully consumes the iterable, so handle with care. """ - # This approach was selected because benchmarks showed it's likely the - # fastest of the known implementations at the time of writing. - # See GitHub tracker: #236, #230. - counter = count() - deque(zip(iterable, counter), maxlen=0) - return next(counter) + # This is the "most beautiful of the fast variants" of this function. + # If you think you can improve on it, please ensure that your version + # is both 10x faster and 10x more beautiful. + return sum(compress(repeat(1), zip(iterable))) def iterate(func, start): """Return ``start``, ``func(start)``, ``func(func(start))``, ... - >>> from itertools import islice - >>> list(islice(iterate(lambda x: 2*x, 1), 10)) + Produces an infinite iterator. To add a stopping condition, + use :func:`take`, ``takewhile``, or :func:`takewhile_inclusive`:. 
+ + >>> take(10, iterate(lambda x: 2*x, 1)) [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + >>> collatz = lambda x: 3*x + 1 if x%2==1 else x // 2 + >>> list(takewhile_inclusive(lambda x: x!=1, iterate(collatz, 10))) + [10, 5, 16, 8, 4, 2, 1] + """ - while True: - yield start - try: + with suppress(StopIteration): + while True: + yield start start = func(start) - except StopIteration: - break def with_iter(context_manager): @@ -528,7 +587,7 @@ def one(iterable, too_short=None, too_long=None): >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... - ValueError: too many items in iterable (expected 1)' + ValueError: too few items in iterable (expected 1)' >>> too_short = IndexError('too few items') >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): @@ -556,27 +615,16 @@ def one(iterable, too_short=None, too_long=None): contents less destructively. """ - it = iter(iterable) - - try: - first_value = next(it) - except StopIteration as exc: - raise ( - too_short or ValueError('too few items in iterable (expected 1)') - ) from exc - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - - return first_value + iterator = iter(iterable) + for first in iterator: + for second in iterator: + msg = ( + f'Expected exactly one item in iterable, but got {first!r}, ' + f'{second!r}, and perhaps more.' + ) + raise too_long or ValueError(msg) + return first + raise too_short or ValueError('too few items in iterable (expected 1)') def raise_(exception, *args): @@ -586,8 +634,8 @@ def raise_(exception, *args): def strictly_n(iterable, n, too_short=None, too_long=None): """Validate that *iterable* has exactly *n* items and return them if it does. If it has fewer than *n* items, call function *too_short* - with those items. If it has more than *n* items, call function - *too_long* with the first ``n + 1`` items. + with the actual number of items. If it has more than *n* items, call function + *too_long* with the number ``n + 1``. >>> iterable = ['a', 'b', 'c', 'd'] >>> n = 4 @@ -633,31 +681,29 @@ def strictly_n(iterable, n, too_short=None, too_long=None): if too_short is None: too_short = lambda item_count: raise_( ValueError, - 'Too few items in iterable (got {})'.format(item_count), + f'Too few items in iterable (got {item_count})', ) if too_long is None: too_long = lambda item_count: raise_( ValueError, - 'Too many items in iterable (got at least {})'.format(item_count), + f'Too many items in iterable (got at least {item_count})', ) it = iter(iterable) - for i in range(n): - try: - item = next(it) - except StopIteration: - too_short(i) - return - else: - yield item - try: - next(it) - except StopIteration: - pass - else: + sent = 0 + for item in islice(it, n): + yield item + sent += 1 + + if sent < n: + too_short(sent) + return + + for item in it: too_long(n + 1) + return def distinct_permutations(iterable, r=None): @@ -666,15 +712,15 @@ def distinct_permutations(iterable, r=None): >>> sorted(distinct_permutations([1, 0, 1])) [(0, 1, 1), (1, 0, 1), (1, 1, 0)] - Equivalent to ``set(permutations(iterable))``, except duplicates are not - generated and thrown away. For larger input sequences this is much more - efficient. + Equivalent to yielding from ``set(permutations(iterable))``, except + duplicates are not generated and thrown away. 
For larger input sequences + this is much more efficient. Duplicate permutations arise when there are duplicated elements in the input iterable. The number of items returned is `n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of items input, and each `x_i` is the count of a distinct item in the input - sequence. + sequence. The function :func:`multinomial` computes this directly. If *r* is given, only the *r*-length permutations are yielded. @@ -683,6 +729,25 @@ def distinct_permutations(iterable, r=None): >>> sorted(distinct_permutations(range(3), r=2)) [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + *iterable* need not be sortable, but note that using equal (``x == y``) + but non-identical (``id(x) != id(y)``) elements may produce surprising + behavior. For example, ``1`` and ``True`` are equal but non-identical: + + >>> list(distinct_permutations([1, True, '3'])) # doctest: +SKIP + [ + (1, True, '3'), + (1, '3', True), + ('3', 1, True) + ] + >>> list(distinct_permutations([1, 2, '3'])) # doctest: +SKIP + [ + (1, 2, '3'), + (1, '3', 2), + (2, 1, '3'), + (2, '3', 1), + ('3', 1, 2), + ('3', 2, 1) + ] """ # Algorithm: https://w.wiki/Qai @@ -749,18 +814,110 @@ def _partial(A, r): i += 1 head[i:], tail[:] = tail[: r - i], tail[r - i :] - items = sorted(iterable) + items = list(iterable) + + try: + items.sort() + sortable = True + except TypeError: + sortable = False + + indices_dict = defaultdict(list) + + for item in items: + indices_dict[items.index(item)].append(item) + + indices = [items.index(item) for item in items] + indices.sort() + + equivalent_items = {k: cycle(v) for k, v in indices_dict.items()} + + def permuted_items(permuted_indices): + return tuple( + next(equivalent_items[index]) for index in permuted_indices + ) size = len(items) if r is None: r = size + # functools.partial(_partial, ... ) + algorithm = _full if (r == size) else partial(_partial, r=r) + if 0 < r <= size: - return _full(items) if (r == size) else _partial(items, r) + if sortable: + return algorithm(items) + else: + return ( + permuted_items(permuted_indices) + for permuted_indices in algorithm(indices) + ) return iter(() if r else ((),)) +def derangements(iterable, r=None): + """Yield successive derangements of the elements in *iterable*. + + A derangement is a permutation in which no element appears at its original + index. In other words, a derangement is a permutation that has no fixed points. + + Suppose Alice, Bob, Carol, and Dave are playing Secret Santa. + The code below outputs all of the different ways to assign gift recipients + such that nobody is assigned to himself or herself: + + >>> for d in derangements(['Alice', 'Bob', 'Carol', 'Dave']): + ... print(', '.join(d)) + Bob, Alice, Dave, Carol + Bob, Carol, Dave, Alice + Bob, Dave, Alice, Carol + Carol, Alice, Dave, Bob + Carol, Dave, Alice, Bob + Carol, Dave, Bob, Alice + Dave, Alice, Bob, Carol + Dave, Carol, Alice, Bob + Dave, Carol, Bob, Alice + + If *r* is given, only the *r*-length derangements are yielded. + + >>> sorted(derangements(range(3), 2)) + [(1, 0), (1, 2), (2, 0)] + >>> sorted(derangements([0, 2, 3], 2)) + [(2, 0), (2, 3), (3, 0)] + + Elements are treated as unique based on their position, not on their value. + + Consider the Secret Santa example with two *different* people who have + the *same* name. 
Then there are two valid gift assignments even though + it might appear that a person is assigned to themselves: + + >>> names = ['Alice', 'Bob', 'Bob'] + >>> list(derangements(names)) + [('Bob', 'Bob', 'Alice'), ('Bob', 'Alice', 'Bob')] + + To avoid confusion, make the inputs distinct: + + >>> deduped = [f'{name}{index}' for index, name in enumerate(names)] + >>> list(derangements(deduped)) + [('Bob1', 'Bob2', 'Alice0'), ('Bob2', 'Alice0', 'Bob1')] + + The number of derangements of a set of size *n* is known as the + "subfactorial of n". For n > 0, the subfactorial is: + ``round(math.factorial(n) / math.e)``. + + References: + + * Article: https://www.numberanalytics.com/blog/ultimate-guide-to-derangements-in-combinatorics + * Sizes: https://oeis.org/A000166 + """ + xs = tuple(iterable) + ys = tuple(range(len(xs))) + return compress( + permutations(xs, r=r), + map(all, map(map, repeat(is_not), repeat(ys), permutations(ys, r=r))), + ) + + def intersperse(e, iterable, n=1): """Intersperse filler element *e* among the items in *iterable*, leaving *n* items between each filler element. @@ -855,10 +1012,10 @@ def windowed(seq, n, fillvalue=None, step=1): if step < 1: raise ValueError('step must be >= 1') - iterable = iter(seq) + iterator = iter(seq) # Generate first window - window = deque(islice(iterable, n), maxlen=n) + window = deque(islice(iterator, n), maxlen=n) # Deal with the first window not being full if not window: @@ -871,7 +1028,7 @@ def windowed(seq, n, fillvalue=None, step=1): # Create the filler for the next windows. The padding ensures # we have just enough elements to fill the last window. padding = (fillvalue,) * (n - 1 if step >= n else step - 1) - filler = map(window.append, chain(iterable, padding)) + filler = map(window.append, chain(iterator, padding)) # Generate the rest of the windows for _ in islice(filler, step - 1, None, step): @@ -892,7 +1049,7 @@ def substrings(iterable): """ # The length-1 substrings seq = [] - for item in iter(iterable): + for item in iterable: seq.append(item) yield (item,) seq = tuple(seq) @@ -1025,7 +1182,7 @@ def __iter__(self): if self._validator(item_value): self._cache[item_value].append(item) - yield from self._cache.keys() + return iter(self._cache) def __getitem__(self, value): if not self._validator(value): @@ -1071,10 +1228,8 @@ def spy(iterable, n=1): [1, 2, 3, 4, 5] """ - it = iter(iterable) - head = take(n, it) - - return head.copy(), chain(head, it) + p, q = tee(iterable) + return take(n, q), p def interleave(*iterables): @@ -1103,8 +1258,10 @@ def interleave_longest(*iterables): is large). """ - i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker)) - return (x for x in i if x is not _marker) + for xs in zip_longest(*iterables, fillvalue=_marker): + for x in xs: + if x is not _marker: + yield x def interleave_evenly(iterables, lengths=None): @@ -1173,6 +1330,29 @@ def interleave_evenly(iterables, lengths=None): errors[i] += delta_primary +def interleave_randomly(*iterables): + """Repeatedly select one of the input *iterables* at random and yield the next + item from it. + + >>> iterables = [1, 2, 3], 'abc', (True, False, None) + >>> list(interleave_randomly(*iterables)) # doctest: +SKIP + ['a', 'b', 1, 'c', True, False, None, 2, 3] + + The relative order of the items in each input iterable will preserved. Note the + sequences of items with this property are not equally likely to be generated. 
+ + """ + iterators = [iter(e) for e in iterables] + while iterators: + idx = randrange(len(iterators)) + try: + yield next(iterators[idx]) + except StopIteration: + # equivalent to `list.pop` but slightly faster + iterators[idx] = iterators[-1] + del iterators[-1] + + def collapse(iterable, base_type=None, levels=None): """Flatten an iterable with multiple levels of nesting (e.g., a list of lists of tuples) into non-iterable types. @@ -1323,7 +1503,7 @@ def ret(): raise ValueError("seq is not divisible by n.") yield _slice - return iter(ret()) + return ret() else: return iterator @@ -1398,7 +1578,7 @@ def split_before(iterable, pred, maxsplit=-1): if pred(item) and buf: yield buf if maxsplit == 1: - yield [item] + list(it) + yield [item, *it] return buf = [] maxsplit -= 1 @@ -1479,7 +1659,7 @@ def split_when(iterable, pred, maxsplit=-1): if pred(cur_item, next_item): yield buf if maxsplit == 1: - yield [next_item] + list(it) + yield [next_item, *it] return buf = [] maxsplit -= 1 @@ -1504,15 +1684,15 @@ def split_into(iterable, sizes): [[1, 2], [3, 4, 5]] If the sum of *sizes* is larger than the length of *iterable*, fewer items - will be returned in the iteration that overruns *iterable* and further + will be returned in the iteration that overruns the *iterable* and further lists will be empty: >>> list(split_into([1,2,3,4], [1,2,3,4])) [[1], [2, 3], [4], []] When a ``None`` object is encountered in *sizes*, the returned list will - contain items up to the end of *iterable* the same way that itertools.slice - does: + contain items up to the end of *iterable* the same way that + :func:`itertools.slice` does: >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None])) [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]] @@ -1559,25 +1739,25 @@ def padded(iterable, fillvalue=None, n=None, next_multiple=False): [1, 2, 3, 4, 5] """ - iterable = iter(iterable) - iterable_with_repeat = chain(iterable, repeat(fillvalue)) + iterator = iter(iterable) + iterator_with_repeat = chain(iterator, repeat(fillvalue)) if n is None: - return iterable_with_repeat + return iterator_with_repeat elif n < 1: raise ValueError('n must be at least 1') elif next_multiple: def slice_generator(): - for first in iterable: + for first in iterator: yield (first,) - yield islice(iterable_with_repeat, n - 1) + yield islice(iterator_with_repeat, n - 1) # While elements exist produce slices of size n return chain.from_iterable(slice_generator()) else: # Ensure the first batch is at least size n then iterate - return chain(islice(iterable_with_repeat, n), iterable) + return chain(islice(iterator_with_repeat, n), iterator) def repeat_each(iterable, n=2): @@ -1674,7 +1854,7 @@ def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None): def zip_equal(*iterables): - """``zip`` the input *iterables* together, but raise + """``zip`` the input *iterables* together but raise ``UnequalIterablesError`` if they aren't all the same length. >>> it_1 = range(3) @@ -1743,7 +1923,9 @@ def zip_offset(*iterables, offsets, longest=False, fillvalue=None): return zip(*staggered) -def sort_together(iterables, key_list=(0,), key=None, reverse=False): +def sort_together( + iterables, key_list=(0,), key=None, reverse=False, strict=False +): """Return the input iterables sorted together, with *key_list* as the priority for sorting. All iterables are trimmed to the length of the shortest one. 
@@ -1782,6 +1964,10 @@ def sort_together(iterables, key_list=(0,), key=None, reverse=False): >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True) [(3, 2, 1), ('a', 'b', 'c')] + If the *strict* keyword argument is ``True``, then + ``UnequalIterablesError`` will be raised if any of the iterables have + different lengths. + """ if key is None: # if there is no key function, the key argument to sorted is an @@ -1804,8 +1990,9 @@ def sort_together(iterables, key_list=(0,), key=None, reverse=False): *get_key_items(zipped_items) ) + zipper = zip_equal if strict else zip return list( - zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse)) + zipper(*sorted(zipper(*iterables), key=key_argument, reverse=reverse)) ) @@ -1829,7 +2016,7 @@ def unzip(iterable): :func:`itertools.tee` and thus may require significant storage. """ - head, iterable = spy(iter(iterable)) + head, iterable = spy(iterable) if not head: # empty iterable, e.g. zip([], [], []) return () @@ -1837,25 +2024,17 @@ def unzip(iterable): head = head[0] iterables = tee(iterable, len(head)) - def itemgetter(i): - def getter(obj): - try: - return obj[i] - except IndexError: - # basically if we have an iterable like - # iter([(1, 2, 3), (4, 5), (6,)]) - # the second unzipped iterable would fail at the third tuple - # since it would try to access tup[1] - # same with the third unzipped iterable and the second tuple - # to support these "improperly zipped" iterables, - # we create a custom itemgetter - # which just stops the unzipped iterables - # at first length mismatch - raise StopIteration - - return getter - - return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables)) + # If we have an iterable like iter([(1, 2, 3), (4, 5), (6,)]), + # the second unzipped iterable fails at the third tuple since + # it tries to access (6,)[1]. + # Same with the third unzipped iterable and the second tuple. + # To support these "improperly zipped" iterables, we suppress + # the IndexError, which just stops the unzipped iterables at + # first length mismatch. + return tuple( + iter_suppress(map(itemgetter(i), it), IndexError) + for i, it in enumerate(iterables) + ) def divide(n, iterable): @@ -2080,7 +2259,7 @@ class numeric_range(abc.Sequence, abc.Hashable): >>> list(numeric_range(3, -1, -1.0)) [3.0, 2.0, 1.0, 0.0] - Be aware of the limitations of floating point numbers; the representation + Be aware of the limitations of floating-point numbers; the representation of the yielded numbers may be surprising. 
``datetime.datetime`` objects can be used for *start* and *stop*, if *step* @@ -2113,13 +2292,11 @@ def __init__(self, *args): self._start, self._stop, self._step = args elif argc == 0: raise TypeError( - 'numeric_range expected at least ' - '1 argument, got {}'.format(argc) + f'numeric_range expected at least 1 argument, got {argc}' ) else: raise TypeError( - 'numeric_range expected at most ' - '3 arguments, got {}'.format(argc) + f'numeric_range expected at most 3 arguments, got {argc}' ) self._zero = type(self._step)(0) @@ -2182,7 +2359,7 @@ def __getitem__(self, key): else: raise TypeError( 'numeric range indices must be ' - 'integers or slices, not {}'.format(type(key).__name__) + f'integers or slices, not {type(key).__name__}' ) def __hash__(self): @@ -2223,13 +2400,10 @@ def __reduce__(self): def __repr__(self): if self._step == 1: - return "numeric_range({}, {})".format( - repr(self._start), repr(self._stop) - ) - else: - return "numeric_range({}, {}, {})".format( - repr(self._start), repr(self._stop), repr(self._step) - ) + return f"numeric_range({self._start!r}, {self._stop!r})" + return ( + f"numeric_range({self._start!r}, {self._stop!r}, {self._step!r})" + ) def __reversed__(self): return iter( @@ -2253,7 +2427,7 @@ def index(self, value): if r == self._zero: return int(q) - raise ValueError("{} is not in numeric range".format(value)) + raise ValueError(f"{value} is not in numeric range") def _get_by_index(self, i): if i < 0: @@ -2272,11 +2446,11 @@ def count_cycle(iterable, n=None): [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')] """ - iterable = tuple(iterable) - if not iterable: + seq = tuple(iterable) + if not seq: return iter(()) counter = count() if n is None else range(n) - return ((i, item) for i in counter for item in iterable) + return zip(repeat_each(counter, len(seq)), cycle(seq)) def mark_ends(iterable): @@ -2300,20 +2474,13 @@ def mark_ends(iterable): 300 """ it = iter(iterable) - - try: - b = next(it) - except StopIteration: - return - - try: - for i in count(): + for a in it: + first = True + for b in it: + yield first, False, a a = b - b = next(it) - yield i == 0, False, a - - except StopIteration: - yield i == 0, True, a + first = False + yield first, True, a def locate(iterable, pred=bool, window_size=None): @@ -2365,7 +2532,7 @@ def locate(iterable, pred=bool, window_size=None): def longest_common_prefix(iterables): - """Yield elements of the longest common prefix amongst given *iterables*. + """Yield elements of the longest common prefix among given *iterables*. >>> ''.join(longest_common_prefix(['abcd', 'abc', 'abf'])) 'ab' @@ -2439,8 +2606,8 @@ class islice_extended: """An extension of :func:`itertools.islice` that supports negative values for *stop*, *start*, and *step*. 
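
# A quick equivalence check for the count_cycle() rewrite above: repeating
# each counter value len(seq) times and zipping against cycle(seq) produces
# the same pairs as the removed nested comprehension. Stdlib-only sketch;
# repeat_each_sketch stands in for the library's repeat_each().
from itertools import chain, cycle, repeat

def repeat_each_sketch(iterable, n):
    return chain.from_iterable(map(repeat, iterable, repeat(n)))

seq = ('A', 'B')
new = list(zip(repeat_each_sketch(range(3), len(seq)), cycle(seq)))
old = [(i, item) for i in range(3) for item in seq]
assert new == old == [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
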
- >>> iterable = iter('abcdefgh') - >>> list(islice_extended(iterable, -4, -1)) + >>> iterator = iter('abcdefgh') + >>> list(islice_extended(iterator, -4, -1)) ['e', 'f', 'g'] Slices with negative values require some caching of *iterable*, but this @@ -2454,8 +2621,8 @@ class islice_extended: You can also use slice notation directly: - >>> iterable = map(str, count()) - >>> it = islice_extended(iterable)[10:20:2] + >>> iterator = map(str, count()) + >>> it = islice_extended(iterator)[10:20:2] >>> list(it) ['10', '12', '14', '16', '18'] @@ -2464,19 +2631,19 @@ class islice_extended: def __init__(self, iterable, *args): it = iter(iterable) if args: - self._iterable = _islice_helper(it, slice(*args)) + self._iterator = _islice_helper(it, slice(*args)) else: - self._iterable = it + self._iterator = it def __iter__(self): return self def __next__(self): - return next(self._iterable) + return next(self._iterator) def __getitem__(self, key): if isinstance(key, slice): - return islice_extended(_islice_helper(self._iterable, key)) + return islice_extended(_islice_helper(self._iterator, key)) raise TypeError('islice_extended.__getitem__ argument must be a slice') @@ -2512,8 +2679,15 @@ def _islice_helper(it, s): if n <= 0: return - for index, item in islice(cache, 0, n, step): - yield item + for index in range(n): + if index % step == 0: + # pop and yield the item. + # We don't want to use an intermediate variable + # it would extend the lifetime of the current item + yield cache.popleft()[1] + else: + # just pop and discard the item + cache.popleft() elif (stop is not None) and (stop < 0): # Advance to the start position next(islice(it, start, start), None) @@ -2523,9 +2697,14 @@ def _islice_helper(it, s): cache = deque(islice(it, -stop), maxlen=-stop) for index, item in enumerate(it): - cached_item = cache.popleft() if index % step == 0: - yield cached_item + # pop and yield the item. + # We don't want to use an intermediate variable + # it would extend the lifetime of the current item + yield cache.popleft() + else: + # just pop and discard the item + cache.popleft() cache.append(item) else: # When both start and stop are positive we have the normal case @@ -2595,7 +2774,7 @@ def always_reversible(iterable): return reversed(list(iterable)) -def consecutive_groups(iterable, ordering=lambda x: x): +def consecutive_groups(iterable, ordering=None): """Yield groups of consecutive items using :func:`itertools.groupby`. The *ordering* function determines whether two items are adjacent by returning their position. @@ -2612,12 +2791,11 @@ def consecutive_groups(iterable, ordering=lambda x: x): [30, 31, 32, 33] [40] - For finding runs of adjacent letters, try using the :meth:`index` method - of a string of letters: + To find runs of adjacent letters, apply :func:`ord` function + to convert letters to ordinals. - >>> from string import ascii_lowercase >>> iterable = 'abcdfgilmnop' - >>> ordering = ascii_lowercase.index + >>> ordering = ord >>> for group in consecutive_groups(iterable, ordering): ... 
print(list(group)) ['a', 'b', 'c', 'd'] @@ -2637,9 +2815,12 @@ def consecutive_groups(iterable, ordering=lambda x: x): [[1, 2], [11, 12], [21, 22]] """ - for k, g in groupby( - enumerate(iterable), key=lambda x: x[0] - ordering(x[1]) - ): + if ordering is None: + key = lambda x: x[0] - x[1] + else: + key = lambda x: x[0] - ordering(x[1]) + + for k, g in groupby(enumerate(iterable), key=key): yield map(itemgetter(1), g) @@ -2727,7 +2908,7 @@ def __len__(self): return len(self._target) def __repr__(self): - return '{}({})'.format(self.__class__.__name__, repr(self._target)) + return f'{self.__class__.__name__}({self._target!r})' class seekable: @@ -2747,8 +2928,6 @@ class seekable: >>> it.seek(0) >>> next(it), next(it), next(it) ('0', '1', '2') - >>> next(it) - '3' You can also seek forward: @@ -2756,15 +2935,29 @@ class seekable: >>> it.seek(10) >>> next(it) '10' - >>> it.relative_seek(-2) # Seeking relative to the current position - >>> next(it) - '9' >>> it.seek(20) # Seeking past the end of the source isn't a problem >>> list(it) [] >>> it.seek(0) # Resetting works even after hitting the end + >>> next(it) + '0' + + Call :meth:`relative_seek` to seek relative to the source iterator's + current position. + + >>> it = seekable((str(n) for n in range(20))) >>> next(it), next(it), next(it) ('0', '1', '2') + >>> it.relative_seek(2) + >>> next(it) + '5' + >>> it.relative_seek(-3) # Source is at '6', we move back to '3' + >>> next(it) + '3' + >>> it.relative_seek(-3) # Source is at '4', we move back to '1' + >>> next(it) + '1' + Call :meth:`peek` to look ahead one item without advancing the iterator: @@ -2873,8 +3066,10 @@ def seek(self, index): consume(self, remainder) def relative_seek(self, count): - index = len(self._cache) - self.seek(max(index + count, 0)) + if self._index is None: + self._index = len(self._cache) + + self.seek(max(self._index + count, 0)) class run_length: @@ -2903,7 +3098,7 @@ def encode(iterable): @staticmethod def decode(iterable): - return chain.from_iterable(repeat(k, n) for k, n in iterable) + return chain.from_iterable(starmap(repeat, iterable)) def exactly_n(iterable, n, predicate=bool): @@ -2921,17 +3116,37 @@ def exactly_n(iterable, n, predicate=bool): so avoid calling it on infinite iterables. """ - return len(take(n + 1, filter(predicate, iterable))) == n + return ilen(islice(filter(predicate, iterable), n + 1)) == n -def circular_shifts(iterable): - """Return a list of circular shifts of *iterable*. +def circular_shifts(iterable, steps=1): + """Yield the circular shifts of *iterable*. - >>> circular_shifts(range(4)) + >>> list(circular_shifts(range(4))) [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)] + + Set *steps* to the number of places to rotate to the left + (or to the right if negative). Defaults to 1. + + >>> list(circular_shifts(range(4), 2)) + [(0, 1, 2, 3), (2, 3, 0, 1)] + + >>> list(circular_shifts(range(4), -1)) + [(0, 1, 2, 3), (3, 0, 1, 2), (2, 3, 0, 1), (1, 2, 3, 0)] + """ - lst = list(iterable) - return take(len(lst), windowed(cycle(lst), len(lst))) + buffer = deque(iterable) + if steps == 0: + raise ValueError('Steps should be a non-zero integer') + + buffer.rotate(steps) + steps = -steps + n = len(buffer) + n //= math.gcd(n, steps) + + for _ in repeat(None, n): + buffer.rotate(steps) + yield tuple(buffer) def make_decorator(wrapping_func, result_index=0): @@ -3051,13 +3266,19 @@ def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None): dictionary. 
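
# Why the circular_shifts() rewrite above divides by math.gcd: rotating a
# length-n deque by `steps` returns to the starting arrangement after
# n // gcd(n, steps) rotations, so that is exactly the number of distinct
# shifts. Standalone sketch with an illustrative name:
import math
from collections import deque

def circular_shifts_sketch(iterable, steps=1):
    buffer = deque(iterable)
    buffer.rotate(steps)
    steps = -steps
    n = len(buffer) // math.gcd(len(buffer), steps)
    for _ in range(n):
        buffer.rotate(steps)
        yield tuple(buffer)

assert list(circular_shifts_sketch(range(4), 2)) == [(0, 1, 2, 3), (2, 3, 0, 1)]
assert len(list(circular_shifts_sketch(range(4), 1))) == 4
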
""" - valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc ret = defaultdict(list) - for item in iterable: - key = keyfunc(item) - value = valuefunc(item) - ret[key].append(value) + + if valuefunc is None: + for item in iterable: + key = keyfunc(item) + ret[key].append(item) + + else: + for item in iterable: + key = keyfunc(item) + value = valuefunc(item) + ret[key].append(value) if reducefunc is not None: for key, value_list in ret.items(): @@ -3079,9 +3300,9 @@ def rlocate(iterable, pred=bool, window_size=None): Set *pred* to a custom function to, e.g., find the indexes for a particular item: - >>> iterable = iter('abcb') + >>> iterator = iter('abcb') >>> pred = lambda x: x == 'b' - >>> list(rlocate(iterable, pred)) + >>> list(rlocate(iterator, pred)) [3, 1] If *window_size* is given, then the *pred* function will be called with @@ -3147,7 +3368,7 @@ def replace(iterable, pred, substitutes, count=None, window_size=1): # Add padding such that the number of windows matches the length of the # iterable - it = chain(iterable, [_marker] * (window_size - 1)) + it = chain(iterable, repeat(_marker, window_size - 1)) windows = windowed(it, window_size) n = 0 @@ -3191,7 +3412,7 @@ def partitions(iterable): yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))] -def set_partitions(iterable, k=None): +def set_partitions(iterable, k=None, min_size=None, max_size=None): """ Yield the set partitions of *iterable* into *k* parts. Set partitions are not order-preserving. @@ -3215,6 +3436,20 @@ def set_partitions(iterable, k=None): ['b', 'ac'] ['a', 'b', 'c'] + if *min_size* and/or *max_size* are given, the minimum and/or maximum size + per block in partition is set. + + >>> iterable = 'abc' + >>> for part in set_partitions(iterable, min_size=2): + ... print([''.join(p) for p in part]) + ['abc'] + >>> for part in set_partitions(iterable, max_size=2): + ... print([''.join(p) for p in part]) + ['a', 'bc'] + ['ab', 'c'] + ['b', 'ac'] + ['a', 'b', 'c'] + """ L = list(iterable) n = len(L) @@ -3226,6 +3461,11 @@ def set_partitions(iterable, k=None): elif k > n: return + min_size = min_size if min_size is not None else 0 + max_size = max_size if max_size is not None else n + if min_size > max_size: + return + def set_partitions_helper(L, k): n = len(L) if k == 1: @@ -3242,9 +3482,15 @@ def set_partitions_helper(L, k): if k is None: for k in range(1, n + 1): - yield from set_partitions_helper(L, k) + yield from filter( + lambda z: all(min_size <= len(bk) <= max_size for bk in z), + set_partitions_helper(L, k), + ) else: - yield from set_partitions_helper(L, k) + yield from filter( + lambda z: all(min_size <= len(bk) <= max_size for bk in z), + set_partitions_helper(L, k), + ) class time_limited: @@ -3278,7 +3524,7 @@ def __init__(self, limit_seconds, iterable): if limit_seconds < 0: raise ValueError('limit_seconds must be positive') self.limit_seconds = limit_seconds - self._iterable = iter(iterable) + self._iterator = iter(iterable) self._start_time = monotonic() self.timed_out = False @@ -3289,7 +3535,7 @@ def __next__(self): if self.limit_seconds == 0: self.timed_out = True raise StopIteration - item = next(self._iterable) + item = next(self._iterator) if monotonic() - self._start_time > self.limit_seconds: self.timed_out = True raise StopIteration @@ -3320,39 +3566,31 @@ def only(iterable, default=None, too_long=None): Note that :func:`only` attempts to advance *iterable* twice to ensure there is only one item. See :func:`spy` or :func:`peekable` to check iterable contents less destructively. 
- """ - it = iter(iterable) - first_value = next(it, default) - - try: - second_value = next(it) - except StopIteration: - pass - else: - msg = ( - 'Expected exactly one item in iterable, but got {!r}, {!r}, ' - 'and perhaps more.'.format(first_value, second_value) - ) - raise too_long or ValueError(msg) - return first_value + """ + iterator = iter(iterable) + for first in iterator: + for second in iterator: + msg = ( + f'Expected exactly one item in iterable, but got {first!r}, ' + f'{second!r}, and perhaps more.' + ) + raise too_long or ValueError(msg) + return first + return default -def _ichunk(iterable, n): +def _ichunk(iterator, n): cache = deque() - chunk = islice(iterable, n) + chunk = islice(iterator, n) def generator(): - while True: - if cache: - yield cache.popleft() - else: - try: - item = next(chunk) - except StopIteration: - return + with suppress(StopIteration): + while True: + if cache: + yield cache.popleft() else: - yield item + yield next(chunk) def materialize_next(n=1): # if n not specified materialize everything @@ -3393,10 +3631,10 @@ def ichunked(iterable, n): [8, 9, 10, 11] """ - iterable = iter(iterable) + iterator = iter(iterable) while True: # Create new chunk - chunk, materialize_next = _ichunk(iterable, n) + chunk, materialize_next = _ichunk(iterator, n) # Check to see whether we're at the end of the source iterable if not materialize_next(): @@ -3513,7 +3751,7 @@ def map_except(function, iterable, *exceptions): pass -def map_if(iterable, pred, func, func_else=lambda x: x): +def map_if(iterable, pred, func, func_else=None): """Evaluate each item from *iterable* using *pred*. If the result is equivalent to ``True``, transform the item with *func* and yield it. Otherwise, transform the item with *func_else* and yield it. @@ -3531,36 +3769,37 @@ def map_if(iterable, pred, func, func_else=lambda x: x): ... lambda x: f'{sqrt(x):.2f}', lambda x: None)) [None, None, None, None, None, '0.00', '1.00', '1.41', '1.73', '2.00'] """ - for item in iterable: - yield func(item) if pred(item) else func_else(item) + if func_else is None: + for item in iterable: + yield func(item) if pred(item) else item -def _sample_unweighted(iterable, k): - # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li: - # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))". + else: + for item in iterable: + yield func(item) if pred(item) else func_else(item) - # Fill up the reservoir (collection of samples) with the first `k` samples - reservoir = take(k, iterable) - # Generate random number that's the largest in a sample of k U(0,1) numbers - # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic - W = exp(log(random()) / k) +def _sample_unweighted(iterator, k, strict): + # Algorithm L in the 1994 paper by Kim-Hung Li: + # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))". - # The number of elements to skip before changing the reservoir is a random - # number with a geometric distribution. Sample it using random() and logs. 
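
# The only() rewrite above uses a small idiom worth spelling out: a `for`
# loop over an iterator that is expected to run zero or one times replaces
# explicit next()/StopIteration handling. The same shape, standalone:
def only_sketch(iterable, default=None):
    iterator = iter(iterable)
    for first in iterator:
        for second in iterator:
            raise ValueError(f'got {first!r}, {second!r}, and perhaps more')
        return first
    return default

assert only_sketch([]) is None
assert only_sketch([1]) == 1
try:
    only_sketch([1, 2])
except ValueError:
    pass
else:
    raise AssertionError('expected ValueError')
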
- next_index = k + floor(log(random()) / log(1 - W)) + reservoir = list(islice(iterator, k)) + if strict and len(reservoir) < k: + raise ValueError('Sample larger than population') + W = 1.0 - for index, element in enumerate(iterable, k): - if index == next_index: + with suppress(StopIteration): + while True: + W *= random() ** (1 / k) + skip = floor(log(random()) / log1p(-W)) + element = next(islice(iterator, skip, None)) reservoir[randrange(k)] = element - # The new W is the largest in a sample of k U(0, `old_W`) numbers - W *= exp(log(random()) / k) - next_index += floor(log(random()) / log(1 - W)) + 1 + shuffle(reservoir) return reservoir -def _sample_weighted(iterable, k, weights): +def _sample_weighted(iterator, k, weights, strict): # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. : # "Weighted random sampling with a reservoir". @@ -3569,7 +3808,10 @@ def _sample_weighted(iterable, k, weights): # Fill up the reservoir (collection of samples) with the first `k` # weight-keys and elements, then heapify the list. - reservoir = take(k, zip(weight_keys, iterable)) + reservoir = take(k, zip(weight_keys, iterator)) + if strict and len(reservoir) < k: + raise ValueError('Sample larger than population') + heapify(reservoir) # The number of jumps before changing the reservoir is a random variable @@ -3577,7 +3819,7 @@ def _sample_weighted(iterable, k, weights): smallest_weight_key, _ = reservoir[0] weights_to_skip = log(random()) / smallest_weight_key - for weight, element in zip(weights, iterable): + for weight, element in zip(weights, iterator): if weight >= weights_to_skip: # The notation here is consistent with the paper, but we store # the weight-keys in log-space for better numerical stability. @@ -3591,44 +3833,127 @@ def _sample_weighted(iterable, k, weights): else: weights_to_skip -= weight - # Equivalent to [element for weight_key, element in sorted(reservoir)] - return [heappop(reservoir)[1] for _ in range(k)] + ret = [element for weight_key, element in reservoir] + shuffle(ret) + return ret + + +def _sample_counted(population, k, counts, strict): + element = None + remaining = 0 + + def feed(i): + # Advance *i* steps ahead and consume an element + nonlocal element, remaining + + while i + 1 > remaining: + i = i - remaining + element = next(population) + remaining = next(counts) + remaining -= i + 1 + return element + + with suppress(StopIteration): + reservoir = [] + for _ in range(k): + reservoir.append(feed(0)) + + if strict and len(reservoir) < k: + raise ValueError('Sample larger than population') + + with suppress(StopIteration): + W = 1.0 + while True: + W *= random() ** (1 / k) + skip = floor(log(random()) / log1p(-W)) + element = feed(skip) + reservoir[randrange(k)] = element + + shuffle(reservoir) + return reservoir -def sample(iterable, k, weights=None): +def sample(iterable, k, weights=None, *, counts=None, strict=False): """Return a *k*-length list of elements chosen (without replacement) - from the *iterable*. Like :func:`random.sample`, but works on iterables - of unknown length. + from the *iterable*. + + Similar to :func:`random.sample`, but works on inputs that aren't + indexable (such as sets and dictionaries) and on inputs where the + size isn't known in advance (such as generators). >>> iterable = range(100) >>> sample(iterable, 5) # doctest: +SKIP [81, 60, 96, 16, 4] - An iterable with *weights* may also be given: + For iterables with repeated elements, you may supply *counts* to + indicate the repeats. 
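
# A smoke test of the Algorithm L pattern above (standalone sketch, not the
# vendored code): the skip length floor(log(random()) / log1p(-W)) is a
# geometric draw, and log1p(-W) is log(1 - W) computed stably for small W.
from itertools import islice
from math import floor, log, log1p
from random import random, randrange, seed

def reservoir_sketch(iterable, k):
    iterator = iter(iterable)
    reservoir = list(islice(iterator, k))
    W = 1.0
    try:
        while True:
            W *= random() ** (1 / k)
            skip = floor(log(random()) / log1p(-W))
            element = next(islice(iterator, skip, None))
            reservoir[randrange(k)] = element
    except StopIteration:
        return reservoir

seed(1)
chosen = reservoir_sketch(range(10_000), 5)
assert len(chosen) == 5 and all(0 <= x < 10_000 for x in chosen)
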
+ + >>> iterable = ['a', 'b'] + >>> counts = [3, 4] # Equivalent to 'a', 'a', 'a', 'b', 'b', 'b', 'b' + >>> sample(iterable, k=3, counts=counts) # doctest: +SKIP + ['a', 'a', 'b'] + + An iterable with *weights* may be given: >>> iterable = range(100) >>> weights = (i * i + 1 for i in range(100)) >>> sampled = sample(iterable, 5, weights=weights) # doctest: +SKIP [79, 67, 74, 66, 78] - The algorithm can also be used to generate weighted random permutations. - The relative weight of each item determines the probability that it - appears late in the permutation. + Weighted selections are made without replacement. + After an element is selected, it is removed from the pool and the + relative weights of the other elements increase (this + does not match the behavior of :func:`random.sample`'s *counts* + parameter). Note that *weights* may not be used with *counts*. + + If the length of *iterable* is less than *k*, + ``ValueError`` is raised if *strict* is ``True`` and + all elements are returned (in shuffled order) if *strict* is ``False``. + + By default, the `Algorithm L `__ reservoir sampling + technique is used. When *weights* are provided, + `Algorithm A-ExpJ `__ is used instead. + + Notes on reproducibility: + + * The algorithms rely on inexact floating-point functions provided + by the underlying math library (e.g. ``log``, ``log1p``, and ``pow``). + Those functions can `produce slightly different results + `_ on + different builds. Accordingly, selections can vary across builds + even for the same seed. + + * The algorithms loop over the input and make selections based on + ordinal position, so selections from unordered collections (such as + sets) won't reproduce across sessions on the same platform using the + same seed. For example, this won't reproduce:: + + >> seed(8675309) + >> sample(set('abcdefghijklmnopqrstuvwxyz'), 10) + ['c', 'p', 'e', 'w', 's', 'a', 'j', 'd', 'n', 't'] - >>> data = "abcdefgh" - >>> weights = range(1, len(data) + 1) - >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP - ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f'] """ + iterator = iter(iterable) + + if k < 0: + raise ValueError('k must be non-negative') + if k == 0: return [] - iterable = iter(iterable) - if weights is None: - return _sample_unweighted(iterable, k) - else: + if weights is not None and counts is not None: + raise TypeError('weights and counts are mutually exclusive') + + elif weights is not None: weights = iter(weights) - return _sample_weighted(iterable, k, weights) + return _sample_weighted(iterator, k, weights, strict) + + elif counts is not None: + counts = iter(counts) + return _sample_counted(iterator, k, counts, strict) + + else: + return _sample_unweighted(iterator, k, strict) def is_sorted(iterable, key=None, reverse=False, strict=False): @@ -3650,12 +3975,17 @@ def is_sorted(iterable, key=None, reverse=False, strict=False): False The function returns ``False`` after encountering the first out-of-order - item. If there are no out-of-order items, the iterable is exhausted. + item, which means it may produce results that differ from the built-in + :func:`sorted` function for objects with unusual comparison dynamics + (like ``math.nan``). If there are no out-of-order items, the iterable is + exhausted. 
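
# What the *counts* parameter documented above means, spelled out with the
# stdlib: sampling ['a', 'b'] with counts=[3, 4] behaves like sampling the
# expanded population below. The real code streams the counts instead of
# expanding them; this comparison is only illustrative.
from itertools import chain, repeat
from random import sample as stdlib_sample

population = ['a', 'b']
counts = [3, 4]
expanded = list(chain.from_iterable(map(repeat, population, counts)))
assert expanded == ['a', 'a', 'a', 'b', 'b', 'b', 'b']
picked = stdlib_sample(expanded, 3)
assert len(picked) == 3 and set(picked) <= {'a', 'b'}
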
""" - - compare = (le if reverse else ge) if strict else (lt if reverse else gt) - it = iterable if key is None else map(key, iterable) - return not any(starmap(compare, pairwise(it))) + it = iterable if (key is None) else map(key, iterable) + a, b = tee(it) + next(b, None) + if reverse: + b, a = a, b + return all(map(lt, a, b)) if strict else not any(map(lt, b, a)) class AbortThread(BaseException): @@ -3919,7 +4249,7 @@ def nth_permutation(iterable, r, index): raise ValueError else: c = perm(n, r) - assert c > 0 # factortial(n)>0, and r 0 # factorial(n)>0, and r>> minmax([3, 1, 5]) (1, 5) @@ -4455,10 +4785,16 @@ def minmax(iterable_or_value, *others, key=None, default=_marker): Otherwise ``ValueError`` is raised. + This function makes a single pass over the input elements and takes care to + minimize the number of comparisons made during processing. + + Note that unlike the builtin ``max`` function, which always returns the first + item with the maximum value, this function may return another item when there are + ties. + This function is based on the - `recipe `__ by - Raymond Hettinger and takes care to minimize the number of comparisons - performed. + `recipe `__ by + Raymond Hettinger. """ iterable = (iterable_or_value, *others) if others else iterable_or_value @@ -4648,8 +4984,8 @@ def outer_product(func, xs, ys, *args, **kwargs): >>> xs = ['A', 'B', 'A', 'A', 'B', 'B', 'A', 'A', 'B', 'B'] >>> ys = ['X', 'X', 'X', 'Y', 'Z', 'Z', 'Y', 'Y', 'Z', 'Z'] - >>> rows = list(zip(xs, ys)) - >>> count_rows = lambda x, y: rows.count((x, y)) + >>> pair_counts = Counter(zip(xs, ys)) + >>> count_rows = lambda x, y: pair_counts[x, y] >>> list(outer_product(count_rows, sorted(set(xs)), sorted(set(ys)))) [(2, 3, 0), (1, 0, 4)] @@ -4714,9 +5050,11 @@ def powerset_of_sets(iterable): :func:`powerset_of_sets` takes care to minimize the number of hash operations performed. """ - sets = tuple(map(set, dict.fromkeys(map(frozenset, zip(iterable))))) - for r in range(len(sets) + 1): - yield from starmap(set().union, combinations(sets, r)) + sets = tuple(dict.fromkeys(map(frozenset, zip(iterable)))) + return chain.from_iterable( + starmap(set().union, combinations(sets, r)) + for r in range(len(sets) + 1) + ) def join_mappings(**field_to_map): @@ -4742,23 +5080,30 @@ def _complex_sumprod(v1, v2): Used by :func:`dft` and :func:`idft`. """ - r1 = chain((p.real for p in v1), (-p.imag for p in v1)) - r2 = chain((q.real for q in v2), (q.imag for q in v2)) - i1 = chain((p.real for p in v1), (p.imag for p in v1)) - i2 = chain((q.imag for q in v2), (q.real for q in v2)) + real = attrgetter('real') + imag = attrgetter('imag') + r1 = chain(map(real, v1), map(neg, map(imag, v1))) + r2 = chain(map(real, v2), map(imag, v2)) + i1 = chain(map(real, v1), map(imag, v1)) + i2 = chain(map(imag, v2), map(real, v2)) return complex(_fsumprod(r1, r2), _fsumprod(i1, i2)) def dft(xarr): - """Discrete Fourier Tranform. *xarr* is a sequence of complex numbers. + """Discrete Fourier Transform. *xarr* is a sequence of complex numbers. Yields the components of the corresponding transformed output vector. >>> import cmath - >>> xarr = [1, 2-1j, -1j, -1+2j] - >>> Xarr = [2, -2-2j, -2j, 4+4j] + >>> xarr = [1, 2-1j, -1j, -1+2j] # time domain + >>> Xarr = [2, -2-2j, -2j, 4+4j] # frequency domain + >>> magnitudes, phases = zip(*map(cmath.polar, Xarr)) >>> all(map(cmath.isclose, dft(xarr), Xarr)) True + Inputs are restricted to numeric types that can add and multiply + with a complex number. 
This includes int, float, complex, and + Fraction, but excludes Decimal. + See :func:`idft` for the inverse Discrete Fourier Transform. """ N = len(xarr) @@ -4769,16 +5114,20 @@ def dft(xarr): def idft(Xarr): - """Inverse Discrete Fourier Tranform. *Xarr* is a sequence of + """Inverse Discrete Fourier Transform. *Xarr* is a sequence of complex numbers. Yields the components of the corresponding inverse-transformed output vector. >>> import cmath - >>> xarr = [1, 2-1j, -1j, -1+2j] - >>> Xarr = [2, -2-2j, -2j, 4+4j] + >>> xarr = [1, 2-1j, -1j, -1+2j] # time domain + >>> Xarr = [2, -2-2j, -2j, 4+4j] # frequency domain >>> all(map(cmath.isclose, idft(Xarr), xarr)) True + Inputs are restricted to numeric types that can add and multiply + with a complex number. This includes int, float, complex, and + Fraction, but excludes Decimal. + See :func:`dft` for the Discrete Fourier Transform. """ N = len(Xarr) @@ -4804,3 +5153,151 @@ def doublestarmap(func, iterable): """ for item in iterable: yield func(**item) + + +def _nth_prime_bounds(n): + """Bounds for the nth prime (counting from 1): lb < p_n < ub.""" + # At and above 688,383, the lb/ub spread is under 0.003 * p_n. + + if n < 1: + raise ValueError + + if n < 6: + return (n, 2.25 * n) + + # https://en.wikipedia.org/wiki/Prime-counting_function#Inequalities + upper_bound = n * log(n * log(n)) + lower_bound = upper_bound - n + if n >= 688_383: + upper_bound -= n * (1.0 - (log(log(n)) - 2.0) / log(n)) + + return lower_bound, upper_bound + + +def nth_prime(n, *, approximate=False): + """Return the nth prime (counting from 0). + + >>> nth_prime(0) + 2 + >>> nth_prime(100) + 547 + + If *approximate* is set to True, will return a prime close + to the nth prime. The estimation is much faster than computing + an exact result. + + >>> nth_prime(200_000_000, approximate=True) # Exact result is 4222234763 + 4217820427 + + """ + lb, ub = _nth_prime_bounds(n + 1) + + if not approximate or n <= 1_000_000: + return nth(sieve(ceil(ub)), n) + + # Search from the midpoint and return the first odd prime + odd = floor((lb + ub) / 2) | 1 + return first_true(count(odd, step=2), pred=is_prime) + + +def argmin(iterable, *, key=None): + """ + Index of the first occurrence of a minimum value in an iterable. + + >>> argmin('efghabcdijkl') + 4 + >>> argmin([3, 2, 1, 0, 4, 2, 1, 0]) + 3 + + For example, look up a label corresponding to the position + of a value that minimizes a cost function:: + + >>> def cost(x): + ... "Days for a wound to heal given a subject's age." + ... return x**2 - 20*x + 150 + ... + >>> labels = ['homer', 'marge', 'bart', 'lisa', 'maggie'] + >>> ages = [ 35, 30, 10, 9, 1 ] + + # Fastest healing family member + >>> labels[argmin(ages, key=cost)] + 'bart' + + # Age with fastest healing + >>> min(ages, key=cost) + 10 + + """ + if key is not None: + iterable = map(key, iterable) + return min(enumerate(iterable), key=itemgetter(1))[0] + + +def argmax(iterable, *, key=None): + """ + Index of the first occurrence of a maximum value in an iterable. 
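
# Sanity check for the prime-counting bounds used by nth_prime() above,
# against a known value: the 101st prime (n=101, counting from 1) is 547.
# Standalone sketch of the same formulas; the extra tightening applied for
# n >= 688,383 is omitted here.
from math import log

def nth_prime_bounds_sketch(n):
    if n < 6:
        return (n, 2.25 * n)
    upper = n * log(n * log(n))
    return (upper - n, upper)

lb, ub = nth_prime_bounds_sketch(101)
assert lb < 547 < ub  # lb ~ 519.6, ub ~ 620.6
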
+ + >>> argmax('abcdefghabcd') + 7 + >>> argmax([0, 1, 2, 3, 3, 2, 1, 0]) + 3 + + For example, identify the best machine learning model:: + + >>> models = ['svm', 'random forest', 'knn', 'naïve bayes'] + >>> accuracy = [ 68, 61, 84, 72 ] + + # Most accurate model + >>> models[argmax(accuracy)] + 'knn' + + # Best accuracy + >>> max(accuracy) + 84 + + """ + if key is not None: + iterable = map(key, iterable) + return max(enumerate(iterable), key=itemgetter(1))[0] + + +def extract(iterable, indices): + """Yield values at the specified indices. + + Example: + + >>> data = 'abcdefghijklmnopqrstuvwxyz' + >>> list(extract(data, [7, 4, 11, 11, 14])) + ['h', 'e', 'l', 'l', 'o'] + + The *iterable* is consumed lazily and can be infinite. + The *indices* are consumed immediately and must be finite. + + Raises ``IndexError`` if an index lies beyond the iterable. + Raises ``ValueError`` for negative indices. + """ + + iterator = iter(iterable) + index_and_position = sorted(zip(indices, count())) + + if index_and_position and index_and_position[0][0] < 0: + raise ValueError('Indices must be non-negative') + + buffer = {} + iterator_position = -1 + next_to_emit = 0 + + for index, order in index_and_position: + advance = index - iterator_position + if advance: + try: + value = next(islice(iterator, advance - 1, None)) + except StopIteration: + raise IndexError(index) + iterator_position = index + + buffer[order] = value + + while next_to_emit in buffer: + yield buffer.pop(next_to_emit) + next_to_emit += 1 diff --git a/setuptools/_vendor/more_itertools/more.pyi b/setuptools/_vendor/more_itertools/more.pyi index e946023259..b5e33f8b74 100644 --- a/setuptools/_vendor/more_itertools/more.pyi +++ b/setuptools/_vendor/more_itertools/more.pyi @@ -2,38 +2,168 @@ from __future__ import annotations -from types import TracebackType -from typing import ( - Any, - Callable, +import sys +import types + +from collections.abc import ( Container, - ContextManager, - Generic, Hashable, - Mapping, Iterable, Iterator, Mapping, - overload, Reversible, Sequence, Sized, - Type, +) +from contextlib import AbstractContextManager +from typing import ( + Any, + Callable, + Generic, TypeVar, + overload, type_check_only, ) from typing_extensions import Protocol +__all__ = [ + 'AbortThread', + 'SequenceView', + 'UnequalIterablesError', + 'adjacent', + 'all_unique', + 'always_iterable', + 'always_reversible', + 'argmax', + 'argmin', + 'bucket', + 'callback_iter', + 'chunked', + 'chunked_even', + 'circular_shifts', + 'collapse', + 'combination_index', + 'combination_with_replacement_index', + 'consecutive_groups', + 'constrained_batches', + 'consumer', + 'count_cycle', + 'countable', + 'derangements', + 'dft', + 'difference', + 'distinct_combinations', + 'distinct_permutations', + 'distribute', + 'divide', + 'doublestarmap', + 'duplicates_everseen', + 'duplicates_justseen', + 'classify_unique', + 'exactly_n', + 'extract', + 'filter_except', + 'filter_map', + 'first', + 'gray_product', + 'groupby_transform', + 'ichunked', + 'iequals', + 'idft', + 'ilen', + 'interleave', + 'interleave_evenly', + 'interleave_longest', + 'interleave_randomly', + 'intersperse', + 'is_sorted', + 'islice_extended', + 'iterate', + 'iter_suppress', + 'join_mappings', + 'last', + 'locate', + 'longest_common_prefix', + 'lstrip', + 'make_decorator', + 'map_except', + 'map_if', + 'map_reduce', + 'mark_ends', + 'minmax', + 'nth_or_last', + 'nth_permutation', + 'nth_prime', + 'nth_product', + 'nth_combination_with_replacement', + 'numeric_range', + 'one', + 'only', + 
'outer_product', + 'padded', + 'partial_product', + 'partitions', + 'peekable', + 'permutation_index', + 'powerset_of_sets', + 'product_index', + 'raise_', + 'repeat_each', + 'repeat_last', + 'replace', + 'rlocate', + 'rstrip', + 'run_length', + 'sample', + 'seekable', + 'set_partitions', + 'side_effect', + 'sliced', + 'sort_together', + 'split_after', + 'split_at', + 'split_before', + 'split_into', + 'split_when', + 'spy', + 'stagger', + 'strip', + 'strictly_n', + 'substrings', + 'substrings_indexes', + 'takewhile_inclusive', + 'time_limited', + 'unique_in_window', + 'unique_to_each', + 'unzip', + 'value_chain', + 'windowed', + 'windowed_complete', + 'with_iter', + 'zip_broadcast', + 'zip_equal', + 'zip_offset', +] + # Type and type variable definitions _T = TypeVar('_T') _T1 = TypeVar('_T1') _T2 = TypeVar('_T2') +_T3 = TypeVar('_T3') +_T4 = TypeVar('_T4') +_T5 = TypeVar('_T5') _U = TypeVar('_U') _V = TypeVar('_V') _W = TypeVar('_W') _T_co = TypeVar('_T_co', covariant=True) _GenFn = TypeVar('_GenFn', bound=Callable[..., Iterator[Any]]) -_Raisable = BaseException | Type[BaseException] +_Raisable = BaseException | type[BaseException] + +# The type of isinstance's second argument (from typeshed builtins) +if sys.version_info >= (3, 10): + _ClassInfo = type | types.UnionType | tuple[_ClassInfo, ...] +else: + _ClassInfo = type | tuple[_ClassInfo, ...] @type_check_only class _SizedIterable(Protocol[_T_co], Sized, Iterable[_T_co]): ... @@ -80,7 +210,7 @@ def consumer(func: _GenFn) -> _GenFn: ... def ilen(iterable: Iterable[_T]) -> int: ... def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ... def with_iter( - context_manager: ContextManager[Iterable[_T]], + context_manager: AbstractContextManager[Iterable[_T]], ) -> Iterator[_T]: ... def one( iterable: Iterable[_T], @@ -97,6 +227,9 @@ def strictly_n( def distinct_permutations( iterable: Iterable[_T], r: int | None = ... ) -> Iterator[tuple[_T, ...]]: ... +def derangements( + iterable: Iterable[_T], r: int | None = None +) -> Iterator[tuple[_T, ...]]: ... def intersperse( e: _U, iterable: Iterable[_T], n: int = ... ) -> Iterator[_T | _U]: ... @@ -133,9 +266,10 @@ def interleave_longest(*iterables: Iterable[_T]) -> Iterator[_T]: ... def interleave_evenly( iterables: list[Iterable[_T]], lengths: list[int] | None = ... ) -> Iterator[_T]: ... +def interleave_randomly(*iterables: Iterable[_T]) -> Iterable[_T]: ... def collapse( iterable: Iterable[Any], - base_type: type | None = ..., + base_type: _ClassInfo | None = ..., levels: int | None = ..., ) -> Iterator[Any]: ... @overload @@ -213,6 +347,7 @@ def stagger( class UnequalIterablesError(ValueError): def __init__(self, details: tuple[int, int, int] | None = ...) -> None: ... +# zip_equal @overload def zip_equal(__iter1: Iterable[_T1]) -> Iterator[tuple[_T1]]: ... @overload @@ -221,11 +356,35 @@ def zip_equal( ) -> Iterator[tuple[_T1, _T2]]: ... @overload def zip_equal( - __iter1: Iterable[_T], - __iter2: Iterable[_T], - __iter3: Iterable[_T], - *iterables: Iterable[_T], -) -> Iterator[tuple[_T, ...]]: ... + __iter1: Iterable[_T1], __iter2: Iterable[_T2], __iter3: Iterable[_T3] +) -> Iterator[tuple[_T1, _T2, _T3]]: ... +@overload +def zip_equal( + __iter1: Iterable[_T1], + __iter2: Iterable[_T2], + __iter3: Iterable[_T3], + __iter4: Iterable[_T4], +) -> Iterator[tuple[_T1, _T2, _T3, _T4]]: ... 
+@overload +def zip_equal( + __iter1: Iterable[_T1], + __iter2: Iterable[_T2], + __iter3: Iterable[_T3], + __iter4: Iterable[_T4], + __iter5: Iterable[_T5], +) -> Iterator[tuple[_T1, _T2, _T3, _T4, _T5]]: ... +@overload +def zip_equal( + __iter1: Iterable[Any], + __iter2: Iterable[Any], + __iter3: Iterable[Any], + __iter4: Iterable[Any], + __iter5: Iterable[Any], + __iter6: Iterable[Any], + *iterables: Iterable[Any], +) -> Iterator[tuple[Any, ...]]: ... + +# zip_offset @overload def zip_offset( __iter1: Iterable[_T1], @@ -285,12 +444,13 @@ def sort_together( key_list: Iterable[int] = ..., key: Callable[..., Any] | None = ..., reverse: bool = ..., + strict: bool = ..., ) -> list[tuple[_T, ...]]: ... def unzip(iterable: Iterable[Sequence[_T]]) -> tuple[Iterator[_T], ...]: ... def divide(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ... def always_iterable( obj: object, - base_type: type | tuple[type | tuple[Any, ...], ...] | None = ..., + base_type: _ClassInfo | None = ..., ) -> Iterator[Any]: ... def adjacent( predicate: Callable[[_T], bool], @@ -317,42 +477,42 @@ def groupby_transform( keyfunc: None, valuefunc: Callable[[_T], _V], reducefunc: None, -) -> Iterable[tuple[_T, Iterable[_V]]]: ... +) -> Iterator[tuple[_T, Iterator[_V]]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: Callable[[_T], _U], valuefunc: Callable[[_T], _V], reducefunc: None, -) -> Iterable[tuple[_U, Iterator[_V]]]: ... +) -> Iterator[tuple[_U, Iterator[_V]]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: None, valuefunc: None, reducefunc: Callable[[Iterator[_T]], _W], -) -> Iterable[tuple[_T, _W]]: ... +) -> Iterator[tuple[_T, _W]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: Callable[[_T], _U], valuefunc: None, reducefunc: Callable[[Iterator[_T]], _W], -) -> Iterable[tuple[_U, _W]]: ... +) -> Iterator[tuple[_U, _W]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: None, valuefunc: Callable[[_T], _V], - reducefunc: Callable[[Iterable[_V]], _W], -) -> Iterable[tuple[_T, _W]]: ... + reducefunc: Callable[[Iterator[_V]], _W], +) -> Iterator[tuple[_T, _W]]: ... @overload def groupby_transform( iterable: Iterable[_T], keyfunc: Callable[[_T], _U], valuefunc: Callable[[_T], _V], - reducefunc: Callable[[Iterable[_V]], _W], -) -> Iterable[tuple[_U, _W]]: ... + reducefunc: Callable[[Iterator[_V]], _W], +) -> Iterator[tuple[_U, _W]]: ... class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]): @overload @@ -373,7 +533,7 @@ class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]): def __len__(self) -> int: ... def __reduce__( self, - ) -> tuple[Type[numeric_range[_T, _U]], tuple[_T, _T, _U]]: ... + ) -> tuple[type[numeric_range[_T, _U]], tuple[_T, _T, _U]]: ... def __repr__(self) -> str: ... def __reversed__(self) -> Iterator[_T]: ... def count(self, value: _T) -> int: ... @@ -408,7 +568,7 @@ class islice_extended(Generic[_T], Iterator[_T]): def always_reversible(iterable: Iterable[_T]) -> Iterator[_T]: ... def consecutive_groups( - iterable: Iterable[_T], ordering: Callable[[_T], int] = ... + iterable: Iterable[_T], ordering: None | Callable[[_T], int] = ... ) -> Iterator[Iterator[_T]]: ... @overload def difference( @@ -454,7 +614,9 @@ class run_length: def exactly_n( iterable: Iterable[_T], n: int, predicate: Callable[[_T], object] = ... ) -> bool: ... -def circular_shifts(iterable: Iterable[_T]) -> list[tuple[_T, ...]]: ... 
+def circular_shifts( + iterable: Iterable[_T], steps: int = 1 +) -> list[tuple[_T, ...]]: ... def make_decorator( wrapping_func: Callable[..., _U], result_index: int = ... ) -> Callable[..., Callable[[Callable[..., Any]], Callable[..., _U]]]: ... @@ -500,7 +662,10 @@ def replace( ) -> Iterator[_T | _U]: ... def partitions(iterable: Iterable[_T]) -> Iterator[list[list[_T]]]: ... def set_partitions( - iterable: Iterable[_T], k: int | None = ... + iterable: Iterable[_T], + k: int | None = ..., + min_size: int | None = ..., + max_size: int | None = ..., ) -> Iterator[list[list[_T]]]: ... class time_limited(Generic[_T], Iterator[_T]): @@ -525,12 +690,12 @@ def distinct_combinations( def filter_except( validator: Callable[[Any], object], iterable: Iterable[_T], - *exceptions: Type[BaseException], + *exceptions: type[BaseException], ) -> Iterator[_T]: ... def map_except( function: Callable[[Any], _U], iterable: Iterable[_T], - *exceptions: Type[BaseException], + *exceptions: type[BaseException], ) -> Iterator[_U]: ... def map_if( iterable: Iterable[Any], @@ -538,10 +703,22 @@ def map_if( func: Callable[[Any], Any], func_else: Callable[[Any], Any] | None = ..., ) -> Iterator[Any]: ... +def _sample_unweighted( + iterator: Iterator[_T], k: int, strict: bool +) -> list[_T]: ... +def _sample_counted( + population: Iterator[_T], k: int, counts: Iterable[int], strict: bool +) -> list[_T]: ... +def _sample_weighted( + iterator: Iterator[_T], k: int, weights: Iterator[float], strict: bool +) -> list[_T]: ... def sample( iterable: Iterable[_T], k: int, weights: Iterable[float] | None = ..., + *, + counts: Iterable[int] | None = ..., + strict: bool = False, ) -> list[_T]: ... def is_sorted( iterable: Iterable[_T], @@ -563,9 +740,9 @@ class callback_iter(Generic[_T], Iterator[_T]): def __enter__(self) -> callback_iter[_T]: ... def __exit__( self, - exc_type: Type[BaseException] | None, + exc_type: type[BaseException] | None, exc_value: BaseException | None, - traceback: TracebackType | None, + traceback: types.TracebackType | None, ) -> bool | None: ... def __iter__(self) -> callback_iter[_T]: ... def __next__(self) -> _T: ... @@ -577,7 +754,7 @@ class callback_iter(Generic[_T], Iterator[_T]): def windowed_complete( iterable: Iterable[_T], n: int -) -> Iterator[tuple[_T, ...]]: ... +) -> Iterator[tuple[tuple[_T, ...], tuple[_T, ...], tuple[_T, ...]]]: ... def all_unique( iterable: Iterable[_T], key: Callable[[_T], _U] | None = ... ) -> bool: ... @@ -608,9 +785,61 @@ class countable(Generic[_T], Iterator[_T]): items_seen: int def chunked_even(iterable: Iterable[_T], n: int) -> Iterator[list[_T]]: ... +@overload +def zip_broadcast( + __obj1: _T | Iterable[_T], + *, + scalar_types: _ClassInfo | None = ..., + strict: bool = ..., +) -> Iterable[tuple[_T, ...]]: ... +@overload def zip_broadcast( + __obj1: _T | Iterable[_T], + __obj2: _T | Iterable[_T], + *, + scalar_types: _ClassInfo | None = ..., + strict: bool = ..., +) -> Iterable[tuple[_T, ...]]: ... +@overload +def zip_broadcast( + __obj1: _T | Iterable[_T], + __obj2: _T | Iterable[_T], + __obj3: _T | Iterable[_T], + *, + scalar_types: _ClassInfo | None = ..., + strict: bool = ..., +) -> Iterable[tuple[_T, ...]]: ... +@overload +def zip_broadcast( + __obj1: _T | Iterable[_T], + __obj2: _T | Iterable[_T], + __obj3: _T | Iterable[_T], + __obj4: _T | Iterable[_T], + *, + scalar_types: _ClassInfo | None = ..., + strict: bool = ..., +) -> Iterable[tuple[_T, ...]]: ... 
+@overload +def zip_broadcast( + __obj1: _T | Iterable[_T], + __obj2: _T | Iterable[_T], + __obj3: _T | Iterable[_T], + __obj4: _T | Iterable[_T], + __obj5: _T | Iterable[_T], + *, + scalar_types: _ClassInfo | None = ..., + strict: bool = ..., +) -> Iterable[tuple[_T, ...]]: ... +@overload +def zip_broadcast( + __obj1: _T | Iterable[_T], + __obj2: _T | Iterable[_T], + __obj3: _T | Iterable[_T], + __obj4: _T | Iterable[_T], + __obj5: _T | Iterable[_T], + __obj6: _T | Iterable[_T], *objects: _T | Iterable[_T], - scalar_types: type | tuple[type | tuple[Any, ...], ...] | None = ..., + scalar_types: _ClassInfo | None = ..., strict: bool = ..., ) -> Iterable[tuple[_T, ...]]: ... def unique_in_window( @@ -691,7 +920,7 @@ def outer_product( ) -> Iterator[tuple[_V, ...]]: ... def iter_suppress( iterable: Iterable[_T], - *exceptions: Type[BaseException], + *exceptions: type[BaseException], ) -> Iterator[_T]: ... def filter_map( func: Callable[[_T], _V | None], @@ -699,7 +928,7 @@ def filter_map( ) -> Iterator[_V]: ... def powerset_of_sets(iterable: Iterable[_T]) -> Iterator[set[_T]]: ... def join_mappings( - **field_to_map: Mapping[_T, _V] + **field_to_map: Mapping[_T, _V], ) -> dict[_T, dict[str, _V]]: ... def doublestarmap( func: Callable[..., _T], @@ -707,3 +936,14 @@ def doublestarmap( ) -> Iterator[_T]: ... def dft(xarr: Sequence[complex]) -> Iterator[complex]: ... def idft(Xarr: Sequence[complex]) -> Iterator[complex]: ... +def _nth_prime_ub(n: int) -> float: ... +def nth_prime(n: int, *, approximate: bool = ...) -> int: ... +def argmin( + iterable: Iterable[_T], *, key: Callable[[_T], _U] | None = ... +) -> int: ... +def argmax( + iterable: Iterable[_T], *, key: Callable[[_T], _U] | None = ... +) -> int: ... +def extract( + iterable: Iterable[_T], indices: Iterable[int] +) -> Iterator[_T]: ... 
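
# What the expanded per-arity overloads in the stub above buy: a static type
# checker can infer element-wise tuple types instead of a homogeneous
# tuple[_T, ...]. Illustrative usage, assuming more_itertools is importable
# as a top-level package (here it is vendored under setuptools._vendor):
from more_itertools import zip_equal

pairs = zip_equal([1, 2], ['a', 'b'])  # inferred: Iterator[tuple[int, str]]
triple = zip_equal([1], [1.0], ['x'])  # inferred: Iterator[tuple[int, float, str]]
assert list(pairs) == [(1, 'a'), (2, 'b')]
assert list(triple) == [(1, 1.0, 'x')]
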
diff --git a/setuptools/_vendor/more_itertools/recipes.py b/setuptools/_vendor/more_itertools/recipes.py index b32fa95533..dacf61407d 100644 --- a/setuptools/_vendor/more_itertools/recipes.py +++ b/setuptools/_vendor/more_itertools/recipes.py @@ -8,13 +8,15 @@ """ -import math -import operator +import random +from bisect import bisect_left, insort from collections import deque -from collections.abc import Sized -from functools import partial, reduce +from contextlib import suppress +from functools import lru_cache, partial, reduce +from heapq import heappush, heappushpop from itertools import ( + accumulate, chain, combinations, compress, @@ -25,9 +27,12 @@ product, repeat, starmap, + takewhile, tee, zip_longest, ) +from math import prod, comb, isqrt, gcd +from operator import mul, not_, itemgetter, getitem, index from random import randrange, sample, choice from sys import hexversion @@ -42,9 +47,12 @@ 'factor', 'flatten', 'grouper', + 'is_prime', 'iter_except', 'iter_index', + 'loops', 'matmul', + 'multinomial', 'ncycles', 'nth', 'nth_combination', @@ -65,6 +73,7 @@ 'random_product', 'repeatfunc', 'roundrobin', + 'running_median', 'sieve', 'sliding_window', 'subslices', @@ -86,17 +95,30 @@ # zip with strict is available for Python 3.10+ try: zip(strict=True) -except TypeError: +except TypeError: # pragma: no cover _zip_strict = zip -else: +else: # pragma: no cover _zip_strict = partial(zip, strict=True) + # math.sumprod is available for Python 3.12+ -_sumprod = getattr(math, 'sumprod', lambda x, y: dotproduct(x, y)) +try: + from math import sumprod as _sumprod +except ImportError: # pragma: no cover + _sumprod = lambda x, y: dotproduct(x, y) + + +# heapq max-heap functions are available for Python 3.14+ +try: + from heapq import heappush_max, heappushpop_max +except ImportError: # pragma: no cover + _max_heap_available = False +else: # pragma: no cover + _max_heap_available = True def take(n, iterable): - """Return first *n* items of the iterable as a list. + """Return first *n* items of the *iterable* as a list. >>> take(3, range(10)) [0, 1, 2] @@ -137,14 +159,12 @@ def tail(n, iterable): ['E', 'F', 'G'] """ - # If the given iterable has a length, then we can use islice to get its - # final elements. Note that if the iterable is not actually Iterable, - # either islice or deque will throw a TypeError. This is why we don't - # check if it is Iterable. - if isinstance(iterable, Sized): - yield from islice(iterable, max(0, len(iterable) - n), None) + try: + size = len(iterable) + except TypeError: + return iter(deque(iterable, maxlen=n)) else: - yield from iter(deque(iterable, maxlen=n)) + return islice(iterable, max(0, size - n), None) def consume(iterator, n=None): @@ -218,7 +238,12 @@ def all_equal(iterable, key=None): True """ - return len(list(islice(groupby(iterable, key), 2))) <= 1 + iterator = groupby(iterable, key) + for first in iterator: + for second in iterator: + return False + return True + return True def quantify(iterable, pred=bool): @@ -261,11 +286,14 @@ def ncycles(iterable, n): def dotproduct(vec1, vec2): """Returns the dot product of the two iterables. - >>> dotproduct([10, 10], [20, 20]) - 400 + >>> dotproduct([10, 15, 12], [0.65, 0.80, 1.25]) + 33.5 + >>> 10 * 0.65 + 15 * 0.80 + 12 * 1.25 + 33.5 + In Python 3.12 and later, use ``math.sumprod()`` instead. 
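
# The all_equal() rewrite above replaces len(list(islice(groupby(...), 2))) <= 1
# with a double loop: entering the outer `for` proves there is at least one
# group, and entering the inner `for` proves there is a second, unequal
# group. Standalone sketch of the same shape:
from itertools import groupby

def all_equal_sketch(iterable, key=None):
    grouped = groupby(iterable, key)
    for _ in grouped:
        for _ in grouped:
            return False
        return True
    return True  # empty input is vacuously all-equal

assert all_equal_sketch('aaaa')
assert not all_equal_sketch('aaab')
assert all_equal_sketch('')
assert all_equal_sketch('AaaA', key=str.casefold)
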
""" - return sum(map(operator.mul, vec1, vec2)) + return sum(map(mul, vec1, vec2)) def flatten(listOfLists): @@ -323,9 +351,9 @@ def _pairwise(iterable): try: from itertools import pairwise as itertools_pairwise -except ImportError: +except ImportError: # pragma: no cover pairwise = _pairwise -else: +else: # pragma: no cover def pairwise(iterable): return itertools_pairwise(iterable) @@ -390,26 +418,26 @@ def grouper(iterable, n, incomplete='fill', fillvalue=None): When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised. - >>> it = grouper('ABCDEFG', 3, incomplete='strict') - >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL + >>> iterator = grouper('ABCDEFG', 3, incomplete='strict') + >>> list(iterator) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... UnequalIterablesError """ - args = [iter(iterable)] * n + iterators = [iter(iterable)] * n if incomplete == 'fill': - return zip_longest(*args, fillvalue=fillvalue) + return zip_longest(*iterators, fillvalue=fillvalue) if incomplete == 'strict': - return _zip_equal(*args) + return _zip_equal(*iterators) if incomplete == 'ignore': - return zip(*args) + return zip(*iterators) else: raise ValueError('Expected fill, strict, or ignore') def roundrobin(*iterables): - """Yields an item from each iterable, alternating between them. + """Visit input iterables in a cycle until each is exhausted. >>> list(roundrobin('ABC', 'D', 'EF')) ['A', 'D', 'E', 'B', 'F', 'C'] @@ -451,7 +479,7 @@ def partition(pred, iterable): t1, t2, p = tee(iterable, 3) p1, p2 = tee(map(pred, p)) - return (compress(t1, map(operator.not_, p1)), compress(t2, p2)) + return (compress(t1, map(not_, p1)), compress(t2, p2)) def powerset(iterable): @@ -530,9 +558,9 @@ def unique_justseen(iterable, key=None): """ if key is None: - return map(operator.itemgetter(0), groupby(iterable)) + return map(itemgetter(0), groupby(iterable)) - return map(next, map(operator.itemgetter(1), groupby(iterable, key))) + return map(next, map(itemgetter(1), groupby(iterable, key))) def unique(iterable, key=None, reverse=False): @@ -551,7 +579,8 @@ def unique(iterable, key=None, reverse=False): The elements in *iterable* need not be hashable, but they must be comparable for sorting to work. """ - return unique_justseen(sorted(iterable, key=key, reverse=reverse), key=key) + sequenced = sorted(iterable, key=key, reverse=reverse) + return unique_justseen(sequenced, key=key) def iter_except(func, exception, first=None): @@ -576,13 +605,11 @@ def iter_except(func, exception, first=None): [] """ - try: + with suppress(exception): if first is not None: yield first() - while 1: + while True: yield func() - except exception: - pass def first_true(iterable, default=None, pred=None): @@ -618,7 +645,7 @@ def random_product(*args, repeat=1): ('a', 2, 'd', 3) This equivalent to taking a random selection from - ``itertools.product(*args, **kwarg)``. + ``itertools.product(*args, repeat=repeat)``. """ pools = [tuple(pool) for pool in args] * repeat @@ -734,19 +761,40 @@ def prepend(value, iterator): def convolve(signal, kernel): - """Convolve the iterable *signal* with the iterable *kernel*. + """Discrete linear convolution of two iterables. + Equivalent to polynomial multiplication. + + For example, multiplying ``(x² -x - 20)`` by ``(x - 3)`` + gives ``(x³ -4x² -17x + 60)``. 
+ + >>> list(convolve([1, -1, -20], [1, -3])) + [1, -4, -17, 60] - >>> signal = (1, 2, 3, 4, 5) - >>> kernel = [3, 2, 1] - >>> list(convolve(signal, kernel)) - [3, 8, 14, 20, 26, 14, 5] + Examples of popular kinds of kernels: - Note: the input arguments are not interchangeable, as the *kernel* - is immediately consumed and stored. + * The kernel ``[0.25, 0.25, 0.25, 0.25]`` computes a moving average. + For image data, this blurs the image and reduces noise. + * The kernel ``[1/2, 0, -1/2]`` estimates the first derivative of + a function evaluated at evenly spaced inputs. + * The kernel ``[1, -2, 1]`` estimates the second derivative of a + function evaluated at evenly spaced inputs. + + Convolutions are mathematically commutative; however, the inputs are + evaluated differently. The signal is consumed lazily and can be + infinite. The kernel is fully consumed before the calculations begin. + + Supports all numeric types: int, float, complex, Decimal, Fraction. + + References: + + * Article: https://betterexplained.com/articles/intuitive-convolution/ + * Video by 3Blue1Brown: https://www.youtube.com/watch?v=KuXjwB4LzSA """ - # This implementation intentionally doesn't match the one in the itertools - # documentation. + # This implementation comes from an older version of the itertools + # documentation. While the newer implementation is a bit clearer, + # this one was kept because the inlined window logic is faster + # and it avoids an unnecessary deque-to-tuple conversion. kernel = tuple(kernel)[::-1] n = len(kernel) window = deque([0], maxlen=n) * n @@ -769,23 +817,9 @@ def before_and_after(predicate, it): Note that the first iterator must be fully consumed before the second iterator can generate valid results. """ - it = iter(it) - transition = [] - - def true_iterator(): - for elem in it: - if predicate(elem): - yield elem - else: - transition.append(elem) - return - - # Note: this is different from itertools recipes to allow nesting - # before_and_after remainders into before_and_after again. See tests - # for an example. - remainder_iterator = chain(transition, it) - - return true_iterator(), remainder_iterator + trues, after = tee(it) + trues = compress(takewhile(predicate, trues), zip(after)) + return trues, after def triplewise(iterable): @@ -795,8 +829,30 @@ def triplewise(iterable): [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')] """ - for (a, _), (b, c) in pairwise(pairwise(iterable)): - yield a, b, c + # This deviates from the itertools documentation recipe - see + # https://github.com/more-itertools/more-itertools/issues/889 + t1, t2, t3 = tee(iterable, 3) + next(t3, None) + next(t3, None) + next(t2, None) + return zip(t1, t2, t3) + + +def _sliding_window_islice(iterable, n): + # Fast path for small, non-zero values of n. + iterators = tee(iterable, n) + for i, iterator in enumerate(iterators): + next(islice(iterator, i, i), None) + return zip(*iterators) + + +def _sliding_window_deque(iterable, n): + # Normal path for other values of n. + iterator = iter(iterable) + window = deque(islice(iterator, n - 1), maxlen=n) + for x in iterator: + window.append(x) + yield tuple(window) def sliding_window(iterable, n): @@ -812,11 +868,16 @@ def sliding_window(iterable, n): For a variant with more features, see :func:`windowed`. 
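
# How the one-line before_and_after() rewrite above works: compress() pulls
# one item from takewhile(predicate, trues) and one 1-tuple from zip(after)
# per step, so the `after` tee branch advances in lockstep and resumes at
# the first failing element. Standalone sketch:
from itertools import compress, takewhile, tee

def before_and_after_sketch(predicate, it):
    trues, after = tee(it)
    trues = compress(takewhile(predicate, trues), zip(after))
    return trues, after

trues, after = before_and_after_sketch(str.isupper, 'ABCdEfGhI')
assert ''.join(trues) == 'ABC'  # consume this one first
assert ''.join(after) == 'dEfGhI'
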
""" - it = iter(iterable) - window = deque(islice(it, n - 1), maxlen=n) - for x in it: - window.append(x) - yield tuple(window) + if n > 20: + return _sliding_window_deque(iterable, n) + elif n > 2: + return _sliding_window_islice(iterable, n) + elif n == 2: + return pairwise(iterable) + elif n == 1: + return zip(iterable) + else: + raise ValueError(f'n should be at least one, not {n}') def subslices(iterable): @@ -830,18 +891,29 @@ def subslices(iterable): """ seq = list(iterable) slices = starmap(slice, combinations(range(len(seq) + 1), 2)) - return map(operator.getitem, repeat(seq), slices) + return map(getitem, repeat(seq), slices) def polynomial_from_roots(roots): """Compute a polynomial's coefficients from its roots. - >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3) - >>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60 + >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3) + >>> polynomial_from_roots(roots) # x³ - 4 x² - 17 x + 60 [1, -4, -17, 60] + + Note that polynomial coefficients are specified in descending power order. + + Supports all numeric types: int, float, complex, Decimal, Fraction. """ - factors = zip(repeat(1), map(operator.neg, roots)) - return list(reduce(convolve, factors, [1])) + + # This recipe differs from the one in itertools docs in that it + # applies list() after each call to convolve(). This avoids + # hitting stack limits with nested generators. + + poly = [1] + for root in roots: + poly = list(convolve(poly, (1, -root))) + return poly def iter_index(iterable, value, start=0, stop=None): @@ -872,19 +944,17 @@ def iter_index(iterable, value, start=0, stop=None): seq_index = getattr(iterable, 'index', None) if seq_index is None: # Slow path for general iterables - it = islice(iterable, start, stop) - for i, element in enumerate(it, start): + iterator = islice(iterable, start, stop) + for i, element in enumerate(iterator, start): if element is value or element == value: yield i else: # Fast path for sequences stop = len(iterable) if stop is None else stop i = start - 1 - try: + with suppress(ValueError): while True: yield (i := seq_index(value, i + 1, stop)) - except ValueError: - pass def sieve(n): @@ -892,20 +962,23 @@ def sieve(n): >>> list(sieve(30)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + """ + # This implementation comes from an older version of the itertools + # documentation. The newer implementation is easier to read but is + # less lazy. if n > 2: yield 2 start = 3 data = bytearray((0, 1)) * (n // 2) - limit = math.isqrt(n) + 1 - for p in iter_index(data, 1, start, limit): + for p in iter_index(data, 1, start, stop=isqrt(n) + 1): yield from iter_index(data, 1, start, p * p) data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p))) start = p * p yield from iter_index(data, 1, start) -def _batched(iterable, n, *, strict=False): +def _batched(iterable, n, *, strict=False): # pragma: no cover """Batch data into tuples of length *n*. If the number of items in *iterable* is not divisible by *n*: * The last batch will be shorter if *strict* is ``False``. 
@@ -918,23 +991,22 @@ def _batched(iterable, n, *, strict=False): """ if n < 1: raise ValueError('n must be at least one') - it = iter(iterable) - while batch := tuple(islice(it, n)): + iterator = iter(iterable) + while batch := tuple(islice(iterator, n)): if strict and len(batch) != n: raise ValueError('batched(): incomplete batch') yield batch -if hexversion >= 0x30D00A2: +if hexversion >= 0x30D00A2: # pragma: no cover from itertools import batched as itertools_batched def batched(iterable, n, *, strict=False): return itertools_batched(iterable, n, strict=strict) -else: - batched = _batched - batched.__doc__ = _batched.__doc__ +else: # pragma: no cover + batched = _batched def transpose(it): @@ -949,15 +1021,68 @@ def transpose(it): return _zip_strict(*it) -def reshape(matrix, cols): - """Reshape the 2-D input *matrix* to have a column count given by *cols*. +def _is_scalar(value, stringlike=(str, bytes)): + "Scalars are bytes, strings, and non-iterables." + try: + iter(value) + except TypeError: + return True + return isinstance(value, stringlike) + + +def _flatten_tensor(tensor): + "Depth-first iterator over scalars in a tensor." + iterator = iter(tensor) + while True: + try: + value = next(iterator) + except StopIteration: + return iterator + iterator = chain((value,), iterator) + if _is_scalar(value): + return iterator + iterator = chain.from_iterable(iterator) + + +def reshape(matrix, shape): + """Change the shape of a *matrix*. + + If *shape* is an integer, the matrix must be two dimensional + and the shape is interpreted as the desired number of columns: + + >>> matrix = [(0, 1), (2, 3), (4, 5)] + >>> cols = 3 + >>> list(reshape(matrix, cols)) + [(0, 1, 2), (3, 4, 5)] + + If *shape* is a tuple (or other iterable), the input matrix can have + any number of dimensions. It will first be flattened and then rebuilt + to the desired shape which can also be multidimensional: + + >>> matrix = [(0, 1), (2, 3), (4, 5)] # Start with a 3 x 2 matrix + + >>> list(reshape(matrix, (2, 3))) # Make a 2 x 3 matrix + [(0, 1, 2), (3, 4, 5)] + + >>> list(reshape(matrix, (6,))) # Make a vector of length six + [0, 1, 2, 3, 4, 5] + + >>> list(reshape(matrix, (2, 1, 3, 1))) # Make 2 x 1 x 3 x 1 tensor + [(((0,), (1,), (2,)),), (((3,), (4,), (5,)),)] + + Each dimension is assumed to be uniform, either all arrays or all scalars. + Flattening stops when the first value in a dimension is a scalar. + Scalars are bytes, strings, and non-iterables. + The reshape iterator stops when the requested shape is complete + or when the input is exhausted, whichever comes first. - >>> matrix = [(0, 1), (2, 3), (4, 5)] - >>> cols = 3 - >>> list(reshape(matrix, cols)) - [(0, 1, 2), (3, 4, 5)] """ - return batched(chain.from_iterable(matrix), cols) + if isinstance(shape, int): + return batched(chain.from_iterable(matrix), shape) + first_dim, *dims = shape + scalar_stream = _flatten_tensor(matrix) + reshaped = reduce(batched, reversed(dims), scalar_stream) + return islice(reshaped, first_dim) def matmul(m1, m2): @@ -968,40 +1093,84 @@ def matmul(m1, m2): The caller should ensure that the dimensions of the input matrices are compatible with each other. + + Supports all numeric types: int, float, complex, Decimal, Fraction. """ n = len(m2[0]) return batched(starmap(_sumprod, product(m1, transpose(m2))), n) +def _factor_pollard(n): + # Return a factor of n using Pollard's rho algorithm. + # Efficient when n is odd and composite. 
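+    # How the search below proceeds, in brief: iterate x -> (x*x + b) mod n
+    # and run Floyd's tortoise-and-hare cycle detection, advancing x one
+    # step and y two steps per round. When x and y collide modulo some
+    # prime factor p of n but not modulo n itself, gcd(x - y, n) is a
+    # proper factor. Successive offsets b are tried until one succeeds.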
+ for b in range(1, n): + x = y = 2 + d = 1 + while d == 1: + x = (x * x + b) % n + y = (y * y + b) % n + y = (y * y + b) % n + d = gcd(x - y, n) + if d != n: + return d + raise ValueError('prime or under 5') # pragma: no cover + + +_primes_below_211 = tuple(sieve(211)) + + def factor(n): """Yield the prime factors of n. >>> list(factor(360)) [2, 2, 2, 3, 3, 5] + + Finds small factors with trial division. Larger factors are + either verified as prime with ``is_prime`` or split into + smaller factors with Pollard's rho algorithm. """ - for prime in sieve(math.isqrt(n) + 1): + + # Corner case reduction + if n < 2: + return + + # Trial division reduction + for prime in _primes_below_211: while not n % prime: yield prime n //= prime - if n == 1: - return - if n > 1: - yield n + + # Pollard's rho reduction + primes = [] + todo = [n] if n > 1 else [] + for n in todo: + if n < 211**2 or is_prime(n): + primes.append(n) + else: + fact = _factor_pollard(n) + todo += (fact, n // fact) + yield from sorted(primes) def polynomial_eval(coefficients, x): """Evaluate a polynomial at a specific value. - Example: evaluating x^3 - 4 * x^2 - 17 * x + 60 at x = 2.5: + Computes with better numeric stability than Horner's method. + + Evaluate ``x^3 - 4 * x^2 - 17 * x + 60`` at ``x = 2.5``: >>> coefficients = [1, -4, -17, 60] >>> x = 2.5 >>> polynomial_eval(coefficients, x) 8.125 + + Note that polynomial coefficients are specified in descending power order. + + Supports all numeric types: int, float, complex, Decimal, Fraction. """ n = len(coefficients) if n == 0: - return x * 0 # coerce zero to the type of x + return type(x)(0) powers = map(pow, repeat(x), reversed(range(n))) return _sumprod(coefficients, powers) @@ -1011,6 +1180,8 @@ def sum_of_squares(it): >>> sum_of_squares([10, 20, 30]) 1400 + + Supports all numeric types: int, float, complex, Decimal, Fraction. """ return _sumprod(*tee(it)) @@ -1018,29 +1189,283 @@ def polynomial_derivative(coefficients): """Compute the first derivative of a polynomial. - Example: evaluating the derivative of x^3 - 4 * x^2 - 17 * x + 60 + Evaluate the derivative of ``x³ - 4 x² - 17 x + 60``: >>> coefficients = [1, -4, -17, 60] >>> derivative_coefficients = polynomial_derivative(coefficients) >>> derivative_coefficients [3, -8, -17] + + Note that polynomial coefficients are specified in descending power order. + + Supports all numeric types: int, float, complex, Decimal, Fraction. """ n = len(coefficients) powers = reversed(range(1, n)) - return list(map(operator.mul, coefficients, powers)) + return list(map(mul, coefficients, powers)) def totient(n): """Return the count of natural numbers up to *n* that are coprime with *n*. - >>> totient(9) + Euler's totient function φ(n) gives the number of totatives. + Totatives are integers k in the range 1 ≤ k ≤ n such that gcd(n, k) = 1.
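+
+    For example, take n = 9, whose only prime factor is 3. The update
+    ``n -= n // prime`` in the loop below computes 9 - 9 // 3 = 6,
+    which is 9 * (1 - 1/3), Euler's product formula applied once: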
+ + >>> n = 9 + >>> totient(n) 6 - >>> totient(12) - 4 - """ - # The itertools docs use unique_justseen instead of set; see - # https://github.com/more-itertools/more-itertools/issues/823 - for p in set(factor(n)): - n = n // p * (p - 1) + >>> totatives = [x for x in range(1, n) if gcd(n, x) == 1] + >>> totatives + [1, 2, 4, 5, 7, 8] + >>> len(totatives) + 6 + + Reference: https://en.wikipedia.org/wiki/Euler%27s_totient_function + + """ + for prime in set(factor(n)): + n -= n // prime return n + + +# Miller–Rabin primality test: https://oeis.org/A014233 +_perfect_tests = [ + (2047, (2,)), + (9080191, (31, 73)), + (4759123141, (2, 7, 61)), + (1122004669633, (2, 13, 23, 1662803)), + (2152302898747, (2, 3, 5, 7, 11)), + (3474749660383, (2, 3, 5, 7, 11, 13)), + (18446744073709551616, (2, 325, 9375, 28178, 450775, 9780504, 1795265022)), + ( + 3317044064679887385961981, + (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41), + ), +] + + +@lru_cache +def _shift_to_odd(n): + 'Return s, d such that 2**s * d == n' + s = ((n - 1) ^ n).bit_length() - 1 + d = n >> s + assert (1 << s) * d == n and d & 1 and s >= 0 + return s, d + + +def _strong_probable_prime(n, base): + assert (n > 2) and (n & 1) and (2 <= base < n) + + s, d = _shift_to_odd(n - 1) + + x = pow(base, d, n) + if x == 1 or x == n - 1: + return True + + for _ in range(s - 1): + x = x * x % n + if x == n - 1: + return True + + return False + + +# Separate instance of Random() that doesn't share state +# with the default user instance of Random(). +_private_randrange = random.Random().randrange + + +def is_prime(n): + """Return ``True`` if *n* is prime and ``False`` otherwise. + + Basic examples: + + >>> is_prime(37) + True + >>> is_prime(3 * 13) + False + >>> is_prime(18_446_744_073_709_551_557) + True + + Find the next prime over one billion: + + >>> next(filter(is_prime, count(10**9))) + 1000000007 + + Generate random primes up to 200 bits and up to 60 decimal digits: + + >>> from random import seed, randrange, getrandbits + >>> seed(18675309) + + >>> next(filter(is_prime, map(getrandbits, repeat(200)))) + 893303929355758292373272075469392561129886005037663238028407 + + >>> next(filter(is_prime, map(randrange, repeat(10**60)))) + 269638077304026462407872868003560484232362454342414618963649 + + This function is exact for values of *n* below 10**24. For larger inputs, + the probabilistic Miller-Rabin primality test has a less than 1 in 2**128 + chance of a false positive. + """ + + if n < 17: + return n in {2, 3, 5, 7, 11, 13} + + if not (n & 1 and n % 3 and n % 5 and n % 7 and n % 11 and n % 13): + return False + + for limit, bases in _perfect_tests: + if n < limit: + break + else: + bases = (_private_randrange(2, n - 1) for i in range(64)) + + return all(_strong_probable_prime(n, base) for base in bases) + + +def loops(n): + """Returns an iterable with *n* elements for efficient looping. + Like ``range(n)`` but doesn't create integers. + + >>> i = 0 + >>> for _ in loops(5): + ... i += 1 + >>> i + 5 + + """ + return repeat(None, n) + + +def multinomial(*counts): + """Number of distinct arrangements of a multiset. + + The expression ``multinomial(3, 4, 2)`` has several equivalent + interpretations: + + * In the expansion of ``(a + b + c)⁹``, the coefficient of the + ``a³b⁴c²`` term is 1260. + + * There are 1260 distinct ways to arrange 9 balls consisting of 3 reds, 4 + greens, and 2 blues. + + * There are 1260 unique ways to place 9 distinct objects into three bins + with sizes 3, 4, and 2. 
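+
+    The implementation below exploits the identity
+    ``multinomial(3, 4, 2) == comb(3, 3) * comb(7, 4) * comb(9, 2)``,
+    where 3, 7, 9 are the cumulative sums of the counts, giving
+    1 * 35 * 36 = 1260:
+
+    >>> from math import comb
+    >>> from itertools import accumulate
+    >>> counts = (3, 4, 2)
+    >>> [comb(total, k) for total, k in zip(accumulate(counts), counts)]
+    [1, 35, 36]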
+ + The :func:`multinomial` function computes the length of + :func:`distinct_permutations`. For example, there are 83,160 distinct + anagrams of the word "abracadabra": + + >>> from more_itertools import distinct_permutations, ilen + >>> ilen(distinct_permutations('abracadabra')) + 83160 + + This can be computed directly from the letter counts, 5a 2b 2r 1c 1d: + + >>> from collections import Counter + >>> list(Counter('abracadabra').values()) + [5, 2, 2, 1, 1] + >>> multinomial(5, 2, 2, 1, 1) + 83160 + + A binomial coefficient is a special case of multinomial where there are + only two categories. For example, the number of ways to arrange 12 balls + with 5 reds and 7 blues is ``multinomial(5, 7)`` or ``math.comb(12, 5)``. + + Likewise, factorial is a special case of multinomial where + the multiplicities are all just 1 so that + ``multinomial(1, 1, 1, 1, 1, 1, 1) == math.factorial(7)``. + + Reference: https://en.wikipedia.org/wiki/Multinomial_theorem + + """ + return prod(map(comb, accumulate(counts), counts)) + + +def _running_median_minheap_and_maxheap(iterator): # pragma: no cover + "Non-windowed running_median() for Python 3.14+" + + read = iterator.__next__ + lo = [] # max-heap + hi = [] # min-heap (same size as or one smaller than lo) + + with suppress(StopIteration): + while True: + heappush_max(lo, heappushpop(hi, read())) + yield lo[0] + + heappush(hi, heappushpop_max(lo, read())) + yield (lo[0] + hi[0]) / 2 + + +def _running_median_minheap_only(iterator): # pragma: no cover + "Backport of non-windowed running_median() for Python 3.13 and prior." + + read = iterator.__next__ + lo = [] # max-heap (actually a minheap with negated values) + hi = [] # min-heap (same size as or one smaller than lo) + + with suppress(StopIteration): + while True: + heappush(lo, -heappushpop(hi, read())) + yield -lo[0] + + heappush(hi, -heappushpop(lo, -read())) + yield (hi[0] - lo[0]) / 2 + + +def _running_median_windowed(iterator, maxlen): + "Yield median of values in a sliding window." + + window = deque() + ordered = [] + + for x in iterator: + window.append(x) + insort(ordered, x) + + if len(ordered) > maxlen: + i = bisect_left(ordered, window.popleft()) + del ordered[i] + + n = len(ordered) + m = n // 2 + yield ordered[m] if n & 1 else (ordered[m - 1] + ordered[m]) / 2 + + +def running_median(iterable, *, maxlen=None): + """Cumulative median of values seen so far or values in a sliding window. + + Set *maxlen* to a positive integer to specify the maximum size + of the sliding window. The default of *None* is equivalent to + an unbounded window. + + For example: + + >>> list(running_median([5.0, 9.0, 4.0, 12.0, 8.0, 9.0])) + [5.0, 7.0, 5.0, 7.0, 8.0, 8.5] + >>> list(running_median([5.0, 9.0, 4.0, 12.0, 8.0, 9.0], maxlen=3)) + [5.0, 7.0, 5.0, 9.0, 8.0, 9.0] + + Supports numeric types such as int, float, Decimal, and Fraction, + but not complex numbers, which are unorderable. + + On Python 3.13 and prior, max-heaps are simulated with + negative values. The negation causes Decimal inputs to apply context + rounding, making the results slightly different from those obtained + by statistics.median().
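The unbounded case uses the classic two-heap technique: ``lo`` is a max-heap holding the smaller half of the values and ``hi`` is a min-heap holding the larger half, with ``lo`` kept equal in size to ``hi`` or one element larger; the median is then ``lo``'s top alone, or the mean of the two tops. A standalone sketch of the negated-value variant described above (running_median_sketch is a hypothetical name, not part of the module):

    from heapq import heappush, heappushpop

    def running_median_sketch(iterable):
        lo, hi = [], []  # max-heap (negated values), min-heap
        for x in iterable:
            if len(lo) == len(hi):
                # Route x through hi so lo gains the max of the smaller half.
                heappush(lo, -heappushpop(hi, x))
                yield -lo[0]
            else:
                heappush(hi, -heappushpop(lo, -x))
                yield (hi[0] - lo[0]) / 2

    assert list(running_median_sketch([5.0, 9.0, 4.0, 12.0, 8.0, 9.0])) == [
        5.0, 7.0, 5.0, 7.0, 8.0, 8.5
    ]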
+ """ + + iterator = iter(iterable) + + if maxlen is not None: + maxlen = index(maxlen) + if maxlen <= 0: + raise ValueError('Window size should be positive') + return _running_median_windowed(iterator, maxlen) + + if not _max_heap_available: + return _running_median_minheap_only(iterator) # pragma: no cover + + return _running_median_minheap_and_maxheap(iterator) # pragma: no cover diff --git a/setuptools/_vendor/more_itertools/recipes.pyi b/setuptools/_vendor/more_itertools/recipes.pyi index 739acec05f..de3d0a1777 100644 --- a/setuptools/_vendor/more_itertools/recipes.pyi +++ b/setuptools/_vendor/more_itertools/recipes.pyi @@ -2,22 +2,75 @@ from __future__ import annotations +from collections.abc import Iterable, Iterator, Sequence +from decimal import Decimal +from fractions import Fraction from typing import ( Any, Callable, - Iterable, - Iterator, - overload, - Sequence, - Type, TypeVar, + overload, ) +__all__ = [ + 'all_equal', + 'batched', + 'before_and_after', + 'consume', + 'convolve', + 'dotproduct', + 'first_true', + 'factor', + 'flatten', + 'grouper', + 'is_prime', + 'iter_except', + 'iter_index', + 'loops', + 'matmul', + 'multinomial', + 'ncycles', + 'nth', + 'nth_combination', + 'padnone', + 'pad_none', + 'pairwise', + 'partition', + 'polynomial_eval', + 'polynomial_from_roots', + 'polynomial_derivative', + 'powerset', + 'prepend', + 'quantify', + 'reshape', + 'random_combination_with_replacement', + 'random_combination', + 'random_permutation', + 'random_product', + 'repeatfunc', + 'roundrobin', + 'running_median', + 'sieve', + 'sliding_window', + 'subslices', + 'sum_of_squares', + 'tabulate', + 'tail', + 'take', + 'totient', + 'transpose', + 'triplewise', + 'unique', + 'unique_everseen', + 'unique_justseen', +] + # Type and type variable definitions _T = TypeVar('_T') _T1 = TypeVar('_T1') _T2 = TypeVar('_T2') _U = TypeVar('_U') +_NumberT = TypeVar("_NumberT", float, Decimal, Fraction) def take(n: int, iterable: Iterable[_T]) -> list[_T]: ... def tabulate( @@ -69,13 +122,13 @@ def unique( @overload def iter_except( func: Callable[[], _T], - exception: Type[BaseException] | tuple[Type[BaseException], ...], + exception: type[BaseException] | tuple[type[BaseException], ...], first: None = ..., ) -> Iterator[_T]: ... @overload def iter_except( func: Callable[[], _T], - exception: Type[BaseException] | tuple[Type[BaseException], ...], + exception: type[BaseException] | tuple[type[BaseException], ...], first: Callable[[], _U], ) -> Iterator[_T | _U]: ... @overload @@ -119,18 +172,34 @@ def iter_index( stop: int | None = ..., ) -> Iterator[int]: ... def sieve(n: int) -> Iterator[int]: ... -def batched( +def _batched( iterable: Iterable[_T], n: int, *, strict: bool = False -) -> Iterator[tuple[_T]]: ... +) -> Iterator[tuple[_T, ...]]: ... + +batched = _batched + def transpose( it: Iterable[Iterable[_T]], ) -> Iterator[tuple[_T, ...]]: ... +@overload def reshape( - matrix: Iterable[Iterable[_T]], cols: int + matrix: Iterable[Iterable[_T]], shape: int ) -> Iterator[tuple[_T, ...]]: ... +@overload +def reshape(matrix: Iterable[Any], shape: Iterable[int]) -> Iterator[Any]: ... def matmul(m1: Sequence[_T], m2: Sequence[_T]) -> Iterator[tuple[_T]]: ... +def _factor_trial(n: int) -> Iterator[int]: ... +def _factor_pollard(n: int) -> int: ... def factor(n: int) -> Iterator[int]: ... def polynomial_eval(coefficients: Sequence[_T], x: _U) -> _U: ... def sum_of_squares(it: Iterable[_T]) -> _T: ... def polynomial_derivative(coefficients: Sequence[_T]) -> list[_T]: ... 
def totient(n: int) -> int: ... +def _shift_to_odd(n: int) -> tuple[int, int]: ... +def _strong_probable_prime(n: int, base: int) -> bool: ... +def is_prime(n: int) -> bool: ... +def loops(n: int) -> Iterator[None]: ... +def multinomial(*counts: int) -> int: ... +def running_median( + iterable: Iterable[_NumberT], *, maxlen: int | None = ... +) -> Iterator[_NumberT]: ... diff --git a/setuptools/_vendor/packaging-24.2.dist-info/RECORD b/setuptools/_vendor/packaging-24.2.dist-info/RECORD deleted file mode 100644 index 678aa5a501..0000000000 --- a/setuptools/_vendor/packaging-24.2.dist-info/RECORD +++ /dev/null @@ -1,25 +0,0 @@ -packaging-24.2.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 -packaging-24.2.dist-info/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 -packaging-24.2.dist-info/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 -packaging-24.2.dist-info/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 -packaging-24.2.dist-info/METADATA,sha256=ohH86s6k5mIfQxY2TS0LcSfADeOFa4BiCC-bxZV-pNs,3204 -packaging-24.2.dist-info/RECORD,, -packaging-24.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -packaging-24.2.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI,82 -packaging/__init__.py,sha256=dk4Ta_vmdVJxYHDcfyhvQNw8V3PgSBomKNXqg-D2JDY,494 -packaging/_elffile.py,sha256=cflAQAkE25tzhYmq_aCi72QfbT_tn891tPzfpbeHOwE,3306 -packaging/_manylinux.py,sha256=vl5OCoz4kx80H5rwXKeXWjl9WNISGmr4ZgTpTP9lU9c,9612 -packaging/_musllinux.py,sha256=p9ZqNYiOItGee8KcZFeHF_YcdhVwGHdK6r-8lgixvGQ,2694 -packaging/_parser.py,sha256=s_TvTvDNK0NrM2QB3VKThdWFM4Nc0P6JnkObkl3MjpM,10236 -packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431 -packaging/_tokenizer.py,sha256=J6v5H7Jzvb-g81xp_2QACKwO7LxHQA6ikryMU7zXwN8,5273 -packaging/licenses/__init__.py,sha256=1x5M1nEYjcgwEbLt0dXwz2ukjr18DiCzC0sraQqJ-Ww,5715 -packaging/licenses/_spdx.py,sha256=oAm1ztPFwlsmCKe7lAAsv_OIOfS1cWDu9bNBkeu-2ns,48398 -packaging/markers.py,sha256=c89TNzB7ZdGYhkovm6PYmqGyHxXlYVaLW591PHUNKD8,10561 -packaging/metadata.py,sha256=YJibM7GYe4re8-0a3OlXmGS-XDgTEoO4tlBt2q25Bng,34762 -packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -packaging/requirements.py,sha256=gYyRSAdbrIyKDY66ugIDUQjRMvxkH2ALioTmX3tnL6o,2947 -packaging/specifiers.py,sha256=GG1wPNMcL0fMJO68vF53wKMdwnfehDcaI-r9NpTfilA,40074 -packaging/tags.py,sha256=CFqrJzAzc2XNGexerH__T-Y5Iwq7WbsYXsiLERLWxY0,21014 -packaging/utils.py,sha256=0F3Hh9OFuRgrhTgGZUl5K22Fv1YP2tZl1z_2gO6kJiA,5050 -packaging/version.py,sha256=olfyuk_DPbflNkJ4wBWetXQ17c74x3DB501degUv7DY,16676 diff --git a/setuptools/_vendor/packaging-25.0.dist-info/INSTALLER b/setuptools/_vendor/packaging-25.0.dist-info/INSTALLER new file mode 100644 index 0000000000..5c69047b2e --- /dev/null +++ b/setuptools/_vendor/packaging-25.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/setuptools/_vendor/packaging-24.2.dist-info/METADATA b/setuptools/_vendor/packaging-25.0.dist-info/METADATA similarity index 96% rename from setuptools/_vendor/packaging-24.2.dist-info/METADATA rename to setuptools/_vendor/packaging-25.0.dist-info/METADATA index 1479c8694b..10b290a6cd 100644 --- a/setuptools/_vendor/packaging-24.2.dist-info/METADATA +++ b/setuptools/_vendor/packaging-25.0.dist-info/METADATA @@ -1,6 +1,6 @@ -Metadata-Version: 2.3 +Metadata-Version: 2.4 Name: packaging -Version: 24.2 +Version: 25.0 Summary: Core utilities for Python packages Author-email: 
Donald Stufft Requires-Python: >=3.8 @@ -21,6 +21,9 @@ Classifier: Programming Language :: Python :: 3.13 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Typing :: Typed +License-File: LICENSE +License-File: LICENSE.APACHE +License-File: LICENSE.BSD Project-URL: Documentation, https://packaging.pypa.io/ Project-URL: Source, https://github.com/pypa/packaging diff --git a/setuptools/_vendor/packaging-25.0.dist-info/RECORD b/setuptools/_vendor/packaging-25.0.dist-info/RECORD new file mode 100644 index 0000000000..08a804a1af --- /dev/null +++ b/setuptools/_vendor/packaging-25.0.dist-info/RECORD @@ -0,0 +1,25 @@ +packaging-25.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +packaging-25.0.dist-info/METADATA,sha256=W2EaYJw4_vw9YWv0XSCuyY-31T8kXayp4sMPyFx6woI,3281 +packaging-25.0.dist-info/RECORD,, +packaging-25.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +packaging-25.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 +packaging-25.0.dist-info/licenses/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 +packaging-25.0.dist-info/licenses/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 +packaging-25.0.dist-info/licenses/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 +packaging/__init__.py,sha256=_0cDiPVf2S-bNfVmZguxxzmrIYWlyASxpqph4qsJWUc,494 +packaging/_elffile.py,sha256=UkrbDtW7aeq3qqoAfU16ojyHZ1xsTvGke_WqMTKAKd0,3286 +packaging/_manylinux.py,sha256=t4y_-dTOcfr36gLY-ztiOpxxJFGO2ikC11HgfysGxiM,9596 +packaging/_musllinux.py,sha256=p9ZqNYiOItGee8KcZFeHF_YcdhVwGHdK6r-8lgixvGQ,2694 +packaging/_parser.py,sha256=gYfnj0pRHflVc4RHZit13KNTyN9iiVcU2RUCGi22BwM,10221 +packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431 +packaging/_tokenizer.py,sha256=OYzt7qKxylOAJ-q0XyK1qAycyPRYLfMPdGQKRXkZWyI,5310 +packaging/licenses/__init__.py,sha256=VsK4o27CJXWfTi8r2ybJmsBoCdhpnBWuNrskaCVKP7U,5715 +packaging/licenses/_spdx.py,sha256=oAm1ztPFwlsmCKe7lAAsv_OIOfS1cWDu9bNBkeu-2ns,48398 +packaging/markers.py,sha256=P0we27jm1xUzgGMJxBjtUFCIWeBxTsMeJTOJ6chZmAY,12049 +packaging/metadata.py,sha256=8IZErqQQnNm53dZZuYq4FGU4_dpyinMeH1QFBIWIkfE,34739 +packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +packaging/requirements.py,sha256=gYyRSAdbrIyKDY66ugIDUQjRMvxkH2ALioTmX3tnL6o,2947 +packaging/specifiers.py,sha256=gtPu5DTc-F9baLq3FTGEK6dPhHGCuwwZetaY0PSV2gs,40055 +packaging/tags.py,sha256=41s97W9Zatrq2Ed7Rc3qeBDaHe8pKKvYq2mGjwahfXk,22745 +packaging/utils.py,sha256=0F3Hh9OFuRgrhTgGZUl5K22Fv1YP2tZl1z_2gO6kJiA,5050 +packaging/version.py,sha256=olfyuk_DPbflNkJ4wBWetXQ17c74x3DB501degUv7DY,16676 diff --git a/setuptools/_vendor/zipp-3.19.2.dist-info/REQUESTED b/setuptools/_vendor/packaging-25.0.dist-info/REQUESTED similarity index 100% rename from setuptools/_vendor/zipp-3.19.2.dist-info/REQUESTED rename to setuptools/_vendor/packaging-25.0.dist-info/REQUESTED diff --git a/setuptools/_vendor/typing_extensions-4.12.2.dist-info/WHEEL b/setuptools/_vendor/packaging-25.0.dist-info/WHEEL similarity index 71% rename from setuptools/_vendor/typing_extensions-4.12.2.dist-info/WHEEL rename to setuptools/_vendor/packaging-25.0.dist-info/WHEEL index 3b5e64b5e6..d8b9936dad 100644 --- a/setuptools/_vendor/typing_extensions-4.12.2.dist-info/WHEEL +++ b/setuptools/_vendor/packaging-25.0.dist-info/WHEEL @@ -1,4 +1,4 @@ Wheel-Version: 1.0 -Generator: flit 3.9.0 
+Generator: flit 3.12.0 Root-Is-Purelib: true Tag: py3-none-any diff --git a/setuptools/_vendor/packaging-24.2.dist-info/LICENSE b/setuptools/_vendor/packaging-25.0.dist-info/licenses/LICENSE similarity index 100% rename from setuptools/_vendor/packaging-24.2.dist-info/LICENSE rename to setuptools/_vendor/packaging-25.0.dist-info/licenses/LICENSE diff --git a/setuptools/_vendor/packaging-24.2.dist-info/LICENSE.APACHE b/setuptools/_vendor/packaging-25.0.dist-info/licenses/LICENSE.APACHE similarity index 100% rename from setuptools/_vendor/packaging-24.2.dist-info/LICENSE.APACHE rename to setuptools/_vendor/packaging-25.0.dist-info/licenses/LICENSE.APACHE diff --git a/setuptools/_vendor/packaging-24.2.dist-info/LICENSE.BSD b/setuptools/_vendor/packaging-25.0.dist-info/licenses/LICENSE.BSD similarity index 100% rename from setuptools/_vendor/packaging-24.2.dist-info/LICENSE.BSD rename to setuptools/_vendor/packaging-25.0.dist-info/licenses/LICENSE.BSD diff --git a/setuptools/_vendor/packaging/__init__.py b/setuptools/_vendor/packaging/__init__.py index d79f73c574..d45c22cfd8 100644 --- a/setuptools/_vendor/packaging/__init__.py +++ b/setuptools/_vendor/packaging/__init__.py @@ -6,7 +6,7 @@ __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "24.2" +__version__ = "25.0" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" diff --git a/setuptools/_vendor/packaging/_elffile.py b/setuptools/_vendor/packaging/_elffile.py index 25f4282cc2..7a5afc33b0 100644 --- a/setuptools/_vendor/packaging/_elffile.py +++ b/setuptools/_vendor/packaging/_elffile.py @@ -69,8 +69,7 @@ def __init__(self, f: IO[bytes]) -> None: }[(self.capacity, self.encoding)] except KeyError as e: raise ELFInvalid( - f"unrecognized capacity ({self.capacity}) or " - f"encoding ({self.encoding})" + f"unrecognized capacity ({self.capacity}) or encoding ({self.encoding})" ) from e try: diff --git a/setuptools/_vendor/packaging/_manylinux.py b/setuptools/_vendor/packaging/_manylinux.py index 61339a6fcc..95f55762e8 100644 --- a/setuptools/_vendor/packaging/_manylinux.py +++ b/setuptools/_vendor/packaging/_manylinux.py @@ -161,8 +161,7 @@ def _parse_glibc_version(version_str: str) -> tuple[int, int]: m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) if not m: warnings.warn( - f"Expected glibc version with 2 components major.minor," - f" got: {version_str}", + f"Expected glibc version with 2 components major.minor, got: {version_str}", RuntimeWarning, stacklevel=2, ) diff --git a/setuptools/_vendor/packaging/_parser.py b/setuptools/_vendor/packaging/_parser.py index c1238c06ea..0007c0aa64 100644 --- a/setuptools/_vendor/packaging/_parser.py +++ b/setuptools/_vendor/packaging/_parser.py @@ -349,6 +349,5 @@ def _parse_marker_op(tokenizer: Tokenizer) -> Op: return Op(tokenizer.read().text) else: return tokenizer.raise_syntax_error( - "Expected marker operator, one of " - "<=, <, !=, ==, >=, >, ~=, ===, in, not in" + "Expected marker operator, one of <=, <, !=, ==, >=, >, ~=, ===, in, not in" ) diff --git a/setuptools/_vendor/packaging/_tokenizer.py b/setuptools/_vendor/packaging/_tokenizer.py index 89d041605c..d28a9b6cf5 100644 --- a/setuptools/_vendor/packaging/_tokenizer.py +++ b/setuptools/_vendor/packaging/_tokenizer.py @@ -68,7 +68,8 @@ def __str__(self) -> str: |platform[._](version|machine|python_implementation) |python_implementation |implementation_(name|version) - |extra + |extras? 
+ |dependency_groups )\b """, re.VERBOSE, @@ -119,9 +120,9 @@ def check(self, name: str, *, peek: bool = False) -> bool: another check. If `peek` is set to `True`, the token is not loaded and would need to be checked again. """ - assert ( - self.next_token is None - ), f"Cannot check for {name!r}, already have {self.next_token!r}" + assert self.next_token is None, ( + f"Cannot check for {name!r}, already have {self.next_token!r}" + ) assert name in self.rules, f"Unknown token name: {name!r}" expression = self.rules[name] diff --git a/setuptools/_vendor/packaging/licenses/__init__.py b/setuptools/_vendor/packaging/licenses/__init__.py index 569156d6ca..6f7f9e6289 100644 --- a/setuptools/_vendor/packaging/licenses/__init__.py +++ b/setuptools/_vendor/packaging/licenses/__init__.py @@ -37,8 +37,8 @@ from packaging.licenses._spdx import EXCEPTIONS, LICENSES __all__ = [ - "NormalizedLicenseExpression", "InvalidLicenseExpression", + "NormalizedLicenseExpression", "canonicalize_license_expression", ] diff --git a/setuptools/_vendor/packaging/markers.py b/setuptools/_vendor/packaging/markers.py index fb7f49cf8c..e7cea57297 100644 --- a/setuptools/_vendor/packaging/markers.py +++ b/setuptools/_vendor/packaging/markers.py @@ -8,7 +8,7 @@ import os import platform import sys -from typing import Any, Callable, TypedDict, cast +from typing import AbstractSet, Any, Callable, Literal, TypedDict, Union, cast from ._parser import MarkerAtom, MarkerList, Op, Value, Variable from ._parser import parse_marker as _parse_marker @@ -17,6 +17,7 @@ from .utils import canonicalize_name __all__ = [ + "EvaluateContext", "InvalidMarker", "Marker", "UndefinedComparison", @@ -24,7 +25,9 @@ "default_environment", ] -Operator = Callable[[str, str], bool] +Operator = Callable[[str, Union[str, AbstractSet[str]]], bool] +EvaluateContext = Literal["metadata", "lock_file", "requirement"] +MARKERS_ALLOWING_SET = {"extras", "dependency_groups"} class InvalidMarker(ValueError): @@ -174,13 +177,14 @@ def _format_marker( } -def _eval_op(lhs: str, op: Op, rhs: str) -> bool: - try: - spec = Specifier("".join([op.serialize(), rhs])) - except InvalidSpecifier: - pass - else: - return spec.contains(lhs, prereleases=True) +def _eval_op(lhs: str, op: Op, rhs: str | AbstractSet[str]) -> bool: + if isinstance(rhs, str): + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs, prereleases=True) oper: Operator | None = _operators.get(op.serialize()) if oper is None: @@ -189,19 +193,29 @@ def _eval_op(lhs: str, op: Op, rhs: str) -> bool: return oper(lhs, rhs) -def _normalize(*values: str, key: str) -> tuple[str, ...]: +def _normalize( + lhs: str, rhs: str | AbstractSet[str], key: str +) -> tuple[str, str | AbstractSet[str]]: # PEP 685 – Comparison of extra names for optional distribution dependencies # https://peps.python.org/pep-0685/ # > When comparing extra names, tools MUST normalize the names being # > compared using the semantics outlined in PEP 503 for names if key == "extra": - return tuple(canonicalize_name(v) for v in values) + assert isinstance(rhs, str), "extra value must be a string" + return (canonicalize_name(lhs), canonicalize_name(rhs)) + if key in MARKERS_ALLOWING_SET: + if isinstance(rhs, str): # pragma: no cover + return (canonicalize_name(lhs), canonicalize_name(rhs)) + else: + return (canonicalize_name(lhs), {canonicalize_name(v) for v in rhs}) # other environment markers don't have such standards - return values + return lhs, rhs -def 
_evaluate_markers(markers: MarkerList, environment: dict[str, str]) -> bool: +def _evaluate_markers( + markers: MarkerList, environment: dict[str, str | AbstractSet[str]] +) -> bool: groups: list[list[bool]] = [[]] for marker in markers: @@ -220,7 +234,7 @@ def _evaluate_markers(markers: MarkerList, environment: dict[str, str]) -> bool: lhs_value = lhs.value environment_key = rhs.value rhs_value = environment[environment_key] - + assert isinstance(lhs_value, str), "lhs must be a string" lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) groups[-1].append(_eval_op(lhs_value, op, rhs_value)) else: @@ -298,22 +312,36 @@ def __eq__(self, other: Any) -> bool: return str(self) == str(other) - def evaluate(self, environment: dict[str, str] | None = None) -> bool: + def evaluate( + self, + environment: dict[str, str] | None = None, + context: EvaluateContext = "metadata", + ) -> bool: """Evaluate a marker. Return the boolean from evaluating the given marker against the environment. environment is an optional argument to override all or - part of the determined environment. + part of the determined environment. The *context* parameter specifies what + context the markers are being evaluated for, which influences what markers + are considered valid. Acceptable values are "metadata" (for core metadata; + default), "lock_file", and "requirement" (i.e. all other situations). The environment is determined from the current Python process. """ - current_environment = cast("dict[str, str]", default_environment()) - current_environment["extra"] = "" + current_environment = cast( + "dict[str, str | AbstractSet[str]]", default_environment() + ) + if context == "lock_file": + current_environment.update( + extras=frozenset(), dependency_groups=frozenset() + ) + elif context == "metadata": + current_environment["extra"] = "" if environment is not None: current_environment.update(environment) # The API used to allow setting extra to None. We need to handle this # case for backwards compatibility. - if current_environment["extra"] is None: + if "extra" in current_environment and current_environment["extra"] is None: current_environment["extra"] = "" return _evaluate_markers( @@ -321,11 +349,14 @@ def evaluate(self, environment: dict[str, str] | None = None) -> bool: ) -def _repair_python_full_version(env: dict[str, str]) -> dict[str, str]: +def _repair_python_full_version( + env: dict[str, str | AbstractSet[str]], +) -> dict[str, str | AbstractSet[str]]: """ Work around platform.python_version() returning something that is not PEP 440 compliant for non-tagged Python builds. 
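A hedged usage sketch of the new ``context`` parameter and the set-valued ``extras`` marker wired up above (assumes packaging 25.0; the outputs shown follow from the defaults in ``evaluate``):

    from packaging.markers import Marker

    marker = Marker('"tests" in extras')

    # In a lock-file context, extras defaults to an empty frozenset:
    print(marker.evaluate(context="lock_file"))  # False

    # The environment override may supply a set of names to match against:
    print(marker.evaluate({"extras": {"tests"}}, context="lock_file"))  # True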
""" - if env["python_full_version"].endswith("+"): - env["python_full_version"] += "local" + python_full_version = cast(str, env["python_full_version"]) + if python_full_version.endswith("+"): + env["python_full_version"] = f"{python_full_version}local" return env diff --git a/setuptools/_vendor/packaging/metadata.py b/setuptools/_vendor/packaging/metadata.py index 721f411cfc..3bd8602d36 100644 --- a/setuptools/_vendor/packaging/metadata.py +++ b/setuptools/_vendor/packaging/metadata.py @@ -678,8 +678,7 @@ def _process_license_files(self, value: list[str]) -> list[str]: ) if pathlib.PureWindowsPath(path).as_posix() != path: raise self._invalid_metadata( - f"{path!r} is invalid for {{field}}, " - "paths must use '/' delimiter" + f"{path!r} is invalid for {{field}}, paths must use '/' delimiter" ) paths.append(path) return paths diff --git a/setuptools/_vendor/packaging/specifiers.py b/setuptools/_vendor/packaging/specifiers.py index b30926af8b..c844804300 100644 --- a/setuptools/_vendor/packaging/specifiers.py +++ b/setuptools/_vendor/packaging/specifiers.py @@ -816,8 +816,7 @@ def __and__(self, other: SpecifierSet | str) -> SpecifierSet: specifier._prereleases = self._prereleases else: raise ValueError( - "Cannot combine SpecifierSets with True and False prerelease " - "overrides." + "Cannot combine SpecifierSets with True and False prerelease overrides." ) return specifier diff --git a/setuptools/_vendor/packaging/tags.py b/setuptools/_vendor/packaging/tags.py index f5903402ab..8522f59c4f 100644 --- a/setuptools/_vendor/packaging/tags.py +++ b/setuptools/_vendor/packaging/tags.py @@ -530,6 +530,43 @@ def ios_platforms( ) +def android_platforms( + api_level: int | None = None, abi: str | None = None +) -> Iterator[str]: + """ + Yields the :attr:`~Tag.platform` tags for Android. If this function is invoked on + non-Android platforms, the ``api_level`` and ``abi`` arguments are required. + + :param int api_level: The maximum `API level + `__ to return. Defaults + to the current system's version, as returned by ``platform.android_ver``. + :param str abi: The `Android ABI `__, + e.g. ``arm64_v8a``. Defaults to the current system's ABI , as returned by + ``sysconfig.get_platform``. Hyphens and periods will be replaced with + underscores. + """ + if platform.system() != "Android" and (api_level is None or abi is None): + raise TypeError( + "on non-Android platforms, the api_level and abi arguments are required" + ) + + if api_level is None: + # Python 3.13 was the first version to return platform.system() == "Android", + # and also the first version to define platform.android_ver(). + api_level = platform.android_ver().api_level # type: ignore[attr-defined] + + if abi is None: + abi = sysconfig.get_platform().split("-")[-1] + abi = _normalize_string(abi) + + # 16 is the minimum API level known to have enough features to support CPython + # without major patching. Yield every API level from the maximum down to the + # minimum, inclusive. 
+ min_api_level = 16 + for ver in range(api_level, min_api_level - 1, -1): + yield f"android_{ver}_{abi}" + + def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: linux = _normalize_string(sysconfig.get_platform()) if not linux.startswith("linux_"): @@ -561,6 +598,8 @@ def platform_tags() -> Iterator[str]: return mac_platforms() elif platform.system() == "iOS": return ios_platforms() + elif platform.system() == "Android": + return android_platforms() elif platform.system() == "Linux": return _linux_platforms() else: diff --git a/setuptools/_vendor/platformdirs-4.2.2.dist-info/INSTALLER b/setuptools/_vendor/platformdirs-4.2.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/platformdirs-4.2.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/platformdirs-4.2.2.dist-info/RECORD b/setuptools/_vendor/platformdirs-4.2.2.dist-info/RECORD deleted file mode 100644 index 64c0c8ea2e..0000000000 --- a/setuptools/_vendor/platformdirs-4.2.2.dist-info/RECORD +++ /dev/null @@ -1,23 +0,0 @@ -platformdirs-4.2.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -platformdirs-4.2.2.dist-info/METADATA,sha256=zmsie01G1MtXR0wgIv5XpVeTO7idr0WWvfmxKsKWuGk,11429 -platformdirs-4.2.2.dist-info/RECORD,, -platformdirs-4.2.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -platformdirs-4.2.2.dist-info/WHEEL,sha256=zEMcRr9Kr03x1ozGwg5v9NQBKn3kndp6LSoSlVg-jhU,87 -platformdirs-4.2.2.dist-info/licenses/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089 -platformdirs/__init__.py,sha256=EMGE8qeHRR9CzDFr8kL3tA8hdZZniYjXBVZd0UGTWK0,22225 -platformdirs/__main__.py,sha256=HnsUQHpiBaiTxwcmwVw-nFaPdVNZtQIdi1eWDtI-MzI,1493 -platformdirs/__pycache__/__init__.cpython-312.pyc,, -platformdirs/__pycache__/__main__.cpython-312.pyc,, -platformdirs/__pycache__/android.cpython-312.pyc,, -platformdirs/__pycache__/api.cpython-312.pyc,, -platformdirs/__pycache__/macos.cpython-312.pyc,, -platformdirs/__pycache__/unix.cpython-312.pyc,, -platformdirs/__pycache__/version.cpython-312.pyc,, -platformdirs/__pycache__/windows.cpython-312.pyc,, -platformdirs/android.py,sha256=xZXY9Jd46WOsxT2U6-5HsNtDZ-IQqxcEUrBLl3hYk4o,9016 -platformdirs/api.py,sha256=QBYdUac2eC521ek_y53uD1Dcq-lJX8IgSRVd4InC6uc,8996 -platformdirs/macos.py,sha256=wftsbsvq6nZ0WORXSiCrZNkRHz_WKuktl0a6mC7MFkI,5580 -platformdirs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -platformdirs/unix.py,sha256=Cci9Wqt35dAMsg6HT9nRGHSBW5obb0pR3AE1JJnsCXg,10643 -platformdirs/version.py,sha256=r7F76tZRjgQKzrpx_I0_ZMQOMU-PS7eGnHD7zEK3KB0,411 -platformdirs/windows.py,sha256=IFpiohUBwxPtCzlyKwNtxyW4Jk8haa6W8o59mfrDXVo,10125 diff --git a/setuptools/_vendor/platformdirs-4.4.0.dist-info/INSTALLER b/setuptools/_vendor/platformdirs-4.4.0.dist-info/INSTALLER new file mode 100644 index 0000000000..5c69047b2e --- /dev/null +++ b/setuptools/_vendor/platformdirs-4.4.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/setuptools/_vendor/platformdirs-4.2.2.dist-info/METADATA b/setuptools/_vendor/platformdirs-4.4.0.dist-info/METADATA similarity index 82% rename from setuptools/_vendor/platformdirs-4.2.2.dist-info/METADATA rename to setuptools/_vendor/platformdirs-4.4.0.dist-info/METADATA index ab51ef36ad..6b0908fb70 100644 --- a/setuptools/_vendor/platformdirs-4.2.2.dist-info/METADATA +++ b/setuptools/_vendor/platformdirs-4.4.0.dist-info/METADATA @@ -1,11 +1,12 @@ -Metadata-Version: 2.3 +Metadata-Version: 
2.4 Name: platformdirs -Version: 4.2.2 +Version: 4.4.0 Summary: A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`. +Project-URL: Changelog, https://github.com/tox-dev/platformdirs/releases Project-URL: Documentation, https://platformdirs.readthedocs.io -Project-URL: Homepage, https://github.com/platformdirs/platformdirs -Project-URL: Source, https://github.com/platformdirs/platformdirs -Project-URL: Tracker, https://github.com/platformdirs/platformdirs/issues +Project-URL: Homepage, https://github.com/tox-dev/platformdirs +Project-URL: Source, https://github.com/tox-dev/platformdirs +Project-URL: Tracker, https://github.com/tox-dev/platformdirs/issues Maintainer-email: Bernát Gábor , Julian Berman , Ofek Lev , Ronny Pfannschmidt License-Expression: MIT License-File: LICENSE @@ -16,35 +17,41 @@ Classifier: License :: OSI Approved :: MIT License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Software Development :: Libraries :: Python Modules -Requires-Python: >=3.8 +Requires-Python: >=3.9 Provides-Extra: docs -Requires-Dist: furo>=2023.9.10; extra == 'docs' -Requires-Dist: proselint>=0.13; extra == 'docs' -Requires-Dist: sphinx-autodoc-typehints>=1.25.2; extra == 'docs' -Requires-Dist: sphinx>=7.2.6; extra == 'docs' +Requires-Dist: furo>=2024.8.6; extra == 'docs' +Requires-Dist: proselint>=0.14; extra == 'docs' +Requires-Dist: sphinx-autodoc-typehints>=3; extra == 'docs' +Requires-Dist: sphinx>=8.1.3; extra == 'docs' Provides-Extra: test Requires-Dist: appdirs==1.4.4; extra == 'test' Requires-Dist: covdefaults>=2.3; extra == 'test' -Requires-Dist: pytest-cov>=4.1; extra == 'test' -Requires-Dist: pytest-mock>=3.12; extra == 'test' -Requires-Dist: pytest>=7.4.3; extra == 'test' +Requires-Dist: pytest-cov>=6; extra == 'test' +Requires-Dist: pytest-mock>=3.14; extra == 'test' +Requires-Dist: pytest>=8.3.4; extra == 'test' Provides-Extra: type -Requires-Dist: mypy>=1.8; extra == 'type' +Requires-Dist: mypy>=1.14.1; extra == 'type' Description-Content-Type: text/x-rst The problem =========== -.. image:: https://github.com/platformdirs/platformdirs/actions/workflows/check.yml/badge.svg +.. image:: https://badge.fury.io/py/platformdirs.svg + :target: https://badge.fury.io/py/platformdirs +.. image:: https://img.shields.io/pypi/pyversions/platformdirs.svg + :target: https://pypi.python.org/pypi/platformdirs/ +.. image:: https://github.com/tox-dev/platformdirs/actions/workflows/check.yaml/badge.svg :target: https://github.com/platformdirs/platformdirs/actions +.. image:: https://static.pepy.tech/badge/platformdirs/month + :target: https://pepy.tech/project/platformdirs When writing desktop application, finding the right location to store user data and configuration varies per platform. 
Even for single-platform apps, there @@ -107,10 +114,14 @@ On macOS: >>> appauthor = "Acme" >>> user_data_dir(appname, appauthor) '/Users/trentm/Library/Application Support/SuperApp' - >>> site_data_dir(appname, appauthor) - '/Library/Application Support/SuperApp' + >>> user_config_dir(appname, appauthor) + '/Users/trentm/Library/Application Support/SuperApp' >>> user_cache_dir(appname, appauthor) '/Users/trentm/Library/Caches/SuperApp' + >>> site_data_dir(appname, appauthor) + '/Library/Application Support/SuperApp' + >>> site_config_dir(appname, appauthor) + '/Library/Application Support/SuperApp' >>> user_log_dir(appname, appauthor) '/Users/trentm/Library/Logs/SuperApp' >>> user_documents_dir() @@ -139,8 +150,14 @@ On Windows: 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp' >>> user_data_dir(appname, appauthor, roaming=True) 'C:\\Users\\trentm\\AppData\\Roaming\\Acme\\SuperApp' + >>> user_config_dir(appname, appauthor) + 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp' >>> user_cache_dir(appname, appauthor) 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Cache' + >>> site_data_dir(appname, appauthor) + 'C:\\ProgramData\\Acme\\SuperApp' + >>> site_config_dir(appname, appauthor) + 'C:\\ProgramData\\Acme\\SuperApp' >>> user_log_dir(appname, appauthor) 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Logs' >>> user_documents_dir() @@ -167,16 +184,21 @@ On Linux: >>> appauthor = "Acme" >>> user_data_dir(appname, appauthor) '/home/trentm/.local/share/SuperApp' + >>> user_config_dir(appname) + '/home/trentm/.config/SuperApp' + >>> user_cache_dir(appname, appauthor) + '/home/trentm/.cache/SuperApp' >>> site_data_dir(appname, appauthor) '/usr/local/share/SuperApp' >>> site_data_dir(appname, appauthor, multipath=True) '/usr/local/share/SuperApp:/usr/share/SuperApp' - >>> user_cache_dir(appname, appauthor) - '/home/trentm/.cache/SuperApp' + >>> site_config_dir(appname) + '/etc/xdg/SuperApp' + >>> os.environ["XDG_CONFIG_DIRS"] = "/etc:/usr/local/etc" + >>> site_config_dir(appname, multipath=True) + '/etc/SuperApp:/usr/local/etc/SuperApp' >>> user_log_dir(appname, appauthor) '/home/trentm/.local/state/SuperApp/log' - >>> user_config_dir(appname) - '/home/trentm/.config/SuperApp' >>> user_documents_dir() '/home/trentm/Documents' >>> user_downloads_dir() @@ -191,11 +213,6 @@ On Linux: '/home/trentm/Desktop' >>> user_runtime_dir(appname, appauthor) '/run/user/{os.getuid()}/SuperApp' - >>> site_config_dir(appname) - '/etc/xdg/SuperApp' - >>> os.environ["XDG_CONFIG_DIRS"] = "/etc:/usr/local/etc" - >>> site_config_dir(appname, multipath=True) - '/etc/SuperApp:/usr/local/etc/SuperApp' On Android:: @@ -204,12 +221,16 @@ On Android:: >>> appauthor = "Acme" >>> user_data_dir(appname, appauthor) '/data/data/com.myApp/files/SuperApp' + >>> user_config_dir(appname) + '/data/data/com.myApp/shared_prefs/SuperApp' >>> user_cache_dir(appname, appauthor) '/data/data/com.myApp/cache/SuperApp' + >>> site_data_dir(appname, appauthor) + '/data/data/com.myApp/files/SuperApp' + >>> site_config_dir(appname) + '/data/data/com.myApp/shared_prefs/SuperApp' >>> user_log_dir(appname, appauthor) '/data/data/com.myApp/cache/SuperApp/log' - >>> user_config_dir(appname) - '/data/data/com.myApp/shared_prefs/SuperApp' >>> user_documents_dir() '/storage/emulated/0/Documents' >>> user_downloads_dir() @@ -241,8 +262,14 @@ apps also support ``XDG_*`` environment variables. 
>>> dirs = PlatformDirs("SuperApp", "Acme") >>> dirs.user_data_dir '/Users/trentm/Library/Application Support/SuperApp' + >>> dirs.user_config_dir + '/Users/trentm/Library/Application Support/SuperApp' + >>> dirs.user_cache_dir + '/Users/trentm/Library/Caches/SuperApp' >>> dirs.site_data_dir '/Library/Application Support/SuperApp' + >>> dirs.site_config_dir + '/Library/Application Support/SuperApp' >>> dirs.user_cache_dir '/Users/trentm/Library/Caches/SuperApp' >>> dirs.user_log_dir @@ -273,10 +300,14 @@ dirs:: >>> dirs = PlatformDirs("SuperApp", "Acme", version="1.0") >>> dirs.user_data_dir '/Users/trentm/Library/Application Support/SuperApp/1.0' - >>> dirs.site_data_dir - '/Library/Application Support/SuperApp/1.0' + >>> dirs.user_config_dir + '/Users/trentm/Library/Application Support/SuperApp/1.0' >>> dirs.user_cache_dir '/Users/trentm/Library/Caches/SuperApp/1.0' + >>> dirs.site_data_dir + '/Library/Application Support/SuperApp/1.0' + >>> dirs.site_config_dir + '/Library/Application Support/SuperApp/1.0' >>> dirs.user_log_dir '/Users/trentm/Library/Logs/SuperApp/1.0' >>> dirs.user_documents_dir diff --git a/setuptools/_vendor/platformdirs-4.4.0.dist-info/RECORD b/setuptools/_vendor/platformdirs-4.4.0.dist-info/RECORD new file mode 100644 index 0000000000..09572f1780 --- /dev/null +++ b/setuptools/_vendor/platformdirs-4.4.0.dist-info/RECORD @@ -0,0 +1,15 @@ +platformdirs-4.4.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +platformdirs-4.4.0.dist-info/METADATA,sha256=u8UhbV9Md7-8VyJyZNUuZrzN5xzPeedeGmBG0CnTAiM,12831 +platformdirs-4.4.0.dist-info/RECORD,, +platformdirs-4.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +platformdirs-4.4.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +platformdirs-4.4.0.dist-info/licenses/LICENSE,sha256=KeD9YukphQ6G6yjD_czwzv30-pSHkBHP-z0NS-1tTbY,1089 +platformdirs/__init__.py,sha256=iORRy6_lZ9tXLvO0W6fJPn8QV7F532ivl-f2WGmabBc,22284 +platformdirs/__main__.py,sha256=HnsUQHpiBaiTxwcmwVw-nFaPdVNZtQIdi1eWDtI-MzI,1493 +platformdirs/android.py,sha256=r0DshVBf-RO1jXJGX8C4Til7F1XWt-bkdWMgmvEiaYg,9013 +platformdirs/api.py,sha256=wPHOlwOsfz2oqQZ6A2FcCu5kEAj-JondzoNOHYFQ0h8,9281 +platformdirs/macos.py,sha256=0XoOgin1NK7Qki7iskD-oS8xKxw6bXgoKEgdqpCRAFQ,6322 +platformdirs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +platformdirs/unix.py,sha256=WZmkUA--L3JNRGmz32s35YfoD3ica6xKIPdCV_HhLcs,10458 +platformdirs/version.py,sha256=i31fi3nNO19D2FdSx8aldD7IFLSqm2YrAo6SmkV0FLM,704 +platformdirs/windows.py,sha256=IFpiohUBwxPtCzlyKwNtxyW4Jk8haa6W8o59mfrDXVo,10125 diff --git a/setuptools/_vendor/inflect/compat/__init__.py b/setuptools/_vendor/platformdirs-4.4.0.dist-info/REQUESTED similarity index 100% rename from setuptools/_vendor/inflect/compat/__init__.py rename to setuptools/_vendor/platformdirs-4.4.0.dist-info/REQUESTED diff --git a/setuptools/_vendor/platformdirs-4.2.2.dist-info/WHEEL b/setuptools/_vendor/platformdirs-4.4.0.dist-info/WHEEL similarity index 67% rename from setuptools/_vendor/platformdirs-4.2.2.dist-info/WHEEL rename to setuptools/_vendor/platformdirs-4.4.0.dist-info/WHEEL index 516596c767..12228d414b 100644 --- a/setuptools/_vendor/platformdirs-4.2.2.dist-info/WHEEL +++ b/setuptools/_vendor/platformdirs-4.4.0.dist-info/WHEEL @@ -1,4 +1,4 @@ Wheel-Version: 1.0 -Generator: hatchling 1.24.2 +Generator: hatchling 1.27.0 Root-Is-Purelib: true Tag: py3-none-any diff --git a/setuptools/_vendor/platformdirs-4.2.2.dist-info/licenses/LICENSE 
b/setuptools/_vendor/platformdirs-4.4.0.dist-info/licenses/LICENSE similarity index 100% rename from setuptools/_vendor/platformdirs-4.2.2.dist-info/licenses/LICENSE rename to setuptools/_vendor/platformdirs-4.4.0.dist-info/licenses/LICENSE diff --git a/setuptools/_vendor/platformdirs/__init__.py b/setuptools/_vendor/platformdirs/__init__.py index 3f7d9490d1..02daa5914a 100644 --- a/setuptools/_vendor/platformdirs/__init__.py +++ b/setuptools/_vendor/platformdirs/__init__.py @@ -19,18 +19,18 @@ from pathlib import Path from typing import Literal +if sys.platform == "win32": + from platformdirs.windows import Windows as _Result +elif sys.platform == "darwin": + from platformdirs.macos import MacOS as _Result +else: + from platformdirs.unix import Unix as _Result -def _set_platform_dir_class() -> type[PlatformDirsABC]: - if sys.platform == "win32": - from platformdirs.windows import Windows as Result # noqa: PLC0415 - elif sys.platform == "darwin": - from platformdirs.macos import MacOS as Result # noqa: PLC0415 - else: - from platformdirs.unix import Unix as Result # noqa: PLC0415 +def _set_platform_dir_class() -> type[PlatformDirsABC]: if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system": if os.getenv("SHELL") or os.getenv("PREFIX"): - return Result + return _Result from platformdirs.android import _android_folder # noqa: PLC0415 @@ -39,16 +39,20 @@ def _set_platform_dir_class() -> type[PlatformDirsABC]: return Android # return to avoid redefinition of a result - return Result + return _Result -PlatformDirs = _set_platform_dir_class() #: Currently active platform +if TYPE_CHECKING: + # Work around mypy issue: https://github.com/python/mypy/issues/10962 + PlatformDirs = _Result +else: + PlatformDirs = _set_platform_dir_class() #: Currently active platform AppDirs = PlatformDirs #: Backwards compatibility with appdirs def user_data_dir( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, roaming: bool = False, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -72,7 +76,7 @@ def user_data_dir( def site_data_dir( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, multipath: bool = False, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -96,7 +100,7 @@ def site_data_dir( def user_config_dir( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, roaming: bool = False, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -120,7 +124,7 @@ def user_config_dir( def site_config_dir( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, multipath: bool = False, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -144,7 +148,7 @@ def site_config_dir( def user_cache_dir( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, opinion: bool = True, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -168,7 +172,7 @@ def user_cache_dir( def site_cache_dir( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | 
Literal[False] | None = None, version: str | None = None, opinion: bool = True, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -192,7 +196,7 @@ def site_cache_dir( def user_state_dir( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, roaming: bool = False, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -216,7 +220,7 @@ def user_state_dir( def user_log_dir( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, opinion: bool = True, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -270,7 +274,7 @@ def user_desktop_dir() -> str: def user_runtime_dir( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, opinion: bool = True, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -294,7 +298,7 @@ def user_runtime_dir( def site_runtime_dir( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, opinion: bool = True, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -318,7 +322,7 @@ def site_runtime_dir( def user_data_path( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, roaming: bool = False, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -342,7 +346,7 @@ def user_data_path( def site_data_path( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, multipath: bool = False, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -366,7 +370,7 @@ def site_data_path( def user_config_path( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, roaming: bool = False, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -390,7 +394,7 @@ def user_config_path( def site_config_path( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, multipath: bool = False, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -414,7 +418,7 @@ def site_config_path( def site_cache_path( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, opinion: bool = True, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -438,7 +442,7 @@ def site_cache_path( def user_cache_path( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, opinion: bool = True, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -462,7 +466,7 @@ def user_cache_path( def user_state_path( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, roaming: bool = False, # noqa: 
FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -486,7 +490,7 @@ def user_state_path( def user_log_path( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, opinion: bool = True, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -540,7 +544,7 @@ def user_desktop_path() -> Path: def user_runtime_path( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, opinion: bool = True, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 @@ -564,7 +568,7 @@ def user_runtime_path( def site_runtime_path( appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, opinion: bool = True, # noqa: FBT001, FBT002 ensure_exists: bool = False, # noqa: FBT001, FBT002 diff --git a/setuptools/_vendor/platformdirs/android.py b/setuptools/_vendor/platformdirs/android.py index afd3141c72..92efc852d3 100644 --- a/setuptools/_vendor/platformdirs/android.py +++ b/setuptools/_vendor/platformdirs/android.py @@ -23,7 +23,7 @@ class Android(PlatformDirsABC): @property def user_data_dir(self) -> str: """:return: data directory tied to the user, e.g. ``/data/user///files/``""" - return self._append_app_name_and_version(cast(str, _android_folder()), "files") + return self._append_app_name_and_version(cast("str", _android_folder()), "files") @property def site_data_dir(self) -> str: @@ -36,7 +36,7 @@ def user_config_dir(self) -> str: :return: config directory tied to the user, e.g. \ ``/data/user///shared_prefs/`` """ - return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs") + return self._append_app_name_and_version(cast("str", _android_folder()), "shared_prefs") @property def site_config_dir(self) -> str: @@ -46,7 +46,7 @@ def site_config_dir(self) -> str: @property def user_cache_dir(self) -> str: """:return: cache directory tied to the user, e.g.,``/data/user///cache/``""" - return self._append_app_name_and_version(cast(str, _android_folder()), "cache") + return self._append_app_name_and_version(cast("str", _android_folder()), "cache") @property def site_cache_dir(self) -> str: @@ -117,7 +117,7 @@ def site_runtime_dir(self) -> str: @lru_cache(maxsize=1) -def _android_folder() -> str | None: # noqa: C901, PLR0912 +def _android_folder() -> str | None: # noqa: C901 """:return: base folder for the Android OS or None if it cannot be found""" result: str | None = None # type checker isn't happy with our "import android", just don't do this when type checking see diff --git a/setuptools/_vendor/platformdirs/api.py b/setuptools/_vendor/platformdirs/api.py index c50caa648a..251600e6d1 100644 --- a/setuptools/_vendor/platformdirs/api.py +++ b/setuptools/_vendor/platformdirs/api.py @@ -8,7 +8,8 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Iterator, Literal + from collections.abc import Iterator + from typing import Literal class PlatformDirsABC(ABC): # noqa: PLR0904 @@ -17,7 +18,7 @@ class PlatformDirsABC(ABC): # noqa: PLR0904 def __init__( # noqa: PLR0913, PLR0917 self, appname: str | None = None, - appauthor: str | None | Literal[False] = None, + appauthor: str | Literal[False] | None = None, version: str | None = None, roaming: bool = False, # noqa: FBT001, FBT002 multipath: bool = False, # noqa: FBT001, 
FBT002 @@ -91,6 +92,12 @@ def _optionally_create_directory(self, path: str) -> None: if self.ensure_exists: Path(path).mkdir(parents=True, exist_ok=True) + def _first_item_as_path_if_multipath(self, directory: str) -> Path: + if self.multipath: + # If multipath is True, the first path is returned. + directory = directory.partition(os.pathsep)[0] + return Path(directory) + @property @abstractmethod def user_data_dir(self) -> str: diff --git a/setuptools/_vendor/platformdirs/macos.py b/setuptools/_vendor/platformdirs/macos.py index eb1ba5df1d..30ab368913 100644 --- a/setuptools/_vendor/platformdirs/macos.py +++ b/setuptools/_vendor/platformdirs/macos.py @@ -4,9 +4,13 @@ import os.path import sys +from typing import TYPE_CHECKING from .api import PlatformDirsABC +if TYPE_CHECKING: + from pathlib import Path + class MacOS(PlatformDirsABC): """ @@ -30,18 +34,24 @@ def site_data_dir(self) -> str: """ :return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``. If we're using a Python binary managed by `Homebrew `_, the directory - will be under the Homebrew prefix, e.g. ``/opt/homebrew/share/$appname/$version``. + will be under the Homebrew prefix, e.g. ``$homebrew_prefix/share/$appname/$version``. If `multipath ` is enabled, and we're in Homebrew, the response is a multi-path string separated by ":", e.g. - ``/opt/homebrew/share/$appname/$version:/Library/Application Support/$appname/$version`` + ``$homebrew_prefix/share/$appname/$version:/Library/Application Support/$appname/$version`` """ - is_homebrew = sys.prefix.startswith("/opt/homebrew") - path_list = [self._append_app_name_and_version("/opt/homebrew/share")] if is_homebrew else [] + is_homebrew = "/opt/python" in sys.prefix + homebrew_prefix = sys.prefix.split("/opt/python")[0] if is_homebrew else "" + path_list = [self._append_app_name_and_version(f"{homebrew_prefix}/share")] if is_homebrew else [] path_list.append(self._append_app_name_and_version("/Library/Application Support")) if self.multipath: return os.pathsep.join(path_list) return path_list[0] + @property + def site_data_path(self) -> Path: + """:return: data path shared by users. Only return the first item, even if ``multipath`` is set to ``True``""" + return self._first_item_as_path_if_multipath(self.site_data_dir) + @property def user_config_dir(self) -> str: """:return: config directory tied to the user, same as `user_data_dir`""" @@ -62,18 +72,24 @@ def site_cache_dir(self) -> str: """ :return: cache directory shared by users, e.g. ``/Library/Caches/$appname/$version``. If we're using a Python binary managed by `Homebrew `_, the directory - will be under the Homebrew prefix, e.g. ``/opt/homebrew/var/cache/$appname/$version``. + will be under the Homebrew prefix, e.g. ``$homebrew_prefix/var/cache/$appname/$version``. If `multipath ` is enabled, and we're in Homebrew, the response is a multi-path string separated by ":", e.g. 
- ``/opt/homebrew/var/cache/$appname/$version:/Library/Caches/$appname/$version`` + ``$homebrew_prefix/var/cache/$appname/$version:/Library/Caches/$appname/$version`` """ - is_homebrew = sys.prefix.startswith("/opt/homebrew") - path_list = [self._append_app_name_and_version("/opt/homebrew/var/cache")] if is_homebrew else [] + is_homebrew = "/opt/python" in sys.prefix + homebrew_prefix = sys.prefix.split("/opt/python")[0] if is_homebrew else "" + path_list = [self._append_app_name_and_version(f"{homebrew_prefix}/var/cache")] if is_homebrew else [] path_list.append(self._append_app_name_and_version("/Library/Caches")) if self.multipath: return os.pathsep.join(path_list) return path_list[0] + @property + def site_cache_path(self) -> Path: + """:return: cache path shared by users. Only return the first item, even if ``multipath`` is set to ``True``""" + return self._first_item_as_path_if_multipath(self.site_cache_dir) + @property def user_state_dir(self) -> str: """:return: state directory tied to the user, same as `user_data_dir`""" diff --git a/setuptools/_vendor/platformdirs/unix.py b/setuptools/_vendor/platformdirs/unix.py index 9500ade614..fc75d8d074 100644 --- a/setuptools/_vendor/platformdirs/unix.py +++ b/setuptools/_vendor/platformdirs/unix.py @@ -6,10 +6,13 @@ import sys from configparser import ConfigParser from pathlib import Path -from typing import Iterator, NoReturn +from typing import TYPE_CHECKING, NoReturn from .api import PlatformDirsABC +if TYPE_CHECKING: + from collections.abc import Iterator + if sys.platform == "win32": def getuid() -> NoReturn: @@ -218,12 +221,6 @@ def site_cache_path(self) -> Path: """:return: cache path shared by users. Only return the first item, even if ``multipath`` is set to ``True``""" return self._first_item_as_path_if_multipath(self.site_cache_dir) - def _first_item_as_path_if_multipath(self, directory: str) -> Path: - if self.multipath: - # If multipath is True, the first path is returned. - directory = directory.split(os.pathsep)[0] - return Path(directory) - def iter_config_dirs(self) -> Iterator[str]: """:yield: all user and site configuration directories.""" yield self.user_config_dir diff --git a/setuptools/_vendor/platformdirs/version.py b/setuptools/_vendor/platformdirs/version.py index 6483ddce0b..b9451472c3 100644 --- a/setuptools/_vendor/platformdirs/version.py +++ b/setuptools/_vendor/platformdirs/version.py @@ -1,16 +1,34 @@ -# file generated by setuptools_scm +# file generated by setuptools-scm # don't change, don't track in version control + +__all__ = [ + "__version__", + "__version_tuple__", + "version", + "version_tuple", + "__commit_id__", + "commit_id", +] + TYPE_CHECKING = False if TYPE_CHECKING: - from typing import Tuple, Union + from typing import Tuple + from typing import Union + VERSION_TUPLE = Tuple[Union[int, str], ...] 
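The macOS hunks above replace the hard-coded `/opt/homebrew` check with prefix derivation from `sys.prefix`. As a standalone sketch (an illustration only, not platformdirs' public API; it assumes a Homebrew-managed CPython whose prefix looks like `<homebrew-root>/opt/python@X.Y/...`), the new detection amounts to:

```python
import sys

# A Homebrew-managed CPython lives under <homebrew-root>/opt/python@X.Y/...,
# so the Homebrew root is everything before the "/opt/python" segment.
is_homebrew = "/opt/python" in sys.prefix
homebrew_prefix = sys.prefix.split("/opt/python")[0] if is_homebrew else ""
print(is_homebrew, homebrew_prefix or "<not a Homebrew Python>")
```

This also appears to cover non-default roots such as `/usr/local` on Intel Macs, which the old `startswith("/opt/homebrew")` test missed.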
+ COMMIT_ID = Union[str, None] else: VERSION_TUPLE = object + COMMIT_ID = object version: str __version__: str __version_tuple__: VERSION_TUPLE version_tuple: VERSION_TUPLE +commit_id: COMMIT_ID +__commit_id__: COMMIT_ID + +__version__ = version = '4.4.0' +__version_tuple__ = version_tuple = (4, 4, 0) -__version__ = version = '4.2.2' -__version_tuple__ = version_tuple = (4, 2, 2) +__commit_id__ = commit_id = None diff --git a/setuptools/_vendor/tomli-2.0.1.dist-info/INSTALLER b/setuptools/_vendor/tomli-2.0.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/tomli-2.0.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/tomli-2.0.1.dist-info/RECORD b/setuptools/_vendor/tomli-2.0.1.dist-info/RECORD deleted file mode 100644 index 1db8063ec5..0000000000 --- a/setuptools/_vendor/tomli-2.0.1.dist-info/RECORD +++ /dev/null @@ -1,15 +0,0 @@ -tomli-2.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -tomli-2.0.1.dist-info/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 -tomli-2.0.1.dist-info/METADATA,sha256=zPDceKmPwJGLWtZykrHixL7WVXWmJGzZ1jyRT5lCoPI,8875 -tomli-2.0.1.dist-info/RECORD,, -tomli-2.0.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -tomli-2.0.1.dist-info/WHEEL,sha256=jPMR_Dzkc4X4icQtmz81lnNY_kAsfog7ry7qoRvYLXw,81 -tomli/__init__.py,sha256=JhUwV66DB1g4Hvt1UQCVMdfCu-IgAV8FXmvDU9onxd4,396 -tomli/__pycache__/__init__.cpython-312.pyc,, -tomli/__pycache__/_parser.cpython-312.pyc,, -tomli/__pycache__/_re.cpython-312.pyc,, -tomli/__pycache__/_types.cpython-312.pyc,, -tomli/_parser.py,sha256=g9-ENaALS-B8dokYpCuzUFalWlog7T-SIYMjLZSWrtM,22633 -tomli/_re.py,sha256=dbjg5ChZT23Ka9z9DHOXfdtSpPwUfdgMXnj8NOoly-w,2943 -tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254 -tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 diff --git a/setuptools/_vendor/tomli-2.4.0.dist-info/INSTALLER b/setuptools/_vendor/tomli-2.4.0.dist-info/INSTALLER new file mode 100644 index 0000000000..5c69047b2e --- /dev/null +++ b/setuptools/_vendor/tomli-2.4.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/setuptools/_vendor/tomli-2.0.1.dist-info/METADATA b/setuptools/_vendor/tomli-2.4.0.dist-info/METADATA similarity index 63% rename from setuptools/_vendor/tomli-2.0.1.dist-info/METADATA rename to setuptools/_vendor/tomli-2.4.0.dist-info/METADATA index efd87ecc16..ffe8e40d77 100644 --- a/setuptools/_vendor/tomli-2.0.1.dist-info/METADATA +++ b/setuptools/_vendor/tomli-2.4.0.dist-info/METADATA @@ -1,28 +1,25 @@ -Metadata-Version: 2.1 +Metadata-Version: 2.4 Name: tomli -Version: 2.0.1 +Version: 2.4.0 Summary: A lil' TOML parser Keywords: toml Author-email: Taneli Hukkinen -Requires-Python: >=3.7 +Requires-Python: >=3.8 Description-Content-Type: text/markdown -Classifier: License :: OSI Approved :: MIT License +License-Expression: MIT Classifier: Operating System :: MacOS Classifier: Operating System :: Microsoft :: Windows Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3.9 -Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Software Development :: 
Libraries :: Python Modules Classifier: Typing :: Typed +License-File: LICENSE Project-URL: Changelog, https://github.com/hukkin/tomli/blob/master/CHANGELOG.md Project-URL: Homepage, https://github.com/hukkin/tomli -[![Build Status](https://github.com/hukkin/tomli/workflows/Tests/badge.svg?branch=master)](https://github.com/hukkin/tomli/actions?query=workflow%3ATests+branch%3Amaster+event%3Apush) +[![Build Status](https://github.com/hukkin/tomli/actions/workflows/tests.yaml/badge.svg?branch=master)](https://github.com/hukkin/tomli/actions?query=workflow%3ATests+branch%3Amaster+event%3Apush) [![codecov.io](https://codecov.io/gh/hukkin/tomli/branch/master/graph/badge.svg)](https://codecov.io/gh/hukkin/tomli) [![PyPI version](https://img.shields.io/pypi/v/tomli)](https://pypi.org/project/tomli) @@ -30,7 +27,7 @@ Project-URL: Homepage, https://github.com/hukkin/tomli > A lil' TOML parser -**Table of Contents** *generated with [mdformat-toc](https://github.com/hukkin/mdformat-toc)* +**Table of Contents** *generated with [mdformat-toc](https://github.com/hukkin/mdformat-toc)* @@ -41,19 +38,35 @@ Project-URL: Homepage, https://github.com/hukkin/tomli - [Parse a TOML file](#parse-a-toml-file) - [Handle invalid TOML](#handle-invalid-toml) - [Construct `decimal.Decimal`s from TOML floats](#construct-decimaldecimals-from-toml-floats) + - [Building a `tomli`/`tomllib` compatibility layer](#building-a-tomlitomllib-compatibility-layer) - [FAQ](#faq) - [Why this parser?](#why-this-parser) - [Is comment preserving round-trip parsing supported?](#is-comment-preserving-round-trip-parsing-supported) - [Is there a `dumps`, `write` or `encode` function?](#is-there-a-dumps-write-or-encode-function) - [How do TOML types map into Python types?](#how-do-toml-types-map-into-python-types) - [Performance](#performance) + - [Pure Python](#pure-python) + - [Mypyc generated wheel](#mypyc-generated-wheel) ## Intro Tomli is a Python library for parsing [TOML](https://toml.io). -Tomli is fully compatible with [TOML v1.0.0](https://toml.io/en/v1.0.0). +Version 2.4.0 and later are compatible with [TOML v1.1.0](https://toml.io/en/v1.1.0). +Older versions are [TOML v1.0.0](https://toml.io/en/v1.0.0) compatible. + +A version of Tomli, the `tomllib` module, +was added to the standard library in Python 3.11 +via [PEP 680](https://www.python.org/dev/peps/pep-0680/). +Tomli continues to provide a backport on PyPI for Python versions +where the standard library module is not available +and that have not yet reached their end-of-life. + +Tomli uses [mypyc](https://github.com/mypyc/mypyc) +to generate binary wheels for most of the widely used platforms, +so Python 3.11+ users may prefer it over `tomllib` for improved performance. +Pure Python wheels are available on any platform and should perform the same as `tomllib`. 
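The TOML v1.1.0 support mentioned in the intro above shows up concretely in the parser changes later in this diff (optional seconds in times, new `\e` and `\xHH` string escapes). A minimal sketch, assuming the vendored module is importable as `tomli`:

```python
import tomli

# TOML v1.1 additions accepted by tomli 2.4.0: times may omit seconds,
# and basic strings gain \xHH (and \e) escapes.
doc = tomli.loads('start = 07:32\nreset = "\\x1b[0m"')
assert doc["start"].second == 0   # parsed as datetime.time(7, 32)
assert doc["reset"] == "\x1b[0m"  # \x1b decoded via the new 2-digit hex escape
```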
## Installation @@ -69,14 +82,19 @@ pip install tomli import tomli toml_str = """ - gretzky = 99 +[[players]] +name = "Lehtinen" +number = 26 - [kurri] - jari = 17 - """ +[[players]] +name = "Numminen" +number = 27 +""" toml_dict = tomli.loads(toml_str) -assert toml_dict == {"gretzky": 99, "kurri": {"jari": 17}} +assert toml_dict == { + "players": [{"name": "Lehtinen", "number": 26}, {"name": "Numminen", "number": 27}] +} ``` ### Parse a TOML file @@ -113,6 +131,7 @@ from decimal import Decimal import tomli toml_dict = tomli.loads("precision-matters = 0.982492", parse_float=Decimal) +assert isinstance(toml_dict["precision-matters"], Decimal) assert toml_dict["precision-matters"] == Decimal("0.982492") ``` @@ -122,21 +141,48 @@ The `decimal.Decimal` is, however, a practical choice for use cases where float Illegal types are `dict` and `list`, and their subtypes. A `ValueError` will be raised if `parse_float` produces illegal types. +### Building a `tomli`/`tomllib` compatibility layer + +Python versions 3.11+ ship with a version of Tomli: +the `tomllib` standard library module. +To build code that uses the standard library if available, +but still works seamlessly with Python 3.6+, +do the following. + +Instead of a hard Tomli dependency, use the following +[dependency specifier](https://packaging.python.org/en/latest/specifications/dependency-specifiers/) +to only require Tomli when the standard library module is not available: + +``` +tomli >= 1.1.0 ; python_version < "3.11" +``` + +Then, in your code, import a TOML parser using the following fallback mechanism: + +```python +import sys + +if sys.version_info >= (3, 11): + import tomllib +else: + import tomli as tomllib + +tomllib.loads("['This parses fine with Python 3.6+']") +``` + ## FAQ ### Why this parser? - it's lil' - pure Python with zero dependencies -- the fastest pure Python parser [\*](#performance): - 15x as fast as [tomlkit](https://pypi.org/project/tomlkit/), - 2.4x as fast as [toml](https://pypi.org/project/toml/) +- the fastest pure Python parser [\*](#pure-python): + 18x as fast as [tomlkit](https://pypi.org/project/tomlkit/), + 2.1x as fast as [toml](https://pypi.org/project/toml/) - outputs [basic data types](#how-do-toml-types-map-into-python-types) only - 100% spec compliant: passes all tests in - [a test set](https://github.com/toml-lang/compliance/pull/8) - soon to be merged to the official - [compliance tests for TOML](https://github.com/toml-lang/compliance) - repository + [toml-lang/toml-test](https://github.com/toml-lang/toml-test) + test suite - thoroughly tested: 100% branch coverage ### Is comment preserving round-trip parsing supported? @@ -176,31 +222,49 @@ The core library does not include write capability, as most TOML use cases are r ## Performance The `benchmark/` folder in this repository contains a performance benchmark for comparing the various Python TOML parsers. -The benchmark can be run with `tox -e benchmark-pypi`. -Running the benchmark on my personal computer output the following: + +Below are the results for commit [0724e2a](https://github.com/hukkin/tomli/tree/0724e2ab1858da7f5e05a9bffdb24c33589d951c). 
+ +### Pure Python ```console -foo@bar:~/dev/tomli$ tox -e benchmark-pypi -benchmark-pypi installed: attrs==19.3.0,click==7.1.2,pytomlpp==1.0.2,qtoml==0.3.0,rtoml==0.7.0,toml==0.10.2,tomli==1.1.0,tomlkit==0.7.2 -benchmark-pypi run-test-pre: PYTHONHASHSEED='2658546909' -benchmark-pypi run-test: commands[0] | python -c 'import datetime; print(datetime.date.today())' -2021-07-23 -benchmark-pypi run-test: commands[1] | python --version -Python 3.8.10 -benchmark-pypi run-test: commands[2] | python benchmark/run.py +foo@bar:~/dev/tomli$ python --version +Python 3.12.7 +foo@bar:~/dev/tomli$ pip freeze +attrs==21.4.0 +click==8.1.7 +pytomlpp==1.0.13 +qtoml==0.3.1 +rtoml==0.11.0 +toml==0.10.2 +tomli @ file:///home/foo/dev/tomli +tomlkit==0.13.2 +foo@bar:~/dev/tomli$ python benchmark/run.py Parsing data.toml 5000 times: ------------------------------------------------------ parser | exec time | performance (more is better) -----------+------------+----------------------------- - rtoml | 0.901 s | baseline (100%) - pytomlpp | 1.08 s | 83.15% - tomli | 3.89 s | 23.15% - toml | 9.36 s | 9.63% - qtoml | 11.5 s | 7.82% - tomlkit | 56.8 s | 1.59% + rtoml | 0.647 s | baseline (100%) + pytomlpp | 0.891 s | 72.62% + tomli | 3.14 s | 20.56% + toml | 6.69 s | 9.67% + qtoml | 8.27 s | 7.82% + tomlkit | 56.1 s | 1.15% ``` -The parsers are ordered from fastest to slowest, using the fastest parser as baseline. -Tomli performed the best out of all pure Python TOML parsers, -losing only to pytomlpp (wraps C++) and rtoml (wraps Rust). +### Mypyc generated wheel + +```console +foo@bar:~/dev/tomli$ python benchmark/run.py +Parsing data.toml 5000 times: +------------------------------------------------------ + parser | exec time | performance (more is better) +-----------+------------+----------------------------- + rtoml | 0.668 s | baseline (100%) + pytomlpp | 0.893 s | 74.81% + tomli | 1.96 s | 34.18% + toml | 6.64 s | 10.07% + qtoml | 8.26 s | 8.09% + tomlkit | 52.9 s | 1.26% +``` diff --git a/setuptools/_vendor/tomli-2.4.0.dist-info/RECORD b/setuptools/_vendor/tomli-2.4.0.dist-info/RECORD new file mode 100644 index 0000000000..d2415a38de --- /dev/null +++ b/setuptools/_vendor/tomli-2.4.0.dist-info/RECORD @@ -0,0 +1,11 @@ +tomli-2.4.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +tomli-2.4.0.dist-info/METADATA,sha256=9awKH4-6kItGRs1lUwnpGq2Wm2eHYWrFccpGKjgy_84,10567 +tomli-2.4.0.dist-info/RECORD,, +tomli-2.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tomli-2.4.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 +tomli-2.4.0.dist-info/licenses/LICENSE,sha256=uAgWsNUwuKzLTCIReDeQmEpuO2GSLCte6S8zcqsnQv4,1072 +tomli/__init__.py,sha256=ahtDjGJA2M_wWVvGpzx4YJtWxrWBx6qE-GH5-UYoECA,314 +tomli/_parser.py,sha256=txeATLE3zHyZ-ushXtYfrZ3LoIs7JzQF2W2KL1gwJPg,25958 +tomli/_re.py,sha256=oSNZ_ilFI6chEuQ01YRSoUydBQr_okF_mSdHTkFmv90,3396 +tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254 +tomli/py.typed,sha256=8PjyZ1aVoQpRVvt71muvuq5qE-jTFZkK-GLHkhdebmc,26 diff --git a/setuptools/_vendor/jaraco/collections/py.typed b/setuptools/_vendor/tomli-2.4.0.dist-info/REQUESTED similarity index 100% rename from setuptools/_vendor/jaraco/collections/py.typed rename to setuptools/_vendor/tomli-2.4.0.dist-info/REQUESTED diff --git a/setuptools/_vendor/tomli-2.0.1.dist-info/WHEEL b/setuptools/_vendor/tomli-2.4.0.dist-info/WHEEL similarity index 71% rename from setuptools/_vendor/tomli-2.0.1.dist-info/WHEEL rename to 
setuptools/_vendor/tomli-2.4.0.dist-info/WHEEL index c727d14823..d8b9936dad 100644 --- a/setuptools/_vendor/tomli-2.0.1.dist-info/WHEEL +++ b/setuptools/_vendor/tomli-2.4.0.dist-info/WHEEL @@ -1,4 +1,4 @@ Wheel-Version: 1.0 -Generator: flit 3.6.0 +Generator: flit 3.12.0 Root-Is-Purelib: true Tag: py3-none-any diff --git a/setuptools/_vendor/tomli-2.0.1.dist-info/LICENSE b/setuptools/_vendor/tomli-2.4.0.dist-info/licenses/LICENSE similarity index 100% rename from setuptools/_vendor/tomli-2.0.1.dist-info/LICENSE rename to setuptools/_vendor/tomli-2.4.0.dist-info/licenses/LICENSE diff --git a/setuptools/_vendor/tomli/__init__.py b/setuptools/_vendor/tomli/__init__.py index 4c6ec97ec6..55699b1ea6 100644 --- a/setuptools/_vendor/tomli/__init__.py +++ b/setuptools/_vendor/tomli/__init__.py @@ -3,9 +3,6 @@ # Licensed to PSF under a Contributor Agreement. __all__ = ("loads", "load", "TOMLDecodeError") -__version__ = "2.0.1" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT +__version__ = "2.4.0" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT from ._parser import TOMLDecodeError, load, loads - -# Pretend this exception was created here. -TOMLDecodeError.__module__ = __name__ diff --git a/setuptools/_vendor/tomli/_parser.py b/setuptools/_vendor/tomli/_parser.py index f1bb0aa19a..3038891afe 100644 --- a/setuptools/_vendor/tomli/_parser.py +++ b/setuptools/_vendor/tomli/_parser.py @@ -4,10 +4,8 @@ from __future__ import annotations -from collections.abc import Iterable -import string +import sys from types import MappingProxyType -from typing import Any, BinaryIO, NamedTuple from ._re import ( RE_DATETIME, @@ -17,44 +15,126 @@ match_to_localtime, match_to_number, ) -from ._types import Key, ParseFloat, Pos -ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) +TYPE_CHECKING = False +if TYPE_CHECKING: + from collections.abc import Iterable + from typing import IO, Any, Final + + from ._types import Key, ParseFloat, Pos + +# Inline tables/arrays are implemented using recursion. Pathologically +# nested documents cause pure Python to raise RecursionError (which is OK), +# but mypyc binary wheels will crash unrecoverably (not OK). According to +# mypyc docs this will be fixed in the future: +# https://mypyc.readthedocs.io/en/latest/differences_from_python.html#stack-overflows +# Before mypyc's fix is in, recursion needs to be limited by this library. +# Choosing `sys.getrecursionlimit()` as maximum inline table/array nesting +# level, as it allows more nesting than pure Python, but still seems a far +# lower number than where mypyc binaries crash. +MAX_INLINE_NESTING: Final = sys.getrecursionlimit() + +ASCII_CTRL: Final = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) # Neither of these sets include quotation mark or backslash. They are # currently handled as separate cases in the parser functions. 
-ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t") -ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n") +ILLEGAL_BASIC_STR_CHARS: Final = ASCII_CTRL - frozenset("\t") +ILLEGAL_MULTILINE_BASIC_STR_CHARS: Final = ASCII_CTRL - frozenset("\t\n") -ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS -ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS +ILLEGAL_LITERAL_STR_CHARS: Final = ILLEGAL_BASIC_STR_CHARS +ILLEGAL_MULTILINE_LITERAL_STR_CHARS: Final = ILLEGAL_MULTILINE_BASIC_STR_CHARS -ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS +ILLEGAL_COMMENT_CHARS: Final = ILLEGAL_BASIC_STR_CHARS -TOML_WS = frozenset(" \t") -TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n") -BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_") -KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'") -HEXDIGIT_CHARS = frozenset(string.hexdigits) +TOML_WS: Final = frozenset(" \t") +TOML_WS_AND_NEWLINE: Final = TOML_WS | frozenset("\n") +BARE_KEY_CHARS: Final = frozenset( + "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789" "-_" +) +KEY_INITIAL_CHARS: Final = BARE_KEY_CHARS | frozenset("\"'") +HEXDIGIT_CHARS: Final = frozenset("abcdef" "ABCDEF" "0123456789") -BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( +BASIC_STR_ESCAPE_REPLACEMENTS: Final = MappingProxyType( { "\\b": "\u0008", # backspace "\\t": "\u0009", # tab - "\\n": "\u000A", # linefeed - "\\f": "\u000C", # form feed - "\\r": "\u000D", # carriage return + "\\n": "\u000a", # linefeed + "\\f": "\u000c", # form feed + "\\r": "\u000d", # carriage return + "\\e": "\u001b", # escape '\\"': "\u0022", # quote - "\\\\": "\u005C", # backslash + "\\\\": "\u005c", # backslash } ) +class DEPRECATED_DEFAULT: + """Sentinel to be used as default arg during deprecation + period of TOMLDecodeError's free-form arguments.""" + + class TOMLDecodeError(ValueError): - """An error raised if a document is not valid TOML.""" + """An error raised if a document is not valid TOML. + + Adds the following attributes to ValueError: + msg: The unformatted error message + doc: The TOML document being parsed + pos: The index of doc where parsing failed + lineno: The line corresponding to pos + colno: The column corresponding to pos + """ + def __init__( + self, + msg: str | type[DEPRECATED_DEFAULT] = DEPRECATED_DEFAULT, + doc: str | type[DEPRECATED_DEFAULT] = DEPRECATED_DEFAULT, + pos: Pos | type[DEPRECATED_DEFAULT] = DEPRECATED_DEFAULT, + *args: Any, + ): + if ( + args + or not isinstance(msg, str) + or not isinstance(doc, str) + or not isinstance(pos, int) + ): + import warnings + + warnings.warn( + "Free-form arguments for TOMLDecodeError are deprecated. 
" + "Please set 'msg' (str), 'doc' (str) and 'pos' (int) arguments only.", + DeprecationWarning, + stacklevel=2, + ) + if pos is not DEPRECATED_DEFAULT: + args = pos, *args + if doc is not DEPRECATED_DEFAULT: + args = doc, *args + if msg is not DEPRECATED_DEFAULT: + args = msg, *args + ValueError.__init__(self, *args) + return + + lineno = doc.count("\n", 0, pos) + 1 + if lineno == 1: + colno = pos + 1 + else: + colno = pos - doc.rindex("\n", 0, pos) -def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]: + if pos >= len(doc): + coord_repr = "end of document" + else: + coord_repr = f"line {lineno}, column {colno}" + errmsg = f"{msg} (at {coord_repr})" + ValueError.__init__(self, errmsg) + + self.msg = msg + self.doc = doc + self.pos = pos + self.lineno = lineno + self.colno = colno + + +def load(__fp: IO[bytes], *, parse_float: ParseFloat = float) -> dict[str, Any]: """Parse TOML from a binary file object.""" b = __fp.read() try: @@ -66,14 +146,19 @@ def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]: return loads(s, parse_float=parse_float) -def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901 +def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: """Parse TOML from a string.""" # The spec allows converting "\r\n" to "\n", even in string # literals. Let's do so to simplify parsing. - src = __s.replace("\r\n", "\n") + try: + src = __s.replace("\r\n", "\n") + except (AttributeError, TypeError): + raise TypeError( + f"Expected str object, not '{type(__s).__qualname__}'" + ) from None pos = 0 - out = Output(NestedDict(), Flags()) + out = Output() header: Key = () parse_float = make_safe_parse_float(parse_float) @@ -113,7 +198,7 @@ def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # no pos, header = create_dict_rule(src, pos, out) pos = skip_chars(src, pos, TOML_WS) elif char != "#": - raise suffixed_err(src, pos, "Invalid statement") + raise TOMLDecodeError("Invalid statement", src, pos) # 3. Skip comment pos = skip_comment(src, pos) @@ -124,8 +209,8 @@ def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # no except IndexError: break if char != "\n": - raise suffixed_err( - src, pos, "Expected newline or end of document after a statement" + raise TOMLDecodeError( + "Expected newline or end of document after a statement", src, pos ) pos += 1 @@ -136,13 +221,13 @@ class Flags: """Flags that map to parsed keys/namespaces.""" # Marks an immutable namespace (inline array or inline table). - FROZEN = 0 + FROZEN: Final = 0 # Marks a nest that has been explicitly created and can no longer # be opened using the "[table]" syntax. 
- EXPLICIT_NEST = 1 + EXPLICIT_NEST: Final = 1 def __init__(self) -> None: - self._flags: dict[str, dict] = {} + self._flags: dict[str, dict[Any, Any]] = {} self._pending_flags: set[tuple[Key, int]] = set() def add_pending(self, key: Key, flag: int) -> None: @@ -185,8 +270,8 @@ def is_(self, key: Key, flag: int) -> bool: cont = inner_cont["nested"] key_stem = key[-1] if key_stem in cont: - cont = cont[key_stem] - return flag in cont["flags"] or flag in cont["recursive_flags"] + inner_cont = cont[key_stem] + return flag in inner_cont["flags"] or flag in inner_cont["recursive_flags"] return False @@ -200,7 +285,7 @@ def get_or_create_nest( key: Key, *, access_lists: bool = True, - ) -> dict: + ) -> dict[str, Any]: cont: Any = self.dict for k in key: if k not in cont: @@ -210,7 +295,7 @@ def get_or_create_nest( cont = cont[-1] if not isinstance(cont, dict): raise KeyError("There is no nest behind this key") - return cont + return cont # type: ignore[no-any-return] def append_nest_to_list(self, key: Key) -> None: cont = self.get_or_create_nest(key[:-1]) @@ -224,9 +309,10 @@ def append_nest_to_list(self, key: Key) -> None: cont[last_key] = [{}] -class Output(NamedTuple): - data: NestedDict - flags: Flags +class Output: + def __init__(self) -> None: + self.data = NestedDict() + self.flags = Flags() def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: @@ -251,12 +337,12 @@ def skip_until( except ValueError: new_pos = len(src) if error_on_eof: - raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None + raise TOMLDecodeError(f"Expected {expect!r}", src, new_pos) from None if not error_on.isdisjoint(src[pos:new_pos]): while src[pos] not in error_on: pos += 1 - raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}") + raise TOMLDecodeError(f"Found invalid character {src[pos]!r}", src, pos) return new_pos @@ -287,15 +373,17 @@ def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: pos, key = parse_key(src, pos) if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot declare {key} twice") + raise TOMLDecodeError(f"Cannot declare {key} twice", src, pos) out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) try: out.data.get_or_create_nest(key) except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None if not src.startswith("]", pos): - raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration") + raise TOMLDecodeError( + "Expected ']' at the end of a table declaration", src, pos + ) return pos + 1, key @@ -305,7 +393,7 @@ def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: pos, key = parse_key(src, pos) if out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") + raise TOMLDecodeError(f"Cannot mutate immutable namespace {key}", src, pos) # Free the namespace now that it points to another empty list item... 
out.flags.unset_all(key) # ...but this key precisely is still prohibited from table declaration @@ -313,17 +401,19 @@ def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: try: out.data.append_nest_to_list(key) except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None if not src.startswith("]]", pos): - raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration") + raise TOMLDecodeError( + "Expected ']]' at the end of an array declaration", src, pos + ) return pos + 2, key def key_value_rule( src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat ) -> Pos: - pos, key, value = parse_key_value_pair(src, pos, parse_float) + pos, key, value = parse_key_value_pair(src, pos, parse_float, nest_lvl=0) key_parent, key_stem = key[:-1], key[-1] abs_key_parent = header + key_parent @@ -331,22 +421,22 @@ def key_value_rule( for cont_key in relative_path_cont_keys: # Check that dotted key syntax does not redefine an existing table if out.flags.is_(cont_key, Flags.EXPLICIT_NEST): - raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}") + raise TOMLDecodeError(f"Cannot redefine namespace {cont_key}", src, pos) # Containers in the relative path can't be opened with the table syntax or # dotted key/value syntax in following table sections. out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST) if out.flags.is_(abs_key_parent, Flags.FROZEN): - raise suffixed_err( - src, pos, f"Cannot mutate immutable namespace {abs_key_parent}" + raise TOMLDecodeError( + f"Cannot mutate immutable namespace {abs_key_parent}", src, pos ) try: nest = out.data.get_or_create_nest(abs_key_parent) except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None if key_stem in nest: - raise suffixed_err(src, pos, "Cannot overwrite a value") + raise TOMLDecodeError("Cannot overwrite a value", src, pos) # Mark inline table and array namespaces recursively immutable if isinstance(value, (dict, list)): out.flags.set(header + key, Flags.FROZEN, recursive=True) @@ -355,7 +445,7 @@ def key_value_rule( def parse_key_value_pair( - src: str, pos: Pos, parse_float: ParseFloat + src: str, pos: Pos, parse_float: ParseFloat, nest_lvl: int ) -> tuple[Pos, Key, Any]: pos, key = parse_key(src, pos) try: @@ -363,10 +453,10 @@ def parse_key_value_pair( except IndexError: char = None if char != "=": - raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair") + raise TOMLDecodeError("Expected '=' after a key in a key/value pair", src, pos) pos += 1 pos = skip_chars(src, pos, TOML_WS) - pos, value = parse_value(src, pos, parse_float) + pos, value = parse_value(src, pos, parse_float, nest_lvl) return pos, key, value @@ -401,7 +491,7 @@ def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: return parse_literal_str(src, pos) if char == '"': return parse_one_line_basic_str(src, pos) - raise suffixed_err(src, pos, "Invalid initial character for a key part") + raise TOMLDecodeError("Invalid initial character for a key part", src, pos) def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]: @@ -409,15 +499,17 @@ def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]: return parse_basic_str(src, pos, multiline=False) -def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]: +def parse_array( + src: str, pos: Pos, parse_float: 
ParseFloat, nest_lvl: int +) -> tuple[Pos, list[Any]]: pos += 1 - array: list = [] + array: list[Any] = [] pos = skip_comments_and_array_ws(src, pos) if src.startswith("]", pos): return pos + 1, array while True: - pos, val = parse_value(src, pos, parse_float) + pos, val = parse_value(src, pos, parse_float, nest_lvl) array.append(val) pos = skip_comments_and_array_ws(src, pos) @@ -425,7 +517,7 @@ def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list] if c == "]": return pos + 1, array if c != ",": - raise suffixed_err(src, pos, "Unclosed array") + raise TOMLDecodeError("Unclosed array", src, pos) pos += 1 pos = skip_comments_and_array_ws(src, pos) @@ -433,36 +525,40 @@ def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list] return pos + 1, array -def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]: +def parse_inline_table( + src: str, pos: Pos, parse_float: ParseFloat, nest_lvl: int +) -> tuple[Pos, dict[str, Any]]: pos += 1 nested_dict = NestedDict() flags = Flags() - pos = skip_chars(src, pos, TOML_WS) + pos = skip_comments_and_array_ws(src, pos) if src.startswith("}", pos): return pos + 1, nested_dict.dict while True: - pos, key, value = parse_key_value_pair(src, pos, parse_float) + pos, key, value = parse_key_value_pair(src, pos, parse_float, nest_lvl) key_parent, key_stem = key[:-1], key[-1] if flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") + raise TOMLDecodeError(f"Cannot mutate immutable namespace {key}", src, pos) try: nest = nested_dict.get_or_create_nest(key_parent, access_lists=False) except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None + raise TOMLDecodeError("Cannot overwrite a value", src, pos) from None if key_stem in nest: - raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}") + raise TOMLDecodeError(f"Duplicate inline table key {key_stem!r}", src, pos) nest[key_stem] = value - pos = skip_chars(src, pos, TOML_WS) + pos = skip_comments_and_array_ws(src, pos) c = src[pos : pos + 1] if c == "}": return pos + 1, nested_dict.dict if c != ",": - raise suffixed_err(src, pos, "Unclosed inline table") + raise TOMLDecodeError("Unclosed inline table", src, pos) + pos += 1 + pos = skip_comments_and_array_ws(src, pos) + if src.startswith("}", pos): + return pos + 1, nested_dict.dict if isinstance(value, (dict, list)): flags.set(key, Flags.FROZEN, recursive=True) - pos += 1 - pos = skip_chars(src, pos, TOML_WS) def parse_basic_str_escape( @@ -480,10 +576,12 @@ def parse_basic_str_escape( except IndexError: return pos, "" if char != "\n": - raise suffixed_err(src, pos, "Unescaped '\\' in a string") + raise TOMLDecodeError("Unescaped '\\' in a string", src, pos) pos += 1 pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) return pos, "" + if escape_id == "\\x": + return parse_hex_char(src, pos, 2) if escape_id == "\\u": return parse_hex_char(src, pos, 4) if escape_id == "\\U": @@ -491,7 +589,7 @@ def parse_basic_str_escape( try: return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] except KeyError: - raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None + raise TOMLDecodeError("Unescaped '\\' in a string", src, pos) from None def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]: @@ -501,11 +599,13 @@ def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]: def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: hex_str = 
src[pos : pos + hex_len] if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): - raise suffixed_err(src, pos, "Invalid hex value") + raise TOMLDecodeError("Invalid hex value", src, pos) pos += hex_len hex_int = int(hex_str, 16) if not is_unicode_scalar_value(hex_int): - raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value") + raise TOMLDecodeError( + "Escaped character is not a Unicode scalar value", src, pos + ) return pos, chr(hex_int) @@ -562,7 +662,7 @@ def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: try: char = src[pos] except IndexError: - raise suffixed_err(src, pos, "Unterminated string") from None + raise TOMLDecodeError("Unterminated string", src, pos) from None if char == '"': if not multiline: return pos + 1, result + src[start_pos:pos] @@ -577,13 +677,21 @@ def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: start_pos = pos continue if char in error_on: - raise suffixed_err(src, pos, f"Illegal character {char!r}") + raise TOMLDecodeError(f"Illegal character {char!r}", src, pos) pos += 1 -def parse_value( # noqa: C901 - src: str, pos: Pos, parse_float: ParseFloat +def parse_value( + src: str, pos: Pos, parse_float: ParseFloat, nest_lvl: int ) -> tuple[Pos, Any]: + if nest_lvl > MAX_INLINE_NESTING: + # Pure Python should have raised RecursionError already. + # This ensures mypyc binaries eventually do the same. + raise RecursionError( # pragma: no cover + "TOML inline arrays/tables are nested more than the allowed" + f" {MAX_INLINE_NESTING} levels" + ) + try: char: str | None = src[pos] except IndexError: @@ -613,11 +721,11 @@ def parse_value( # noqa: C901 # Arrays if char == "[": - return parse_array(src, pos, parse_float) + return parse_array(src, pos, parse_float, nest_lvl + 1) # Inline tables if char == "{": - return parse_inline_table(src, pos, parse_float) + return parse_inline_table(src, pos, parse_float, nest_lvl + 1) # Dates and times datetime_match = RE_DATETIME.match(src, pos) @@ -625,7 +733,7 @@ def parse_value( # noqa: C901 try: datetime_obj = match_to_datetime(datetime_match) except ValueError as e: - raise suffixed_err(src, pos, "Invalid date or datetime") from e + raise TOMLDecodeError("Invalid date or datetime", src, pos) from e return datetime_match.end(), datetime_obj localtime_match = RE_LOCALTIME.match(src, pos) if localtime_match: @@ -646,24 +754,7 @@ def parse_value( # noqa: C901 if first_four in {"-inf", "+inf", "-nan", "+nan"}: return pos + 4, parse_float(first_four) - raise suffixed_err(src, pos, "Invalid value") - - -def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError: - """Return a `TOMLDecodeError` where error message is suffixed with - coordinates in source.""" - - def coord_repr(src: str, pos: Pos) -> str: - if pos >= len(src): - return "end of document" - line = src.count("\n", 0, pos) + 1 - if line == 1: - column = pos + 1 - else: - column = pos - src.rindex("\n", 0, pos) - return f"line {line}, column {column}" - - return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})") + raise TOMLDecodeError("Invalid value", src, pos) def is_unicode_scalar_value(codepoint: int) -> bool: @@ -679,7 +770,7 @@ def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat: instead of returning illegal types. """ # The default `float` callable never returns illegal types. Optimize it. 
- if parse_float is float: # type: ignore[comparison-overlap] + if parse_float is float: return float def safe_parse_float(float_str: str) -> Any: diff --git a/setuptools/_vendor/tomli/_re.py b/setuptools/_vendor/tomli/_re.py index 994bb7493f..fc374ed63d 100644 --- a/setuptools/_vendor/tomli/_re.py +++ b/setuptools/_vendor/tomli/_re.py @@ -7,16 +7,23 @@ from datetime import date, datetime, time, timedelta, timezone, tzinfo from functools import lru_cache import re -from typing import Any -from ._types import ParseFloat +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Any, Final -# E.g. -# - 00:32:00.999999 -# - 00:32:00 -_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?" + from ._types import ParseFloat -RE_NUMBER = re.compile( +_TIME_RE_STR: Final = r""" +([01][0-9]|2[0-3]) # hours +:([0-5][0-9]) # minutes +(?: + :([0-5][0-9]) # optional seconds + (?:\.([0-9]{1,6})[0-9]*)? # optional fractions of a second +)? +""" + +RE_NUMBER: Final = re.compile( r""" 0 (?: @@ -35,8 +42,8 @@ """, flags=re.VERBOSE, ) -RE_LOCALTIME = re.compile(_TIME_RE_STR) -RE_DATETIME = re.compile( +RE_LOCALTIME: Final = re.compile(_TIME_RE_STR, flags=re.VERBOSE) +RE_DATETIME: Final = re.compile( rf""" ([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27 (?: @@ -49,7 +56,7 @@ ) -def match_to_datetime(match: re.Match) -> datetime | date: +def match_to_datetime(match: re.Match[str]) -> datetime | date: """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`. Raises ValueError if the match does not correspond to a valid date @@ -71,7 +78,8 @@ def match_to_datetime(match: re.Match) -> datetime | date: year, month, day = int(year_str), int(month_str), int(day_str) if hour_str is None: return date(year, month, day) - hour, minute, sec = int(hour_str), int(minute_str), int(sec_str) + hour, minute = int(hour_str), int(minute_str) + sec = int(sec_str) if sec_str else 0 micros = int(micros_str.ljust(6, "0")) if micros_str else 0 if offset_sign_str: tz: tzinfo | None = cached_tz( @@ -84,6 +92,9 @@ def match_to_datetime(match: re.Match) -> datetime | date: return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz) +# No need to limit cache size. This is only ever called on input +# that matched RE_DATETIME, so there is an implicit bound of +# 24 (hours) * 60 (minutes) * 2 (offset direction) = 2880. 
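The `nest_lvl` counter threaded through the parsing functions above enforces `MAX_INLINE_NESTING`, per the comment near the top of the hunk: pure Python already raises `RecursionError` on pathologically nested inline arrays/tables, but mypyc-compiled wheels would otherwise crash. A sketch of the guarded failure mode, using hypothetical adversarial input:

```python
import tomli

# 20k levels of inline-array nesting exceeds both the interpreter's
# recursion limit and MAX_INLINE_NESTING.
deep = "x = " + "[" * 20_000 + "]" * 20_000
try:
    tomli.loads(deep)
except RecursionError:
    # Pure Python hits the interpreter limit on the way down; the explicit
    # check makes mypyc binaries raise the same error instead of crashing.
    print("nested too deeply")
```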
@lru_cache(maxsize=None) def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: sign = 1 if sign_str == "+" else -1 @@ -95,13 +106,14 @@ def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone: ) -def match_to_localtime(match: re.Match) -> time: +def match_to_localtime(match: re.Match[str]) -> time: hour_str, minute_str, sec_str, micros_str = match.groups() + sec = int(sec_str) if sec_str else 0 micros = int(micros_str.ljust(6, "0")) if micros_str else 0 - return time(int(hour_str), int(minute_str), int(sec_str), micros) + return time(int(hour_str), int(minute_str), sec, micros) -def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any: +def match_to_number(match: re.Match[str], parse_float: ParseFloat) -> Any: if match.group("floatpart"): return parse_float(match.group()) return int(match.group(), 0) diff --git a/setuptools/_vendor/typeguard-4.3.0.dist-info/INSTALLER b/setuptools/_vendor/typeguard-4.3.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/typeguard-4.3.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/typeguard-4.3.0.dist-info/LICENSE b/setuptools/_vendor/typeguard-4.3.0.dist-info/LICENSE deleted file mode 100644 index 07806f8af9..0000000000 --- a/setuptools/_vendor/typeguard-4.3.0.dist-info/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -This is the MIT license: http://www.opensource.org/licenses/mit-license.php - -Copyright (c) Alex Grönholm - -Permission is hereby granted, free of charge, to any person obtaining a copy of this -software and associated documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons -to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR -PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE -FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
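Below, the vendored `typeguard` distribution is deleted outright rather than upgraded. For context, the metadata removed in the next hunks describes a run-time checker with two entry points, `check_type` and `@typechecked`; a hedged sketch of that API (typeguard would now have to come from PyPI, since setuptools no longer ships it):

```python
from typing import List

from typeguard import TypeCheckError, check_type, typechecked

# isinstance()-like checking against arbitrary PEP 484 annotations
check_type([1, 2, 3], List[int])  # returns the value; raises on mismatch

@typechecked
def greet(name: str) -> str:
    # arguments and the return value are verified at call time
    return f"Hello, {name}"

greet("world")  # OK
try:
    greet(42)  # type: ignore[arg-type]
except TypeCheckError as exc:
    print(exc)  # e.g.: argument "name" (int) is not an instance of str
```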
diff --git a/setuptools/_vendor/typeguard-4.3.0.dist-info/METADATA b/setuptools/_vendor/typeguard-4.3.0.dist-info/METADATA deleted file mode 100644 index 6e5750b485..0000000000 --- a/setuptools/_vendor/typeguard-4.3.0.dist-info/METADATA +++ /dev/null @@ -1,81 +0,0 @@ -Metadata-Version: 2.1 -Name: typeguard -Version: 4.3.0 -Summary: Run-time type checker for Python -Author-email: Alex Grönholm -License: MIT -Project-URL: Documentation, https://typeguard.readthedocs.io/en/latest/ -Project-URL: Change log, https://typeguard.readthedocs.io/en/latest/versionhistory.html -Project-URL: Source code, https://github.com/agronholm/typeguard -Project-URL: Issue tracker, https://github.com/agronholm/typeguard/issues -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3.9 -Classifier: Programming Language :: Python :: 3.10 -Classifier: Programming Language :: Python :: 3.11 -Classifier: Programming Language :: Python :: 3.12 -Requires-Python: >=3.8 -Description-Content-Type: text/x-rst -License-File: LICENSE -Requires-Dist: typing-extensions >=4.10.0 -Requires-Dist: importlib-metadata >=3.6 ; python_version < "3.10" -Provides-Extra: doc -Requires-Dist: packaging ; extra == 'doc' -Requires-Dist: Sphinx >=7 ; extra == 'doc' -Requires-Dist: sphinx-autodoc-typehints >=1.2.0 ; extra == 'doc' -Requires-Dist: sphinx-rtd-theme >=1.3.0 ; extra == 'doc' -Provides-Extra: test -Requires-Dist: coverage[toml] >=7 ; extra == 'test' -Requires-Dist: pytest >=7 ; extra == 'test' -Requires-Dist: mypy >=1.2.0 ; (platform_python_implementation != "PyPy") and extra == 'test' - -.. image:: https://github.com/agronholm/typeguard/actions/workflows/test.yml/badge.svg - :target: https://github.com/agronholm/typeguard/actions/workflows/test.yml - :alt: Build Status -.. image:: https://coveralls.io/repos/agronholm/typeguard/badge.svg?branch=master&service=github - :target: https://coveralls.io/github/agronholm/typeguard?branch=master - :alt: Code Coverage -.. image:: https://readthedocs.org/projects/typeguard/badge/?version=latest - :target: https://typeguard.readthedocs.io/en/latest/?badge=latest - :alt: Documentation - -This library provides run-time type checking for functions defined with -`PEP 484 `_ argument (and return) type -annotations, and any arbitrary objects. It can be used together with static type -checkers as an additional layer of type safety, to catch type violations that could only -be detected at run time. - -Two principal ways to do type checking are provided: - -#. The ``check_type`` function: - - * like ``isinstance()``, but supports arbitrary type annotations (within limits) - * can be used as a ``cast()`` replacement, but with actual checking of the value -#. Code instrumentation: - - * entire modules, or individual functions (via ``@typechecked``) are recompiled, with - type checking code injected into them - * automatically checks function arguments, return values and assignments to annotated - local variables - * for generator functions (regular and async), checks yield and send values - * requires the original source code of the instrumented module(s) to be accessible - -Two options are provided for code instrumentation: - -#. the ``@typechecked`` function: - - * can be applied to functions individually -#. 
the import hook (``typeguard.install_import_hook()``): - - * automatically instruments targeted modules on import - * no manual code changes required in the target modules - * requires the import hook to be installed before the targeted modules are imported - * may clash with other import hooks - -See the documentation_ for further information. - -.. _documentation: https://typeguard.readthedocs.io/en/latest/ diff --git a/setuptools/_vendor/typeguard-4.3.0.dist-info/RECORD b/setuptools/_vendor/typeguard-4.3.0.dist-info/RECORD deleted file mode 100644 index 801e73347c..0000000000 --- a/setuptools/_vendor/typeguard-4.3.0.dist-info/RECORD +++ /dev/null @@ -1,34 +0,0 @@ -typeguard-4.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -typeguard-4.3.0.dist-info/LICENSE,sha256=YWP3mH37ONa8MgzitwsvArhivEESZRbVUu8c1DJH51g,1130 -typeguard-4.3.0.dist-info/METADATA,sha256=z2dcHAp0TwhYCFU5Deh8x31nazElgujUz9tbuP0pjSE,3717 -typeguard-4.3.0.dist-info/RECORD,, -typeguard-4.3.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 -typeguard-4.3.0.dist-info/entry_points.txt,sha256=qp7NQ1aLtiSgMQqo6gWlfGpy0IIXzoMJmeQTLpzqFZQ,48 -typeguard-4.3.0.dist-info/top_level.txt,sha256=4z28AhuDodwRS_c1J_l8H51t5QuwfTseskYzlxp6grs,10 -typeguard/__init__.py,sha256=Onh4w38elPCjtlcU3JY9k3h70NjsxXIkAflmQn-Z0FY,2071 -typeguard/__pycache__/__init__.cpython-312.pyc,, -typeguard/__pycache__/_checkers.cpython-312.pyc,, -typeguard/__pycache__/_config.cpython-312.pyc,, -typeguard/__pycache__/_decorators.cpython-312.pyc,, -typeguard/__pycache__/_exceptions.cpython-312.pyc,, -typeguard/__pycache__/_functions.cpython-312.pyc,, -typeguard/__pycache__/_importhook.cpython-312.pyc,, -typeguard/__pycache__/_memo.cpython-312.pyc,, -typeguard/__pycache__/_pytest_plugin.cpython-312.pyc,, -typeguard/__pycache__/_suppression.cpython-312.pyc,, -typeguard/__pycache__/_transformer.cpython-312.pyc,, -typeguard/__pycache__/_union_transformer.cpython-312.pyc,, -typeguard/__pycache__/_utils.cpython-312.pyc,, -typeguard/_checkers.py,sha256=JRrgKicdOEfIBoNEtegYCEIlhpad-a1u1Em7GCj0WCI,31360 -typeguard/_config.py,sha256=nIz8QwDa-oFO3L9O8_6srzlmd99pSby2wOM4Wb7F_B0,2846 -typeguard/_decorators.py,sha256=v6dsIeWvPhExGLP_wXF-RmDUyjZf_Ak28g7gBJ_v0-0,9033 -typeguard/_exceptions.py,sha256=ZIPeiV-FBd5Emw2EaWd2Fvlsrwi4ocwT2fVGBIAtHcQ,1121 -typeguard/_functions.py,sha256=ibgSAKa5ptIm1eR9ARG0BSozAFJPFNASZqhPVyQeqig,10393 -typeguard/_importhook.py,sha256=ugjCDvFcdWMU7UugqlJG91IpVNpEIxtRr-99s0h1k7M,6389 -typeguard/_memo.py,sha256=1juQV_vxnD2JYKbSrebiQuj4oKHz6n67v9pYA-CCISg,1303 -typeguard/_pytest_plugin.py,sha256=-fcSqkv54rIfIF8pDavY5YQPkj4OX8GMt_lL7CQSD4I,4416 -typeguard/_suppression.py,sha256=VQfzxcwIbu3if0f7VBkKM7hkYOA7tNFw9a7jMBsmMg4,2266 -typeguard/_transformer.py,sha256=9Ha7_QhdwoUni_6hvdY-hZbuEergowHrNL2vzHIakFY,44937 -typeguard/_union_transformer.py,sha256=v_42r7-6HuRX2SoFwnyJ-E5PlxXpVeUJPJR1-HU9qSo,1354 -typeguard/_utils.py,sha256=5HhO1rPn5f1M6ymkVAEv7Xmlz1cX-j0OnTMlyHqqrR8,5270 -typeguard/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/setuptools/_vendor/typeguard-4.3.0.dist-info/WHEEL b/setuptools/_vendor/typeguard-4.3.0.dist-info/WHEEL deleted file mode 100644 index bab98d6758..0000000000 --- a/setuptools/_vendor/typeguard-4.3.0.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.43.0) -Root-Is-Purelib: true -Tag: py3-none-any - diff --git a/setuptools/_vendor/typeguard-4.3.0.dist-info/entry_points.txt 
b/setuptools/_vendor/typeguard-4.3.0.dist-info/entry_points.txt deleted file mode 100644 index 47c9d0bd91..0000000000 --- a/setuptools/_vendor/typeguard-4.3.0.dist-info/entry_points.txt +++ /dev/null @@ -1,2 +0,0 @@ -[pytest11] -typeguard = typeguard._pytest_plugin diff --git a/setuptools/_vendor/typeguard-4.3.0.dist-info/top_level.txt b/setuptools/_vendor/typeguard-4.3.0.dist-info/top_level.txt deleted file mode 100644 index be5ec23ea2..0000000000 --- a/setuptools/_vendor/typeguard-4.3.0.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -typeguard diff --git a/setuptools/_vendor/typeguard/__init__.py b/setuptools/_vendor/typeguard/__init__.py deleted file mode 100644 index 6781cad094..0000000000 --- a/setuptools/_vendor/typeguard/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -from typing import Any - -from ._checkers import TypeCheckerCallable as TypeCheckerCallable -from ._checkers import TypeCheckLookupCallback as TypeCheckLookupCallback -from ._checkers import check_type_internal as check_type_internal -from ._checkers import checker_lookup_functions as checker_lookup_functions -from ._checkers import load_plugins as load_plugins -from ._config import CollectionCheckStrategy as CollectionCheckStrategy -from ._config import ForwardRefPolicy as ForwardRefPolicy -from ._config import TypeCheckConfiguration as TypeCheckConfiguration -from ._decorators import typechecked as typechecked -from ._decorators import typeguard_ignore as typeguard_ignore -from ._exceptions import InstrumentationWarning as InstrumentationWarning -from ._exceptions import TypeCheckError as TypeCheckError -from ._exceptions import TypeCheckWarning as TypeCheckWarning -from ._exceptions import TypeHintWarning as TypeHintWarning -from ._functions import TypeCheckFailCallback as TypeCheckFailCallback -from ._functions import check_type as check_type -from ._functions import warn_on_error as warn_on_error -from ._importhook import ImportHookManager as ImportHookManager -from ._importhook import TypeguardFinder as TypeguardFinder -from ._importhook import install_import_hook as install_import_hook -from ._memo import TypeCheckMemo as TypeCheckMemo -from ._suppression import suppress_type_checks as suppress_type_checks -from ._utils import Unset as Unset - -# Re-export imports so they look like they live directly in this package -for value in list(locals().values()): - if getattr(value, "__module__", "").startswith(f"{__name__}."): - value.__module__ = __name__ - - -config: TypeCheckConfiguration - - -def __getattr__(name: str) -> Any: - if name == "config": - from ._config import global_config - - return global_config - - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") - - -# Automatically load checker lookup functions unless explicitly disabled -if "TYPEGUARD_DISABLE_PLUGIN_AUTOLOAD" not in os.environ: - load_plugins() diff --git a/setuptools/_vendor/typeguard/_checkers.py b/setuptools/_vendor/typeguard/_checkers.py deleted file mode 100644 index 67dd5ad4dc..0000000000 --- a/setuptools/_vendor/typeguard/_checkers.py +++ /dev/null @@ -1,993 +0,0 @@ -from __future__ import annotations - -import collections.abc -import inspect -import sys -import types -import typing -import warnings -from enum import Enum -from inspect import Parameter, isclass, isfunction -from io import BufferedIOBase, IOBase, RawIOBase, TextIOBase -from textwrap import indent -from typing import ( - IO, - AbstractSet, - Any, - BinaryIO, - Callable, - Dict, - ForwardRef, - List, - Mapping, - MutableMapping, - NewType, - 
Optional, - Sequence, - Set, - TextIO, - Tuple, - Type, - TypeVar, - Union, -) -from unittest.mock import Mock -from weakref import WeakKeyDictionary - -try: - import typing_extensions -except ImportError: - typing_extensions = None # type: ignore[assignment] - -# Must use this because typing.is_typeddict does not recognize -# TypedDict from typing_extensions, and as of version 4.12.0 -# typing_extensions.TypedDict is different from typing.TypedDict -# on all versions. -from typing_extensions import is_typeddict - -from ._config import ForwardRefPolicy -from ._exceptions import TypeCheckError, TypeHintWarning -from ._memo import TypeCheckMemo -from ._utils import evaluate_forwardref, get_stacklevel, get_type_name, qualified_name - -if sys.version_info >= (3, 11): - from typing import ( - Annotated, - NotRequired, - TypeAlias, - get_args, - get_origin, - ) - - SubclassableAny = Any -else: - from typing_extensions import ( - Annotated, - NotRequired, - TypeAlias, - get_args, - get_origin, - ) - from typing_extensions import Any as SubclassableAny - -if sys.version_info >= (3, 10): - from importlib.metadata import entry_points - from typing import ParamSpec -else: - from importlib_metadata import entry_points - from typing_extensions import ParamSpec - -TypeCheckerCallable: TypeAlias = Callable[ - [Any, Any, Tuple[Any, ...], TypeCheckMemo], Any -] -TypeCheckLookupCallback: TypeAlias = Callable[ - [Any, Tuple[Any, ...], Tuple[Any, ...]], Optional[TypeCheckerCallable] -] - -checker_lookup_functions: list[TypeCheckLookupCallback] = [] -generic_alias_types: tuple[type, ...] = (type(List), type(List[Any])) -if sys.version_info >= (3, 9): - generic_alias_types += (types.GenericAlias,) - -protocol_check_cache: WeakKeyDictionary[ - type[Any], dict[type[Any], TypeCheckError | None] -] = WeakKeyDictionary() - -# Sentinel -_missing = object() - -# Lifted from mypy.sharedparse -BINARY_MAGIC_METHODS = { - "__add__", - "__and__", - "__cmp__", - "__divmod__", - "__div__", - "__eq__", - "__floordiv__", - "__ge__", - "__gt__", - "__iadd__", - "__iand__", - "__idiv__", - "__ifloordiv__", - "__ilshift__", - "__imatmul__", - "__imod__", - "__imul__", - "__ior__", - "__ipow__", - "__irshift__", - "__isub__", - "__itruediv__", - "__ixor__", - "__le__", - "__lshift__", - "__lt__", - "__matmul__", - "__mod__", - "__mul__", - "__ne__", - "__or__", - "__pow__", - "__radd__", - "__rand__", - "__rdiv__", - "__rfloordiv__", - "__rlshift__", - "__rmatmul__", - "__rmod__", - "__rmul__", - "__ror__", - "__rpow__", - "__rrshift__", - "__rshift__", - "__rsub__", - "__rtruediv__", - "__rxor__", - "__sub__", - "__truediv__", - "__xor__", -} - - -def check_callable( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if not callable(value): - raise TypeCheckError("is not callable") - - if args: - try: - signature = inspect.signature(value) - except (TypeError, ValueError): - return - - argument_types = args[0] - if isinstance(argument_types, list) and not any( - type(item) is ParamSpec for item in argument_types - ): - # The callable must not have keyword-only arguments without defaults - unfulfilled_kwonlyargs = [ - param.name - for param in signature.parameters.values() - if param.kind == Parameter.KEYWORD_ONLY - and param.default == Parameter.empty - ] - if unfulfilled_kwonlyargs: - raise TypeCheckError( - f"has mandatory keyword-only arguments in its declaration: " - f'{", ".join(unfulfilled_kwonlyargs)}' - ) - - num_positional_args = num_mandatory_pos_args = 0 - has_varargs = 
False - for param in signature.parameters.values(): - if param.kind in ( - Parameter.POSITIONAL_ONLY, - Parameter.POSITIONAL_OR_KEYWORD, - ): - num_positional_args += 1 - if param.default is Parameter.empty: - num_mandatory_pos_args += 1 - elif param.kind == Parameter.VAR_POSITIONAL: - has_varargs = True - - if num_mandatory_pos_args > len(argument_types): - raise TypeCheckError( - f"has too many mandatory positional arguments in its declaration; " - f"expected {len(argument_types)} but {num_mandatory_pos_args} " - f"mandatory positional argument(s) declared" - ) - elif not has_varargs and num_positional_args < len(argument_types): - raise TypeCheckError( - f"has too few arguments in its declaration; expected " - f"{len(argument_types)} but {num_positional_args} argument(s) " - f"declared" - ) - - -def check_mapping( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if origin_type is Dict or origin_type is dict: - if not isinstance(value, dict): - raise TypeCheckError("is not a dict") - if origin_type is MutableMapping or origin_type is collections.abc.MutableMapping: - if not isinstance(value, collections.abc.MutableMapping): - raise TypeCheckError("is not a mutable mapping") - elif not isinstance(value, collections.abc.Mapping): - raise TypeCheckError("is not a mapping") - - if args: - key_type, value_type = args - if key_type is not Any or value_type is not Any: - samples = memo.config.collection_check_strategy.iterate_samples( - value.items() - ) - for k, v in samples: - try: - check_type_internal(k, key_type, memo) - except TypeCheckError as exc: - exc.append_path_element(f"key {k!r}") - raise - - try: - check_type_internal(v, value_type, memo) - except TypeCheckError as exc: - exc.append_path_element(f"value of key {k!r}") - raise - - -def check_typed_dict( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if not isinstance(value, dict): - raise TypeCheckError("is not a dict") - - declared_keys = frozenset(origin_type.__annotations__) - if hasattr(origin_type, "__required_keys__"): - required_keys = set(origin_type.__required_keys__) - else: # py3.8 and lower - required_keys = set(declared_keys) if origin_type.__total__ else set() - - existing_keys = set(value) - extra_keys = existing_keys - declared_keys - if extra_keys: - keys_formatted = ", ".join(f'"{key}"' for key in sorted(extra_keys, key=repr)) - raise TypeCheckError(f"has unexpected extra key(s): {keys_formatted}") - - # Detect NotRequired fields which are hidden by get_type_hints() - type_hints: dict[str, type] = {} - for key, annotation in origin_type.__annotations__.items(): - if isinstance(annotation, ForwardRef): - annotation = evaluate_forwardref(annotation, memo) - if get_origin(annotation) is NotRequired: - required_keys.discard(key) - annotation = get_args(annotation)[0] - - type_hints[key] = annotation - - missing_keys = required_keys - existing_keys - if missing_keys: - keys_formatted = ", ".join(f'"{key}"' for key in sorted(missing_keys, key=repr)) - raise TypeCheckError(f"is missing required key(s): {keys_formatted}") - - for key, argtype in type_hints.items(): - argvalue = value.get(key, _missing) - if argvalue is not _missing: - try: - check_type_internal(argvalue, argtype, memo) - except TypeCheckError as exc: - exc.append_path_element(f"value of key {key!r}") - raise - - -def check_list( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if not isinstance(value, list): - 
raise TypeCheckError("is not a list") - - if args and args != (Any,): - samples = memo.config.collection_check_strategy.iterate_samples(value) - for i, v in enumerate(samples): - try: - check_type_internal(v, args[0], memo) - except TypeCheckError as exc: - exc.append_path_element(f"item {i}") - raise - - -def check_sequence( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if not isinstance(value, collections.abc.Sequence): - raise TypeCheckError("is not a sequence") - - if args and args != (Any,): - samples = memo.config.collection_check_strategy.iterate_samples(value) - for i, v in enumerate(samples): - try: - check_type_internal(v, args[0], memo) - except TypeCheckError as exc: - exc.append_path_element(f"item {i}") - raise - - -def check_set( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if origin_type is frozenset: - if not isinstance(value, frozenset): - raise TypeCheckError("is not a frozenset") - elif not isinstance(value, AbstractSet): - raise TypeCheckError("is not a set") - - if args and args != (Any,): - samples = memo.config.collection_check_strategy.iterate_samples(value) - for v in samples: - try: - check_type_internal(v, args[0], memo) - except TypeCheckError as exc: - exc.append_path_element(f"[{v}]") - raise - - -def check_tuple( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - # Specialized check for NamedTuples - if field_types := getattr(origin_type, "__annotations__", None): - if not isinstance(value, origin_type): - raise TypeCheckError( - f"is not a named tuple of type {qualified_name(origin_type)}" - ) - - for name, field_type in field_types.items(): - try: - check_type_internal(getattr(value, name), field_type, memo) - except TypeCheckError as exc: - exc.append_path_element(f"attribute {name!r}") - raise - - return - elif not isinstance(value, tuple): - raise TypeCheckError("is not a tuple") - - if args: - use_ellipsis = args[-1] is Ellipsis - tuple_params = args[: -1 if use_ellipsis else None] - else: - # Unparametrized Tuple or plain tuple - return - - if use_ellipsis: - element_type = tuple_params[0] - samples = memo.config.collection_check_strategy.iterate_samples(value) - for i, element in enumerate(samples): - try: - check_type_internal(element, element_type, memo) - except TypeCheckError as exc: - exc.append_path_element(f"item {i}") - raise - elif tuple_params == ((),): - if value != (): - raise TypeCheckError("is not an empty tuple") - else: - if len(value) != len(tuple_params): - raise TypeCheckError( - f"has wrong number of elements (expected {len(tuple_params)}, got " - f"{len(value)} instead)" - ) - - for i, (element, element_type) in enumerate(zip(value, tuple_params)): - try: - check_type_internal(element, element_type, memo) - except TypeCheckError as exc: - exc.append_path_element(f"item {i}") - raise - - -def check_union( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - errors: dict[str, TypeCheckError] = {} - try: - for type_ in args: - try: - check_type_internal(value, type_, memo) - return - except TypeCheckError as exc: - errors[get_type_name(type_)] = exc - - formatted_errors = indent( - "\n".join(f"{key}: {error}" for key, error in errors.items()), " " - ) - finally: - del errors # avoid creating ref cycle - raise TypeCheckError(f"did not match any element in the union:\n{formatted_errors}") - - -def check_uniontype( - value: Any, - origin_type: Any, - 
args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - errors: dict[str, TypeCheckError] = {} - for type_ in args: - try: - check_type_internal(value, type_, memo) - return - except TypeCheckError as exc: - errors[get_type_name(type_)] = exc - - formatted_errors = indent( - "\n".join(f"{key}: {error}" for key, error in errors.items()), " " - ) - raise TypeCheckError(f"did not match any element in the union:\n{formatted_errors}") - - -def check_class( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if not isclass(value) and not isinstance(value, generic_alias_types): - raise TypeCheckError("is not a class") - - if not args: - return - - if isinstance(args[0], ForwardRef): - expected_class = evaluate_forwardref(args[0], memo) - else: - expected_class = args[0] - - if expected_class is Any: - return - elif getattr(expected_class, "_is_protocol", False): - check_protocol(value, expected_class, (), memo) - elif isinstance(expected_class, TypeVar): - check_typevar(value, expected_class, (), memo, subclass_check=True) - elif get_origin(expected_class) is Union: - errors: dict[str, TypeCheckError] = {} - for arg in get_args(expected_class): - if arg is Any: - return - - try: - check_class(value, type, (arg,), memo) - return - except TypeCheckError as exc: - errors[get_type_name(arg)] = exc - else: - formatted_errors = indent( - "\n".join(f"{key}: {error}" for key, error in errors.items()), " " - ) - raise TypeCheckError( - f"did not match any element in the union:\n{formatted_errors}" - ) - elif not issubclass(value, expected_class): # type: ignore[arg-type] - raise TypeCheckError(f"is not a subclass of {qualified_name(expected_class)}") - - -def check_newtype( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - check_type_internal(value, origin_type.__supertype__, memo) - - -def check_instance( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if not isinstance(value, origin_type): - raise TypeCheckError(f"is not an instance of {qualified_name(origin_type)}") - - -def check_typevar( - value: Any, - origin_type: TypeVar, - args: tuple[Any, ...], - memo: TypeCheckMemo, - *, - subclass_check: bool = False, -) -> None: - if origin_type.__bound__ is not None: - annotation = ( - Type[origin_type.__bound__] if subclass_check else origin_type.__bound__ - ) - check_type_internal(value, annotation, memo) - elif origin_type.__constraints__: - for constraint in origin_type.__constraints__: - annotation = Type[constraint] if subclass_check else constraint - try: - check_type_internal(value, annotation, memo) - except TypeCheckError: - pass - else: - break - else: - formatted_constraints = ", ".join( - get_type_name(constraint) for constraint in origin_type.__constraints__ - ) - raise TypeCheckError( - f"does not match any of the constraints " f"({formatted_constraints})" - ) - - -if typing_extensions is None: - - def _is_literal_type(typ: object) -> bool: - return typ is typing.Literal - -else: - - def _is_literal_type(typ: object) -> bool: - return typ is typing.Literal or typ is typing_extensions.Literal - - -def check_literal( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - def get_literal_args(literal_args: tuple[Any, ...]) -> tuple[Any, ...]: - retval: list[Any] = [] - for arg in literal_args: - if _is_literal_type(get_origin(arg)): - retval.extend(get_literal_args(arg.__args__)) - elif arg is None or isinstance(arg, 
(int, str, bytes, bool, Enum)): - retval.append(arg) - else: - raise TypeError( - f"Illegal literal value: {arg}" - ) # TypeError here is deliberate - - return tuple(retval) - - final_args = tuple(get_literal_args(args)) - try: - index = final_args.index(value) - except ValueError: - pass - else: - if type(final_args[index]) is type(value): - return - - formatted_args = ", ".join(repr(arg) for arg in final_args) - raise TypeCheckError(f"is not any of ({formatted_args})") from None - - -def check_literal_string( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - check_type_internal(value, str, memo) - - -def check_typeguard( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - check_type_internal(value, bool, memo) - - -def check_none( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if value is not None: - raise TypeCheckError("is not None") - - -def check_number( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if origin_type is complex and not isinstance(value, (complex, float, int)): - raise TypeCheckError("is neither complex, float or int") - elif origin_type is float and not isinstance(value, (float, int)): - raise TypeCheckError("is neither float or int") - - -def check_io( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if origin_type is TextIO or (origin_type is IO and args == (str,)): - if not isinstance(value, TextIOBase): - raise TypeCheckError("is not a text based I/O object") - elif origin_type is BinaryIO or (origin_type is IO and args == (bytes,)): - if not isinstance(value, (RawIOBase, BufferedIOBase)): - raise TypeCheckError("is not a binary I/O object") - elif not isinstance(value, IOBase): - raise TypeCheckError("is not an I/O object") - - -def check_protocol( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - subject: type[Any] = value if isclass(value) else type(value) - - if subject in protocol_check_cache: - result_map = protocol_check_cache[subject] - if origin_type in result_map: - if exc := result_map[origin_type]: - raise exc - else: - return - - # Collect a set of methods and non-method attributes present in the protocol - ignored_attrs = set(dir(typing.Protocol)) | { - "__annotations__", - "__non_callable_proto_members__", - } - expected_methods: dict[str, tuple[Any, Any]] = {} - expected_noncallable_members: dict[str, Any] = {} - for attrname in dir(origin_type): - # Skip attributes present in typing.Protocol - if attrname in ignored_attrs: - continue - - member = getattr(origin_type, attrname) - if callable(member): - signature = inspect.signature(member) - argtypes = [ - (p.annotation if p.annotation is not Parameter.empty else Any) - for p in signature.parameters.values() - if p.kind is not Parameter.KEYWORD_ONLY - ] or Ellipsis - return_annotation = ( - signature.return_annotation - if signature.return_annotation is not Parameter.empty - else Any - ) - expected_methods[attrname] = argtypes, return_annotation - else: - expected_noncallable_members[attrname] = member - - for attrname, annotation in typing.get_type_hints(origin_type).items(): - expected_noncallable_members[attrname] = annotation - - subject_annotations = typing.get_type_hints(subject) - - # Check that all required methods are present and their signatures are compatible - result_map = 
protocol_check_cache.setdefault(subject, {}) - try: - for attrname, callable_args in expected_methods.items(): - try: - method = getattr(subject, attrname) - except AttributeError: - if attrname in subject_annotations: - raise TypeCheckError( - f"is not compatible with the {origin_type.__qualname__} protocol " - f"because its {attrname!r} attribute is not a method" - ) from None - else: - raise TypeCheckError( - f"is not compatible with the {origin_type.__qualname__} protocol " - f"because it has no method named {attrname!r}" - ) from None - - if not callable(method): - raise TypeCheckError( - f"is not compatible with the {origin_type.__qualname__} protocol " - f"because its {attrname!r} attribute is not a callable" - ) - - # TODO: raise exception on added keyword-only arguments without defaults - try: - check_callable(method, Callable, callable_args, memo) - except TypeCheckError as exc: - raise TypeCheckError( - f"is not compatible with the {origin_type.__qualname__} protocol " - f"because its {attrname!r} method {exc}" - ) from None - - # Check that all required non-callable members are present - for attrname in expected_noncallable_members: - # TODO: implement assignability checks for non-callable members - if attrname not in subject_annotations and not hasattr(subject, attrname): - raise TypeCheckError( - f"is not compatible with the {origin_type.__qualname__} protocol " - f"because it has no attribute named {attrname!r}" - ) - except TypeCheckError as exc: - result_map[origin_type] = exc - raise - else: - result_map[origin_type] = None - - -def check_byteslike( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if not isinstance(value, (bytearray, bytes, memoryview)): - raise TypeCheckError("is not bytes-like") - - -def check_self( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if memo.self_type is None: - raise TypeCheckError("cannot be checked against Self outside of a method call") - - if isclass(value): - if not issubclass(value, memo.self_type): - raise TypeCheckError( - f"is not an instance of the self type " - f"({qualified_name(memo.self_type)})" - ) - elif not isinstance(value, memo.self_type): - raise TypeCheckError( - f"is not an instance of the self type ({qualified_name(memo.self_type)})" - ) - - -def check_paramspec( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - pass # No-op for now - - -def check_instanceof( - value: Any, - origin_type: Any, - args: tuple[Any, ...], - memo: TypeCheckMemo, -) -> None: - if not isinstance(value, origin_type): - raise TypeCheckError(f"is not an instance of {qualified_name(origin_type)}") - - -def check_type_internal( - value: Any, - annotation: Any, - memo: TypeCheckMemo, -) -> None: - """ - Check that the given object is compatible with the given type annotation. - - This function should only be used by type checker callables. Applications should use - :func:`~.check_type` instead. 
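For orientation, a minimal sketch of that public ``check_type()`` call, assuming typeguard is importable (``names`` is a hypothetical variable; the builtin ``list[str]`` form needs Python 3.9+)::

    from typeguard import check_type

    # Returns the value unchanged when it matches the annotation...
    names = check_type(["a", "b"], list[str])

    # ...and raises TypeCheckError on a mismatch (e.g. "int is not a list").
    check_type(42, list[str])
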
- - :param value: the value to check - :param annotation: the type annotation to check against - :param memo: a memo object containing configuration and information necessary for - looking up forward references - """ - - if isinstance(annotation, ForwardRef): - try: - annotation = evaluate_forwardref(annotation, memo) - except NameError: - if memo.config.forward_ref_policy is ForwardRefPolicy.ERROR: - raise - elif memo.config.forward_ref_policy is ForwardRefPolicy.WARN: - warnings.warn( - f"Cannot resolve forward reference {annotation.__forward_arg__!r}", - TypeHintWarning, - stacklevel=get_stacklevel(), - ) - - return - - if annotation is Any or annotation is SubclassableAny or isinstance(value, Mock): - return - - # Skip type checks if value is an instance of a class that inherits from Any - if not isclass(value) and SubclassableAny in type(value).__bases__: - return - - extras: tuple[Any, ...] - origin_type = get_origin(annotation) - if origin_type is Annotated: - annotation, *extras_ = get_args(annotation) - extras = tuple(extras_) - origin_type = get_origin(annotation) - else: - extras = () - - if origin_type is not None: - args = get_args(annotation) - - # Compatibility hack to distinguish between unparametrized and empty tuple - # (tuple[()]), necessary due to https://github.com/python/cpython/issues/91137 - if origin_type in (tuple, Tuple) and annotation is not Tuple and not args: - args = ((),) - else: - origin_type = annotation - args = () - - for lookup_func in checker_lookup_functions: - checker = lookup_func(origin_type, args, extras) - if checker: - checker(value, origin_type, args, memo) - return - - if isclass(origin_type): - if not isinstance(value, origin_type): - raise TypeCheckError(f"is not an instance of {qualified_name(origin_type)}") - elif type(origin_type) is str: # noqa: E721 - warnings.warn( - f"Skipping type check against {origin_type!r}; this looks like a " - f"string-form forward reference imported from another module", - TypeHintWarning, - stacklevel=get_stacklevel(), - ) - - -# Equality checks are applied to these -origin_type_checkers = { - bytes: check_byteslike, - AbstractSet: check_set, - BinaryIO: check_io, - Callable: check_callable, - collections.abc.Callable: check_callable, - complex: check_number, - dict: check_mapping, - Dict: check_mapping, - float: check_number, - frozenset: check_set, - IO: check_io, - list: check_list, - List: check_list, - typing.Literal: check_literal, - Mapping: check_mapping, - MutableMapping: check_mapping, - None: check_none, - collections.abc.Mapping: check_mapping, - collections.abc.MutableMapping: check_mapping, - Sequence: check_sequence, - collections.abc.Sequence: check_sequence, - collections.abc.Set: check_set, - set: check_set, - Set: check_set, - TextIO: check_io, - tuple: check_tuple, - Tuple: check_tuple, - type: check_class, - Type: check_class, - Union: check_union, -} -if sys.version_info >= (3, 10): - origin_type_checkers[types.UnionType] = check_uniontype - origin_type_checkers[typing.TypeGuard] = check_typeguard -if sys.version_info >= (3, 11): - origin_type_checkers.update( - {typing.LiteralString: check_literal_string, typing.Self: check_self} - ) -if typing_extensions is not None: - # On some Python versions, these may simply be re-exports from typing, - # but exactly which Python versions is subject to change, - # so it's best to err on the safe side - # and update the dictionary on all Python versions - # if typing_extensions is installed - origin_type_checkers[typing_extensions.Literal] = 
check_literal - origin_type_checkers[typing_extensions.LiteralString] = check_literal_string - origin_type_checkers[typing_extensions.Self] = check_self - origin_type_checkers[typing_extensions.TypeGuard] = check_typeguard - - -def builtin_checker_lookup( - origin_type: Any, args: tuple[Any, ...], extras: tuple[Any, ...] -) -> TypeCheckerCallable | None: - checker = origin_type_checkers.get(origin_type) - if checker is not None: - return checker - elif is_typeddict(origin_type): - return check_typed_dict - elif isclass(origin_type) and issubclass( - origin_type, - Tuple, # type: ignore[arg-type] - ): - # NamedTuple - return check_tuple - elif getattr(origin_type, "_is_protocol", False): - return check_protocol - elif isinstance(origin_type, ParamSpec): - return check_paramspec - elif isinstance(origin_type, TypeVar): - return check_typevar - elif origin_type.__class__ is NewType: - # typing.NewType on Python 3.10+ - return check_newtype - elif ( - isfunction(origin_type) - and getattr(origin_type, "__module__", None) == "typing" - and getattr(origin_type, "__qualname__", "").startswith("NewType.") - and hasattr(origin_type, "__supertype__") - ): - # typing.NewType on Python 3.9 and below - return check_newtype - - return None - - -checker_lookup_functions.append(builtin_checker_lookup) - - -def load_plugins() -> None: - """ - Load all type checker lookup functions from entry points. - - All entry points from the ``typeguard.checker_lookup`` group are loaded, and the - returned lookup functions are added to :data:`typeguard.checker_lookup_functions`. - - .. note:: This function is called implicitly on import, unless the - ``TYPEGUARD_DISABLE_PLUGIN_AUTOLOAD`` environment variable is present. - """ - - for ep in entry_points(group="typeguard.checker_lookup"): - try: - plugin = ep.load() - except Exception as exc: - warnings.warn( - f"Failed to load plugin {ep.name!r}: " f"{qualified_name(exc)}: {exc}", - stacklevel=2, - ) - continue - - if not callable(plugin): - warnings.warn( - f"Plugin {ep} returned a non-callable object: {plugin!r}", stacklevel=2 - ) - continue - - checker_lookup_functions.insert(0, plugin) diff --git a/setuptools/_vendor/typeguard/_config.py b/setuptools/_vendor/typeguard/_config.py deleted file mode 100644 index 36efad5396..0000000000 --- a/setuptools/_vendor/typeguard/_config.py +++ /dev/null @@ -1,108 +0,0 @@ -from __future__ import annotations - -from collections.abc import Iterable -from dataclasses import dataclass -from enum import Enum, auto -from typing import TYPE_CHECKING, TypeVar - -if TYPE_CHECKING: - from ._functions import TypeCheckFailCallback - -T = TypeVar("T") - - -class ForwardRefPolicy(Enum): - """ - Defines how unresolved forward references are handled. - - Members: - - * ``ERROR``: propagate the :exc:`NameError` when the forward reference lookup fails - * ``WARN``: emit a :class:`~.TypeHintWarning` if the forward reference lookup fails - * ``IGNORE``: silently skip checks for unresolveable forward references - """ - - ERROR = auto() - WARN = auto() - IGNORE = auto() - - -class CollectionCheckStrategy(Enum): - """ - Specifies how thoroughly the contents of collections are type checked. 
- - This has an effect on the following built-in checkers: - - * ``AbstractSet`` - * ``Dict`` - * ``List`` - * ``Mapping`` - * ``Set`` - * ``Tuple[<type>, ...]`` (arbitrarily sized tuples) - - Members: - - * ``FIRST_ITEM``: check only the first item - * ``ALL_ITEMS``: check all items - """ - - FIRST_ITEM = auto() - ALL_ITEMS = auto() - - def iterate_samples(self, collection: Iterable[T]) -> Iterable[T]: - if self is CollectionCheckStrategy.FIRST_ITEM: - try: - return [next(iter(collection))] - except StopIteration: - return () - else: - return collection - - -@dataclass -class TypeCheckConfiguration: - """ - You can change Typeguard's behavior with these settings. - - .. attribute:: typecheck_fail_callback - :type: Callable[[TypeCheckError, TypeCheckMemo], Any] - - Callable that is called when type checking fails. - - Default: ``None`` (the :exc:`~.TypeCheckError` is raised directly) - - .. attribute:: forward_ref_policy - :type: ForwardRefPolicy - - Specifies what to do when a forward reference fails to resolve. - - Default: ``WARN`` - - .. attribute:: collection_check_strategy - :type: CollectionCheckStrategy - - Specifies how thoroughly the contents of collections (list, dict, etc.) are - type checked. - - Default: ``FIRST_ITEM`` - - .. attribute:: debug_instrumentation - :type: bool - - If set to ``True``, the code of modules or functions instrumented by typeguard - is printed to ``sys.stderr`` after the instrumentation is done. - - Requires Python 3.9 or newer. - - Default: ``False`` - """ - - forward_ref_policy: ForwardRefPolicy = ForwardRefPolicy.WARN - typecheck_fail_callback: TypeCheckFailCallback | None = None - collection_check_strategy: CollectionCheckStrategy = ( - CollectionCheckStrategy.FIRST_ITEM - ) - debug_instrumentation: bool = False - - -global_config = TypeCheckConfiguration() diff --git a/setuptools/_vendor/typeguard/_decorators.py b/setuptools/_vendor/typeguard/_decorators.py deleted file mode 100644 index cf3253351f..0000000000 --- a/setuptools/_vendor/typeguard/_decorators.py +++ /dev/null @@ -1,235 +0,0 @@ -from __future__ import annotations - -import ast -import inspect -import sys -from collections.abc import Sequence -from functools import partial -from inspect import isclass, isfunction -from types import CodeType, FrameType, FunctionType -from typing import TYPE_CHECKING, Any, Callable, ForwardRef, TypeVar, cast, overload -from warnings import warn - -from ._config import CollectionCheckStrategy, ForwardRefPolicy, global_config -from ._exceptions import InstrumentationWarning -from ._functions import TypeCheckFailCallback -from ._transformer import TypeguardTransformer -from ._utils import Unset, function_name, get_stacklevel, is_method_of, unset - -if TYPE_CHECKING: - from typeshed.stdlib.types import _Cell - - _F = TypeVar("_F") - - def typeguard_ignore(f: _F) -> _F: - """This decorator is a noop during static type-checking.""" - return f - -else: - from typing import no_type_check as typeguard_ignore # noqa: F401 - -T_CallableOrType = TypeVar("T_CallableOrType", bound=Callable[..., Any]) - - -def make_cell(value: object) -> _Cell: - return (lambda: value).__closure__[0] # type: ignore[index] - - -def find_target_function( - new_code: CodeType, target_path: Sequence[str], firstlineno: int -) -> CodeType | None: - target_name = target_path[0] - for const in new_code.co_consts: - if isinstance(const, CodeType): - if const.co_name == target_name: - if const.co_firstlineno == firstlineno: - return const - elif len(target_path) > 1: - target_code =
find_target_function( - const, target_path[1:], firstlineno - ) - if target_code: - return target_code - - return None - - -def instrument(f: T_CallableOrType) -> FunctionType | str: - if not getattr(f, "__code__", None): - return "no code associated" - elif not getattr(f, "__module__", None): - return "__module__ attribute is not set" - elif f.__code__.co_filename == "<stdin>": - return "cannot instrument functions defined in a REPL" - elif hasattr(f, "__wrapped__"): - return ( - "@typechecked only supports instrumenting functions wrapped with " - "@classmethod, @staticmethod or @property" - ) - - target_path = [item for item in f.__qualname__.split(".") if item != "<locals>"] - module_source = inspect.getsource(sys.modules[f.__module__]) - module_ast = ast.parse(module_source) - instrumentor = TypeguardTransformer(target_path, f.__code__.co_firstlineno) - instrumentor.visit(module_ast) - - if not instrumentor.target_node or instrumentor.target_lineno is None: - return "instrumentor did not find the target function" - - module_code = compile(module_ast, f.__code__.co_filename, "exec", dont_inherit=True) - new_code = find_target_function( - module_code, target_path, instrumentor.target_lineno - ) - if not new_code: - return "cannot find the target function in the AST" - - if global_config.debug_instrumentation and sys.version_info >= (3, 9): - # Find the matching AST node, then unparse it to source and print to stderr - print( - f"Source code of {f.__qualname__}() after instrumentation:" - "\n----------------------------------------------", - file=sys.stderr, - ) - print(ast.unparse(instrumentor.target_node), file=sys.stderr) - print( - "----------------------------------------------", - file=sys.stderr, - ) - - closure = f.__closure__ - if new_code.co_freevars != f.__code__.co_freevars: - # Create a new closure and find values for the new free variables - frame = cast(FrameType, inspect.currentframe()) - frame = cast(FrameType, frame.f_back) - frame_locals = cast(FrameType, frame.f_back).f_locals - cells: list[_Cell] = [] - for key in new_code.co_freevars: - if key in instrumentor.names_used_in_annotations: - # Find the value and make a new cell from it - value = frame_locals.get(key) or ForwardRef(key) - cells.append(make_cell(value)) - else: - # Reuse the cell from the existing closure - assert f.__closure__ - cells.append(f.__closure__[f.__code__.co_freevars.index(key)]) - - closure = tuple(cells) - - new_function = FunctionType(new_code, f.__globals__, f.__name__, closure=closure) - new_function.__module__ = f.__module__ - new_function.__name__ = f.__name__ - new_function.__qualname__ = f.__qualname__ - new_function.__annotations__ = f.__annotations__ - new_function.__doc__ = f.__doc__ - new_function.__defaults__ = f.__defaults__ - new_function.__kwdefaults__ = f.__kwdefaults__ - return new_function - - -@overload -def typechecked( - *, - forward_ref_policy: ForwardRefPolicy | Unset = unset, - typecheck_fail_callback: TypeCheckFailCallback | Unset = unset, - collection_check_strategy: CollectionCheckStrategy | Unset = unset, - debug_instrumentation: bool | Unset = unset, -) -> Callable[[T_CallableOrType], T_CallableOrType]: ... - - -@overload -def typechecked(target: T_CallableOrType) -> T_CallableOrType: ...
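Taken together, the two overloads above let ``@typechecked`` be applied either bare or with keyword overrides; a minimal sketch of the bare form, with ``greet`` as a hypothetical function (the decorator is a no-op when Python runs with ``-O``)::

    from typeguard import typechecked

    @typechecked
    def greet(name: str) -> str:
        return f"Hello, {name}"

    greet("world")  # passes the injected argument and return checks
    greet(42)       # raises TypeCheckError at call time
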
- - -def typechecked( - target: T_CallableOrType | None = None, - *, - forward_ref_policy: ForwardRefPolicy | Unset = unset, - typecheck_fail_callback: TypeCheckFailCallback | Unset = unset, - collection_check_strategy: CollectionCheckStrategy | Unset = unset, - debug_instrumentation: bool | Unset = unset, -) -> Any: - """ - Instrument the target function to perform run-time type checking. - - This decorator recompiles the target function, injecting code to type check - arguments, return values, yield values (excluding ``yield from``) and assignments to - annotated local variables. - - This can also be used as a class decorator. This will instrument all type annotated - methods, including :func:`@classmethod <classmethod>`, - :func:`@staticmethod <staticmethod>`, and :class:`@property <property>` decorated - methods in the class. - - .. note:: When Python is run in optimized mode (``-O`` or ``-OO``), this decorator - is a no-op. This is a feature meant for selectively introducing type checking - into a code base where the checks aren't meant to be run in production. - - :param target: the function or class to enable type checking for - :param forward_ref_policy: override for - :attr:`.TypeCheckConfiguration.forward_ref_policy` - :param typecheck_fail_callback: override for - :attr:`.TypeCheckConfiguration.typecheck_fail_callback` - :param collection_check_strategy: override for - :attr:`.TypeCheckConfiguration.collection_check_strategy` - :param debug_instrumentation: override for - :attr:`.TypeCheckConfiguration.debug_instrumentation` - - """ - if target is None: - return partial( - typechecked, - forward_ref_policy=forward_ref_policy, - typecheck_fail_callback=typecheck_fail_callback, - collection_check_strategy=collection_check_strategy, - debug_instrumentation=debug_instrumentation, - ) - - if not __debug__: - return target - - if isclass(target): - for key, attr in target.__dict__.items(): - if is_method_of(attr, target): - retval = instrument(attr) - if isfunction(retval): - setattr(target, key, retval) - elif isinstance(attr, (classmethod, staticmethod)): - if is_method_of(attr.__func__, target): - retval = instrument(attr.__func__) - if isfunction(retval): - wrapper = attr.__class__(retval) - setattr(target, key, wrapper) - elif isinstance(attr, property): - kwargs: dict[str, Any] = dict(doc=attr.__doc__) - for name in ("fset", "fget", "fdel"): - property_func = kwargs[name] = getattr(attr, name) - if is_method_of(property_func, target): - retval = instrument(property_func) - if isfunction(retval): - kwargs[name] = retval - - setattr(target, key, attr.__class__(**kwargs)) - - return target - - # Find either the first Python wrapper or the actual function - wrapper_class: ( - type[classmethod[Any, Any, Any]] | type[staticmethod[Any, Any]] | None - ) = None - if isinstance(target, (classmethod, staticmethod)): - wrapper_class = target.__class__ - target = target.__func__ - - retval = instrument(target) - if isinstance(retval, str): - warn( - f"{retval} -- not typechecking {function_name(target)}", - InstrumentationWarning, - stacklevel=get_stacklevel(), - ) - return target - - if wrapper_class is None: - return retval - else: - return wrapper_class(retval) diff --git a/setuptools/_vendor/typeguard/_exceptions.py b/setuptools/_vendor/typeguard/_exceptions.py deleted file mode 100644 index 625437a649..0000000000 --- a/setuptools/_vendor/typeguard/_exceptions.py +++ /dev/null @@ -1,42 +0,0 @@ -from collections import deque -from typing import Deque - - -class TypeHintWarning(UserWarning): - """ - A warning that is emitted
when a type hint in string form could not be resolved to - an actual type. - """ - - -class TypeCheckWarning(UserWarning): - """Emitted by typeguard's type checkers when a type mismatch is detected.""" - - def __init__(self, message: str): - super().__init__(message) - - -class InstrumentationWarning(UserWarning): - """Emitted when there's a problem with instrumenting a function for type checks.""" - - def __init__(self, message: str): - super().__init__(message) - - -class TypeCheckError(Exception): - """ - Raised by typeguard's type checkers when a type mismatch is detected. - """ - - def __init__(self, message: str): - super().__init__(message) - self._path: Deque[str] = deque() - - def append_path_element(self, element: str) -> None: - self._path.append(element) - - def __str__(self) -> str: - if self._path: - return " of ".join(self._path) + " " + str(self.args[0]) - else: - return str(self.args[0]) diff --git a/setuptools/_vendor/typeguard/_functions.py b/setuptools/_vendor/typeguard/_functions.py deleted file mode 100644 index 28497856a3..0000000000 --- a/setuptools/_vendor/typeguard/_functions.py +++ /dev/null @@ -1,308 +0,0 @@ -from __future__ import annotations - -import sys -import warnings -from typing import Any, Callable, NoReturn, TypeVar, Union, overload - -from . import _suppression -from ._checkers import BINARY_MAGIC_METHODS, check_type_internal -from ._config import ( - CollectionCheckStrategy, - ForwardRefPolicy, - TypeCheckConfiguration, -) -from ._exceptions import TypeCheckError, TypeCheckWarning -from ._memo import TypeCheckMemo -from ._utils import get_stacklevel, qualified_name - -if sys.version_info >= (3, 11): - from typing import Literal, Never, TypeAlias -else: - from typing_extensions import Literal, Never, TypeAlias - -T = TypeVar("T") -TypeCheckFailCallback: TypeAlias = Callable[[TypeCheckError, TypeCheckMemo], Any] - - -@overload -def check_type( - value: object, - expected_type: type[T], - *, - forward_ref_policy: ForwardRefPolicy = ..., - typecheck_fail_callback: TypeCheckFailCallback | None = ..., - collection_check_strategy: CollectionCheckStrategy = ..., -) -> T: ... - - -@overload -def check_type( - value: object, - expected_type: Any, - *, - forward_ref_policy: ForwardRefPolicy = ..., - typecheck_fail_callback: TypeCheckFailCallback | None = ..., - collection_check_strategy: CollectionCheckStrategy = ..., -) -> Any: ... - - -def check_type( - value: object, - expected_type: Any, - *, - forward_ref_policy: ForwardRefPolicy = TypeCheckConfiguration().forward_ref_policy, - typecheck_fail_callback: TypeCheckFailCallback | None = ( - TypeCheckConfiguration().typecheck_fail_callback - ), - collection_check_strategy: CollectionCheckStrategy = ( - TypeCheckConfiguration().collection_check_strategy - ), -) -> Any: - """ - Ensure that ``value`` matches ``expected_type``. - - The types from the :mod:`typing` module do not support :func:`isinstance` or - :func:`issubclass` so a number of type specific checks are required. This function - knows which checker to call for which type. - - This function wraps :func:`~.check_type_internal` in the following ways: - - * Respects type checking suppression (:func:`~.suppress_type_checks`) - * Forms a :class:`~.TypeCheckMemo` from the current stack frame - * Calls the configured type check fail callback if the check fails - - Note that this function is independent of the globally shared configuration in - :data:`typeguard.config`. 
This means that usage within libraries is safe from being - affected by configuration changes made by other libraries or by the integrating - application. Instead, configuration options have the same default values as their - corresponding fields in :class:`TypeCheckConfiguration`. - - :param value: value to be checked against ``expected_type`` - :param expected_type: a class or generic type instance, or a tuple of such things - :param forward_ref_policy: see :attr:`TypeCheckConfiguration.forward_ref_policy` - :param typecheck_fail_callback: - see :attr:`TypeCheckConfiguration.typecheck_fail_callback` - :param collection_check_strategy: - see :attr:`TypeCheckConfiguration.collection_check_strategy` - :return: ``value``, unmodified - :raises TypeCheckError: if there is a type mismatch - - """ - if type(expected_type) is tuple: - expected_type = Union[expected_type] - - config = TypeCheckConfiguration( - forward_ref_policy=forward_ref_policy, - typecheck_fail_callback=typecheck_fail_callback, - collection_check_strategy=collection_check_strategy, - ) - - if _suppression.type_checks_suppressed or expected_type is Any: - return value - - frame = sys._getframe(1) - memo = TypeCheckMemo(frame.f_globals, frame.f_locals, config=config) - try: - check_type_internal(value, expected_type, memo) - except TypeCheckError as exc: - exc.append_path_element(qualified_name(value, add_class_prefix=True)) - if config.typecheck_fail_callback: - config.typecheck_fail_callback(exc, memo) - else: - raise - - return value - - -def check_argument_types( - func_name: str, - arguments: dict[str, tuple[Any, Any]], - memo: TypeCheckMemo, -) -> Literal[True]: - if _suppression.type_checks_suppressed: - return True - - for argname, (value, annotation) in arguments.items(): - if annotation is NoReturn or annotation is Never: - exc = TypeCheckError( - f"{func_name}() was declared never to be called but it was" - ) - if memo.config.typecheck_fail_callback: - memo.config.typecheck_fail_callback(exc, memo) - else: - raise exc - - try: - check_type_internal(value, annotation, memo) - except TypeCheckError as exc: - qualname = qualified_name(value, add_class_prefix=True) - exc.append_path_element(f'argument "{argname}" ({qualname})') - if memo.config.typecheck_fail_callback: - memo.config.typecheck_fail_callback(exc, memo) - else: - raise - - return True - - -def check_return_type( - func_name: str, - retval: T, - annotation: Any, - memo: TypeCheckMemo, -) -> T: - if _suppression.type_checks_suppressed: - return retval - - if annotation is NoReturn or annotation is Never: - exc = TypeCheckError(f"{func_name}() was declared never to return but it did") - if memo.config.typecheck_fail_callback: - memo.config.typecheck_fail_callback(exc, memo) - else: - raise exc - - try: - check_type_internal(retval, annotation, memo) - except TypeCheckError as exc: - # Allow NotImplemented if this is a binary magic method (__eq__() et al) - if retval is NotImplemented and annotation is bool: - # This does not (and cannot) check if it's actually a method - func_name = func_name.rsplit(".", 1)[-1] - if func_name in BINARY_MAGIC_METHODS: - return retval - - qualname = qualified_name(retval, add_class_prefix=True) - exc.append_path_element(f"the return value ({qualname})") - if memo.config.typecheck_fail_callback: - memo.config.typecheck_fail_callback(exc, memo) - else: - raise - - return retval - - -def check_send_type( - func_name: str, - sendval: T, - annotation: Any, - memo: TypeCheckMemo, -) -> T: - if _suppression.type_checks_suppressed: - return
sendval - - if annotation is NoReturn or annotation is Never: - exc = TypeCheckError( - f"{func_name}() was declared never to be sent a value to but it was" - ) - if memo.config.typecheck_fail_callback: - memo.config.typecheck_fail_callback(exc, memo) - else: - raise exc - - try: - check_type_internal(sendval, annotation, memo) - except TypeCheckError as exc: - qualname = qualified_name(sendval, add_class_prefix=True) - exc.append_path_element(f"the value sent to generator ({qualname})") - if memo.config.typecheck_fail_callback: - memo.config.typecheck_fail_callback(exc, memo) - else: - raise - - return sendval - - -def check_yield_type( - func_name: str, - yieldval: T, - annotation: Any, - memo: TypeCheckMemo, -) -> T: - if _suppression.type_checks_suppressed: - return yieldval - - if annotation is NoReturn or annotation is Never: - exc = TypeCheckError(f"{func_name}() was declared never to yield but it did") - if memo.config.typecheck_fail_callback: - memo.config.typecheck_fail_callback(exc, memo) - else: - raise exc - - try: - check_type_internal(yieldval, annotation, memo) - except TypeCheckError as exc: - qualname = qualified_name(yieldval, add_class_prefix=True) - exc.append_path_element(f"the yielded value ({qualname})") - if memo.config.typecheck_fail_callback: - memo.config.typecheck_fail_callback(exc, memo) - else: - raise - - return yieldval - - -def check_variable_assignment( - value: object, varname: str, annotation: Any, memo: TypeCheckMemo -) -> Any: - if _suppression.type_checks_suppressed: - return value - - try: - check_type_internal(value, annotation, memo) - except TypeCheckError as exc: - qualname = qualified_name(value, add_class_prefix=True) - exc.append_path_element(f"value assigned to {varname} ({qualname})") - if memo.config.typecheck_fail_callback: - memo.config.typecheck_fail_callback(exc, memo) - else: - raise - - return value - - -def check_multi_variable_assignment( - value: Any, targets: list[dict[str, Any]], memo: TypeCheckMemo -) -> Any: - if max(len(target) for target in targets) == 1: - iterated_values = [value] - else: - iterated_values = list(value) - - if not _suppression.type_checks_suppressed: - for expected_types in targets: - value_index = 0 - for ann_index, (varname, expected_type) in enumerate( - expected_types.items() - ): - if varname.startswith("*"): - varname = varname[1:] - keys_left = len(expected_types) - 1 - ann_index - next_value_index = len(iterated_values) - keys_left - obj: object = iterated_values[value_index:next_value_index] - value_index = next_value_index - else: - obj = iterated_values[value_index] - value_index += 1 - - try: - check_type_internal(obj, expected_type, memo) - except TypeCheckError as exc: - qualname = qualified_name(obj, add_class_prefix=True) - exc.append_path_element(f"value assigned to {varname} ({qualname})") - if memo.config.typecheck_fail_callback: - memo.config.typecheck_fail_callback(exc, memo) - else: - raise - - return iterated_values[0] if len(iterated_values) == 1 else iterated_values - - -def warn_on_error(exc: TypeCheckError, memo: TypeCheckMemo) -> None: - """ - Emit a warning on a type mismatch. - - This is intended to be used as an error handler in - :attr:`TypeCheckConfiguration.typecheck_fail_callback`. 
- - """ - warnings.warn(TypeCheckWarning(str(exc)), stacklevel=get_stacklevel()) diff --git a/setuptools/_vendor/typeguard/_importhook.py b/setuptools/_vendor/typeguard/_importhook.py deleted file mode 100644 index 8590540a5a..0000000000 --- a/setuptools/_vendor/typeguard/_importhook.py +++ /dev/null @@ -1,213 +0,0 @@ -from __future__ import annotations - -import ast -import sys -import types -from collections.abc import Callable, Iterable -from importlib.abc import MetaPathFinder -from importlib.machinery import ModuleSpec, SourceFileLoader -from importlib.util import cache_from_source, decode_source -from inspect import isclass -from os import PathLike -from types import CodeType, ModuleType, TracebackType -from typing import Sequence, TypeVar -from unittest.mock import patch - -from ._config import global_config -from ._transformer import TypeguardTransformer - -if sys.version_info >= (3, 12): - from collections.abc import Buffer -else: - from typing_extensions import Buffer - -if sys.version_info >= (3, 11): - from typing import ParamSpec -else: - from typing_extensions import ParamSpec - -if sys.version_info >= (3, 10): - from importlib.metadata import PackageNotFoundError, version -else: - from importlib_metadata import PackageNotFoundError, version - -try: - OPTIMIZATION = "typeguard" + "".join(version("typeguard").split(".")[:3]) -except PackageNotFoundError: - OPTIMIZATION = "typeguard" - -P = ParamSpec("P") -T = TypeVar("T") - - -# The name of this function is magical -def _call_with_frames_removed( - f: Callable[P, T], *args: P.args, **kwargs: P.kwargs -) -> T: - return f(*args, **kwargs) - - -def optimized_cache_from_source(path: str, debug_override: bool | None = None) -> str: - return cache_from_source(path, debug_override, optimization=OPTIMIZATION) - - -class TypeguardLoader(SourceFileLoader): - @staticmethod - def source_to_code( - data: Buffer | str | ast.Module | ast.Expression | ast.Interactive, - path: Buffer | str | PathLike[str] = "<string>", - ) -> CodeType: - if isinstance(data, (ast.Module, ast.Expression, ast.Interactive)): - tree = data - else: - if isinstance(data, str): - source = data - else: - source = decode_source(data) - - tree = _call_with_frames_removed( - ast.parse, - source, - path, - "exec", - ) - - tree = TypeguardTransformer().visit(tree) - ast.fix_missing_locations(tree) - - if global_config.debug_instrumentation and sys.version_info >= (3, 9): - print( - f"Source code of {path!r} after instrumentation:\n" - "----------------------------------------------", - file=sys.stderr, - ) - print(ast.unparse(tree), file=sys.stderr) - print("----------------------------------------------", file=sys.stderr) - - return _call_with_frames_removed( - compile, tree, path, "exec", 0, dont_inherit=True - ) - - def exec_module(self, module: ModuleType) -> None: - # Use a custom optimization marker – the import lock should make this monkey - # patch safe - with patch( - "importlib._bootstrap_external.cache_from_source", - optimized_cache_from_source, - ): - super().exec_module(module) - - -class TypeguardFinder(MetaPathFinder): - """ - Wraps another path finder and instruments the module with - :func:`@typechecked <typechecked>` if :meth:`should_instrument` returns - ``True``. - - Should not be used directly, but rather via :func:`~.install_import_hook`. - - ..
versionadded:: 2.6 - """ - - def __init__(self, packages: list[str] | None, original_pathfinder: MetaPathFinder): - self.packages = packages - self._original_pathfinder = original_pathfinder - - def find_spec( - self, - fullname: str, - path: Sequence[str] | None, - target: types.ModuleType | None = None, - ) -> ModuleSpec | None: - if self.should_instrument(fullname): - spec = self._original_pathfinder.find_spec(fullname, path, target) - if spec is not None and isinstance(spec.loader, SourceFileLoader): - spec.loader = TypeguardLoader(spec.loader.name, spec.loader.path) - return spec - - return None - - def should_instrument(self, module_name: str) -> bool: - """ - Determine whether the module with the given name should be instrumented. - - :param module_name: full name of the module that is about to be imported (e.g. - ``xyz.abc``) - - """ - if self.packages is None: - return True - - for package in self.packages: - if module_name == package or module_name.startswith(package + "."): - return True - - return False - - -class ImportHookManager: - """ - A handle that can be used to uninstall the Typeguard import hook. - """ - - def __init__(self, hook: MetaPathFinder): - self.hook = hook - - def __enter__(self) -> None: - pass - - def __exit__( - self, - exc_type: type[BaseException], - exc_val: BaseException, - exc_tb: TracebackType, - ) -> None: - self.uninstall() - - def uninstall(self) -> None: - """Uninstall the import hook.""" - try: - sys.meta_path.remove(self.hook) - except ValueError: - pass # already removed - - -def install_import_hook( - packages: Iterable[str] | None = None, - *, - cls: type[TypeguardFinder] = TypeguardFinder, -) -> ImportHookManager: - """ - Install an import hook that instruments functions for automatic type checking. - - This only affects modules loaded **after** this hook has been installed. - - :param packages: an iterable of package names to instrument, or ``None`` to - instrument all packages - :param cls: a custom meta path finder class - :return: a context manager that uninstalls the hook on exit (or when you call - ``.uninstall()``) - - .. versionadded:: 2.6 - - """ - if packages is None: - target_packages: list[str] | None = None - elif isinstance(packages, str): - target_packages = [packages] - else: - target_packages = list(packages) - - for finder in sys.meta_path: - if ( - isclass(finder) - and finder.__name__ == "PathFinder" - and hasattr(finder, "find_spec") - ): - break - else: - raise RuntimeError("Cannot find a PathFinder in sys.meta_path") - - hook = cls(target_packages, finder) - sys.meta_path.insert(0, hook) - return ImportHookManager(hook) diff --git a/setuptools/_vendor/typeguard/_memo.py b/setuptools/_vendor/typeguard/_memo.py deleted file mode 100644 index 1d0d80c66d..0000000000 --- a/setuptools/_vendor/typeguard/_memo.py +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import annotations - -from typing import Any - -from typeguard._config import TypeCheckConfiguration, global_config - - -class TypeCheckMemo: - """ - Contains information necessary for type checkers to do their work. - - .. attribute:: globals - :type: dict[str, Any] - - Dictionary of global variables to use for resolving forward references. - - .. attribute:: locals - :type: dict[str, Any] - - Dictionary of local variables to use for resolving forward references. - - .. 
attribute:: self_type - :type: type | None - - When running type checks within an instance method or class method, this is the - class object that the first argument (usually named ``self`` or ``cls``) refers - to. - - .. attribute:: config - :type: TypeCheckConfiguration - - Contains the configuration for a particular set of type checking operations. - """ - - __slots__ = "globals", "locals", "self_type", "config" - - def __init__( - self, - globals: dict[str, Any], - locals: dict[str, Any], - *, - self_type: type | None = None, - config: TypeCheckConfiguration = global_config, - ): - self.globals = globals - self.locals = locals - self.self_type = self_type - self.config = config diff --git a/setuptools/_vendor/typeguard/_pytest_plugin.py b/setuptools/_vendor/typeguard/_pytest_plugin.py deleted file mode 100644 index 7b2f494ec7..0000000000 --- a/setuptools/_vendor/typeguard/_pytest_plugin.py +++ /dev/null @@ -1,127 +0,0 @@ -from __future__ import annotations - -import sys -import warnings -from typing import TYPE_CHECKING, Any, Literal - -from typeguard._config import CollectionCheckStrategy, ForwardRefPolicy, global_config -from typeguard._exceptions import InstrumentationWarning -from typeguard._importhook import install_import_hook -from typeguard._utils import qualified_name, resolve_reference - -if TYPE_CHECKING: - from pytest import Config, Parser - - -def pytest_addoption(parser: Parser) -> None: - def add_ini_option( - opt_type: ( - Literal["string", "paths", "pathlist", "args", "linelist", "bool"] | None - ), - ) -> None: - parser.addini( - group.options[-1].names()[0][2:], - group.options[-1].attrs()["help"], - opt_type, - ) - - group = parser.getgroup("typeguard") - group.addoption( - "--typeguard-packages", - action="store", - help="comma separated name list of packages and modules to instrument for " - "type checking, or :all: to instrument all modules loaded after typeguard", - ) - add_ini_option("linelist") - - group.addoption( - "--typeguard-debug-instrumentation", - action="store_true", - help="print all instrumented code to stderr", - ) - add_ini_option("bool") - - group.addoption( - "--typeguard-typecheck-fail-callback", - action="store", - help=( - "a module:varname (e.g. 
typeguard:warn_on_error) reference to a function "
-            "that is called (with the exception, and memo object as arguments) to "
-            "handle a TypeCheckError"
-        ),
-    )
-    add_ini_option("string")
-
-    group.addoption(
-        "--typeguard-forward-ref-policy",
-        action="store",
-        choices=list(ForwardRefPolicy.__members__),
-        help=(
-            "determines how to deal with unresolvable forward references in type "
-            "annotations"
-        ),
-    )
-    add_ini_option("string")
-
-    group.addoption(
-        "--typeguard-collection-check-strategy",
-        action="store",
-        choices=list(CollectionCheckStrategy.__members__),
-        help="determines how thoroughly to check collections (list, dict, etc)",
-    )
-    add_ini_option("string")
-
-
-def pytest_configure(config: Config) -> None:
-    def getoption(name: str) -> Any:
-        return config.getoption(name.replace("-", "_")) or config.getini(name)
-
-    packages: list[str] | None = []
-    if packages_option := config.getoption("typeguard_packages"):
-        packages = [pkg.strip() for pkg in packages_option.split(",")]
-    elif packages_ini := config.getini("typeguard-packages"):
-        packages = packages_ini
-
-    if packages:
-        if packages == [":all:"]:
-            packages = None
-        else:
-            already_imported_packages = sorted(
-                package for package in packages if package in sys.modules
-            )
-            if already_imported_packages:
-                warnings.warn(
-                    f"typeguard cannot check these packages because they are already "
-                    f"imported: {', '.join(already_imported_packages)}",
-                    InstrumentationWarning,
-                    stacklevel=1,
-                )
-
-        install_import_hook(packages=packages)
-
-    debug_option = getoption("typeguard-debug-instrumentation")
-    if debug_option:
-        global_config.debug_instrumentation = True
-
-    fail_callback_option = getoption("typeguard-typecheck-fail-callback")
-    if fail_callback_option:
-        callback = resolve_reference(fail_callback_option)
-        if not callable(callback):
-            raise TypeError(
-                f"{fail_callback_option} ({qualified_name(callback.__class__)}) is not "
-                f"a callable"
-            )
-
-        global_config.typecheck_fail_callback = callback
-
-    forward_ref_policy_option = getoption("typeguard-forward-ref-policy")
-    if forward_ref_policy_option:
-        forward_ref_policy = ForwardRefPolicy.__members__[forward_ref_policy_option]
-        global_config.forward_ref_policy = forward_ref_policy
-
-    collection_check_strategy_option = getoption("typeguard-collection-check-strategy")
-    if collection_check_strategy_option:
-        collection_check_strategy = CollectionCheckStrategy.__members__[
-            collection_check_strategy_option
-        ]
-        global_config.collection_check_strategy = collection_check_strategy
diff --git a/setuptools/_vendor/typeguard/_suppression.py b/setuptools/_vendor/typeguard/_suppression.py
deleted file mode 100644
index bbbfbfbe8e..0000000000
--- a/setuptools/_vendor/typeguard/_suppression.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from __future__ import annotations
-
-import sys
-from collections.abc import Callable, Generator
-from contextlib import contextmanager
-from functools import update_wrapper
-from threading import Lock
-from typing import ContextManager, TypeVar, overload
-
-if sys.version_info >= (3, 10):
-    from typing import ParamSpec
-else:
-    from typing_extensions import ParamSpec
-
-P = ParamSpec("P")
-T = TypeVar("T")
-
-type_checks_suppressed = 0
-type_checks_suppress_lock = Lock()
-
-
-@overload
-def suppress_type_checks(func: Callable[P, T]) -> Callable[P, T]: ...
-
-
-@overload
-def suppress_type_checks() -> ContextManager[None]: ...
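The two stub overloads above correspond to the two ways the suppress_type_checks() helper defined just below can be used: as a decorator and as a context manager. A minimal usage sketch, assuming these names are re-exported from the top-level typeguard package as in the vendored __init__ (legacy_io is an illustrative name):

from typeguard import check_type, suppress_type_checks

# Context-manager form: check_type() and instrumented functions become
# no-ops inside the block, and such blocks may be nested.
with suppress_type_checks():
    check_type("not an int", int)  # would normally raise TypeCheckError

# Decorator form: suppression is active for the whole call, callees included.
@suppress_type_checks
def legacy_io() -> None:
    check_type(b"raw bytes", str)  # skipped while legacy_io() is running

Because the suppression counter and its lock are module-level, the two forms compose across threads; checking resumes once every active suppression has exited.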
- - -def suppress_type_checks( - func: Callable[P, T] | None = None, -) -> Callable[P, T] | ContextManager[None]: - """ - Temporarily suppress all type checking. - - This function has two operating modes, based on how it's used: - - #. as a context manager (``with suppress_type_checks(): ...``) - #. as a decorator (``@suppress_type_checks``) - - When used as a context manager, :func:`check_type` and any automatically - instrumented functions skip the actual type checking. These context managers can be - nested. - - When used as a decorator, all type checking is suppressed while the function is - running. - - Type checking will resume once no more context managers are active and no decorated - functions are running. - - Both operating modes are thread-safe. - - """ - - def wrapper(*args: P.args, **kwargs: P.kwargs) -> T: - global type_checks_suppressed - - with type_checks_suppress_lock: - type_checks_suppressed += 1 - - assert func is not None - try: - return func(*args, **kwargs) - finally: - with type_checks_suppress_lock: - type_checks_suppressed -= 1 - - def cm() -> Generator[None, None, None]: - global type_checks_suppressed - - with type_checks_suppress_lock: - type_checks_suppressed += 1 - - try: - yield - finally: - with type_checks_suppress_lock: - type_checks_suppressed -= 1 - - if func is None: - # Context manager mode - return contextmanager(cm)() - else: - # Decorator mode - update_wrapper(wrapper, func) - return wrapper diff --git a/setuptools/_vendor/typeguard/_transformer.py b/setuptools/_vendor/typeguard/_transformer.py deleted file mode 100644 index 13ac3630e6..0000000000 --- a/setuptools/_vendor/typeguard/_transformer.py +++ /dev/null @@ -1,1229 +0,0 @@ -from __future__ import annotations - -import ast -import builtins -import sys -import typing -from ast import ( - AST, - Add, - AnnAssign, - Assign, - AsyncFunctionDef, - Attribute, - AugAssign, - BinOp, - BitAnd, - BitOr, - BitXor, - Call, - ClassDef, - Constant, - Dict, - Div, - Expr, - Expression, - FloorDiv, - FunctionDef, - If, - Import, - ImportFrom, - Index, - List, - Load, - LShift, - MatMult, - Mod, - Module, - Mult, - Name, - NamedExpr, - NodeTransformer, - NodeVisitor, - Pass, - Pow, - Return, - RShift, - Starred, - Store, - Sub, - Subscript, - Tuple, - Yield, - YieldFrom, - alias, - copy_location, - expr, - fix_missing_locations, - keyword, - walk, -) -from collections import defaultdict -from collections.abc import Generator, Sequence -from contextlib import contextmanager -from copy import deepcopy -from dataclasses import dataclass, field -from typing import Any, ClassVar, cast, overload - -generator_names = ( - "typing.Generator", - "collections.abc.Generator", - "typing.Iterator", - "collections.abc.Iterator", - "typing.Iterable", - "collections.abc.Iterable", - "typing.AsyncIterator", - "collections.abc.AsyncIterator", - "typing.AsyncIterable", - "collections.abc.AsyncIterable", - "typing.AsyncGenerator", - "collections.abc.AsyncGenerator", -) -anytype_names = ( - "typing.Any", - "typing_extensions.Any", -) -literal_names = ( - "typing.Literal", - "typing_extensions.Literal", -) -annotated_names = ( - "typing.Annotated", - "typing_extensions.Annotated", -) -ignore_decorators = ( - "typing.no_type_check", - "typeguard.typeguard_ignore", -) -aug_assign_functions = { - Add: "iadd", - Sub: "isub", - Mult: "imul", - MatMult: "imatmul", - Div: "itruediv", - FloorDiv: "ifloordiv", - Mod: "imod", - Pow: "ipow", - LShift: "ilshift", - RShift: "irshift", - BitAnd: "iand", - BitXor: "ixor", - BitOr: "ior", -} - - 
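The lookup tables above drive the AST rewriting implemented by the classes that follow; in normal use the transformer is not invoked directly but through the import hook from _importhook.py. A minimal sketch of that entry point, where mypkg is a purely hypothetical package name:

from typeguard import install_import_hook

# Instrument every module under "mypkg" imported from this point on;
# modules already present in sys.modules are not retroactively instrumented.
with install_import_hook(["mypkg"]):
    import mypkg.api  # loaded through TypeguardLoader with checks injected

# Leaving the block calls ImportHookManager.uninstall(), which removes the
# finder from sys.meta_path again.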
-@dataclass -class TransformMemo: - node: Module | ClassDef | FunctionDef | AsyncFunctionDef | None - parent: TransformMemo | None - path: tuple[str, ...] - joined_path: Constant = field(init=False) - return_annotation: expr | None = None - yield_annotation: expr | None = None - send_annotation: expr | None = None - is_async: bool = False - local_names: set[str] = field(init=False, default_factory=set) - imported_names: dict[str, str] = field(init=False, default_factory=dict) - ignored_names: set[str] = field(init=False, default_factory=set) - load_names: defaultdict[str, dict[str, Name]] = field( - init=False, default_factory=lambda: defaultdict(dict) - ) - has_yield_expressions: bool = field(init=False, default=False) - has_return_expressions: bool = field(init=False, default=False) - memo_var_name: Name | None = field(init=False, default=None) - should_instrument: bool = field(init=False, default=True) - variable_annotations: dict[str, expr] = field(init=False, default_factory=dict) - configuration_overrides: dict[str, Any] = field(init=False, default_factory=dict) - code_inject_index: int = field(init=False, default=0) - - def __post_init__(self) -> None: - elements: list[str] = [] - memo = self - while isinstance(memo.node, (ClassDef, FunctionDef, AsyncFunctionDef)): - elements.insert(0, memo.node.name) - if not memo.parent: - break - - memo = memo.parent - if isinstance(memo.node, (FunctionDef, AsyncFunctionDef)): - elements.insert(0, "") - - self.joined_path = Constant(".".join(elements)) - - # Figure out where to insert instrumentation code - if self.node: - for index, child in enumerate(self.node.body): - if isinstance(child, ImportFrom) and child.module == "__future__": - # (module only) __future__ imports must come first - continue - elif ( - isinstance(child, Expr) - and isinstance(child.value, Constant) - and isinstance(child.value.value, str) - ): - continue # docstring - - self.code_inject_index = index - break - - def get_unused_name(self, name: str) -> str: - memo: TransformMemo | None = self - while memo is not None: - if name in memo.local_names: - memo = self - name += "_" - else: - memo = memo.parent - - self.local_names.add(name) - return name - - def is_ignored_name(self, expression: expr | Expr | None) -> bool: - top_expression = ( - expression.value if isinstance(expression, Expr) else expression - ) - - if isinstance(top_expression, Attribute) and isinstance( - top_expression.value, Name - ): - name = top_expression.value.id - elif isinstance(top_expression, Name): - name = top_expression.id - else: - return False - - memo: TransformMemo | None = self - while memo is not None: - if name in memo.ignored_names: - return True - - memo = memo.parent - - return False - - def get_memo_name(self) -> Name: - if not self.memo_var_name: - self.memo_var_name = Name(id="memo", ctx=Load()) - - return self.memo_var_name - - def get_import(self, module: str, name: str) -> Name: - if module in self.load_names and name in self.load_names[module]: - return self.load_names[module][name] - - qualified_name = f"{module}.{name}" - if name in self.imported_names and self.imported_names[name] == qualified_name: - return Name(id=name, ctx=Load()) - - alias = self.get_unused_name(name) - node = self.load_names[module][name] = Name(id=alias, ctx=Load()) - self.imported_names[name] = qualified_name - return node - - def insert_imports(self, node: Module | FunctionDef | AsyncFunctionDef) -> None: - """Insert imports needed by injected code.""" - if not self.load_names: - return - - # Insert 
imports after any "from __future__ ..." imports and any docstring - for modulename, names in self.load_names.items(): - aliases = [ - alias(orig_name, new_name.id if orig_name != new_name.id else None) - for orig_name, new_name in sorted(names.items()) - ] - node.body.insert(self.code_inject_index, ImportFrom(modulename, aliases, 0)) - - def name_matches(self, expression: expr | Expr | None, *names: str) -> bool: - if expression is None: - return False - - path: list[str] = [] - top_expression = ( - expression.value if isinstance(expression, Expr) else expression - ) - - if isinstance(top_expression, Subscript): - top_expression = top_expression.value - elif isinstance(top_expression, Call): - top_expression = top_expression.func - - while isinstance(top_expression, Attribute): - path.insert(0, top_expression.attr) - top_expression = top_expression.value - - if not isinstance(top_expression, Name): - return False - - if top_expression.id in self.imported_names: - translated = self.imported_names[top_expression.id] - elif hasattr(builtins, top_expression.id): - translated = "builtins." + top_expression.id - else: - translated = top_expression.id - - path.insert(0, translated) - joined_path = ".".join(path) - if joined_path in names: - return True - elif self.parent: - return self.parent.name_matches(expression, *names) - else: - return False - - def get_config_keywords(self) -> list[keyword]: - if self.parent and isinstance(self.parent.node, ClassDef): - overrides = self.parent.configuration_overrides.copy() - else: - overrides = {} - - overrides.update(self.configuration_overrides) - return [keyword(key, value) for key, value in overrides.items()] - - -class NameCollector(NodeVisitor): - def __init__(self) -> None: - self.names: set[str] = set() - - def visit_Import(self, node: Import) -> None: - for name in node.names: - self.names.add(name.asname or name.name) - - def visit_ImportFrom(self, node: ImportFrom) -> None: - for name in node.names: - self.names.add(name.asname or name.name) - - def visit_Assign(self, node: Assign) -> None: - for target in node.targets: - if isinstance(target, Name): - self.names.add(target.id) - - def visit_NamedExpr(self, node: NamedExpr) -> Any: - if isinstance(node.target, Name): - self.names.add(node.target.id) - - def visit_FunctionDef(self, node: FunctionDef) -> None: - pass - - def visit_ClassDef(self, node: ClassDef) -> None: - pass - - -class GeneratorDetector(NodeVisitor): - """Detects if a function node is a generator function.""" - - contains_yields: bool = False - in_root_function: bool = False - - def visit_Yield(self, node: Yield) -> Any: - self.contains_yields = True - - def visit_YieldFrom(self, node: YieldFrom) -> Any: - self.contains_yields = True - - def visit_ClassDef(self, node: ClassDef) -> Any: - pass - - def visit_FunctionDef(self, node: FunctionDef | AsyncFunctionDef) -> Any: - if not self.in_root_function: - self.in_root_function = True - self.generic_visit(node) - self.in_root_function = False - - def visit_AsyncFunctionDef(self, node: AsyncFunctionDef) -> Any: - self.visit_FunctionDef(node) - - -class AnnotationTransformer(NodeTransformer): - type_substitutions: ClassVar[dict[str, tuple[str, str]]] = { - "builtins.dict": ("typing", "Dict"), - "builtins.list": ("typing", "List"), - "builtins.tuple": ("typing", "Tuple"), - "builtins.set": ("typing", "Set"), - "builtins.frozenset": ("typing", "FrozenSet"), - } - - def __init__(self, transformer: TypeguardTransformer): - self.transformer = transformer - self._memo = transformer._memo - 
self._level = 0
-
-    def visit(self, node: AST) -> Any:
-        # Don't process Literals
-        if isinstance(node, expr) and self._memo.name_matches(node, *literal_names):
-            return node
-
-        self._level += 1
-        new_node = super().visit(node)
-        self._level -= 1
-
-        if isinstance(new_node, Expression) and not hasattr(new_node, "body"):
-            return None
-
-        # Return None if this new node matches a variation of typing.Any
-        if (
-            self._level == 0
-            and isinstance(new_node, expr)
-            and self._memo.name_matches(new_node, *anytype_names)
-        ):
-            return None
-
-        return new_node
-
-    def visit_BinOp(self, node: BinOp) -> Any:
-        self.generic_visit(node)
-
-        if isinstance(node.op, BitOr):
-            # If either branch of the BinOp has been transformed to `None`, it means
-            # that a type in the union was ignored, so the entire annotation should be
-            # ignored
-            if not hasattr(node, "left") or not hasattr(node, "right"):
-                return None
-
-            # Return Any if either side is Any
-            if self._memo.name_matches(node.left, *anytype_names):
-                return node.left
-            elif self._memo.name_matches(node.right, *anytype_names):
-                return node.right
-
-            if sys.version_info < (3, 10):
-                union_name = self.transformer._get_import("typing", "Union")
-                return Subscript(
-                    value=union_name,
-                    slice=Index(
-                        Tuple(elts=[node.left, node.right], ctx=Load()), ctx=Load()
-                    ),
-                    ctx=Load(),
-                )
-
-        return node
-
-    def visit_Attribute(self, node: Attribute) -> Any:
-        if self._memo.is_ignored_name(node):
-            return None
-
-        return node
-
-    def visit_Subscript(self, node: Subscript) -> Any:
-        if self._memo.is_ignored_name(node.value):
-            return None
-
-        # The subscript of typing(_extensions).Literal can be any arbitrary string, so
-        # don't try to evaluate it as code
-        if node.slice:
-            if isinstance(node.slice, Index):
-                # Python 3.8
-                slice_value = node.slice.value  # type: ignore[attr-defined]
-            else:
-                slice_value = node.slice
-
-            if isinstance(slice_value, Tuple):
-                if self._memo.name_matches(node.value, *annotated_names):
-                    # Only treat the first argument to typing.Annotated as a potential
-                    # forward reference
-                    items = cast(
-                        typing.List[expr],
-                        [self.visit(slice_value.elts[0])] + slice_value.elts[1:],
-                    )
-                else:
-                    items = cast(
-                        typing.List[expr],
-                        [self.visit(item) for item in slice_value.elts],
-                    )
-
-                # If this is a Union and any of the items is Any, erase the entire
-                # annotation
-                if self._memo.name_matches(node.value, "typing.Union") and any(
-                    item is None
-                    or (
-                        isinstance(item, expr)
-                        and self._memo.name_matches(item, *anytype_names)
-                    )
-                    for item in items
-                ):
-                    return None
-
-                # If all items in the subscript were Any, erase the subscript entirely
-                if all(item is None for item in items):
-                    return node.value
-
-                for index, item in enumerate(items):
-                    if item is None:
-                        items[index] = self.transformer._get_import("typing", "Any")
-
-                slice_value.elts = items
-            else:
-                self.generic_visit(node)
-
-                # If the transformer erased the slice entirely, just return the node
-                # value without the subscript (unless it's Optional, in which case erase
-                # the node entirely)
-                if self._memo.name_matches(
-                    node.value, "typing.Optional"
-                ) and not hasattr(node, "slice"):
-                    return None
-                if sys.version_info >= (3, 9) and not hasattr(node, "slice"):
-                    return node.value
-                elif sys.version_info < (3, 9) and not hasattr(node.slice, "value"):
-                    return node.value
-
-        return node
-
-    def visit_Name(self, node: Name) -> Any:
-        if self._memo.is_ignored_name(node):
-            return None
-
-        if sys.version_info < (3, 9):
-            for typename, substitute in self.type_substitutions.items():
-                if self._memo.name_matches(node, typename):
-                    new_node = self.transformer._get_import(*substitute)
-                    return copy_location(new_node, node)
-
-        return node
-
-    def visit_Call(self, node: Call) -> Any:
-        # Don't recurse into calls
-        return node
-
-    def visit_Constant(self, node: Constant) -> Any:
-        if isinstance(node.value, str):
-            expression = ast.parse(node.value, mode="eval")
-            new_node = self.visit(expression)
-            if new_node:
-                return copy_location(new_node.body, node)
-            else:
-                return None
-
-        return node
-
-
-class TypeguardTransformer(NodeTransformer):
-    def __init__(
-        self, target_path: Sequence[str] | None = None, target_lineno: int | None = None
-    ) -> None:
-        self._target_path = tuple(target_path) if target_path else None
-        self._memo = self._module_memo = TransformMemo(None, None, ())
-        self.names_used_in_annotations: set[str] = set()
-        self.target_node: FunctionDef | AsyncFunctionDef | None = None
-        self.target_lineno = target_lineno
-
-    def generic_visit(self, node: AST) -> AST:
-        has_non_empty_body_initially = bool(getattr(node, "body", None))
-        initial_type = type(node)
-
-        node = super().generic_visit(node)
-
-        if (
-            type(node) is initial_type
-            and has_non_empty_body_initially
-            and hasattr(node, "body")
-            and not node.body
-        ):
-            # If we still have the same node type after transformation,
-            # but we've optimised its body away, we add a `pass` statement.
-            node.body = [Pass()]
-
-        return node
-
-    @contextmanager
-    def _use_memo(
-        self, node: ClassDef | FunctionDef | AsyncFunctionDef
-    ) -> Generator[None, Any, None]:
-        new_memo = TransformMemo(node, self._memo, self._memo.path + (node.name,))
-        old_memo = self._memo
-        self._memo = new_memo
-
-        if isinstance(node, (FunctionDef, AsyncFunctionDef)):
-            new_memo.should_instrument = (
-                self._target_path is None or new_memo.path == self._target_path
-            )
-            if new_memo.should_instrument:
-                # Check if the function is a generator function
-                detector = GeneratorDetector()
-                detector.visit(node)
-
-                # Extract yield, send and return types where possible from a subscripted
-                # annotation like Generator[int, str, bool]
-                return_annotation = deepcopy(node.returns)
-                if detector.contains_yields and new_memo.name_matches(
-                    return_annotation, *generator_names
-                ):
-                    if isinstance(return_annotation, Subscript):
-                        annotation_slice = return_annotation.slice
-
-                        # Python < 3.9
-                        if isinstance(annotation_slice, Index):
-                            annotation_slice = (
-                                annotation_slice.value  # type: ignore[attr-defined]
-                            )
-
-                        if isinstance(annotation_slice, Tuple):
-                            items = annotation_slice.elts
-                        else:
-                            items = [annotation_slice]
-
-                        if len(items) > 0:
-                            new_memo.yield_annotation = self._convert_annotation(
-                                items[0]
-                            )
-
-                        if len(items) > 1:
-                            new_memo.send_annotation = self._convert_annotation(
-                                items[1]
-                            )
-
-                        if len(items) > 2:
-                            new_memo.return_annotation = self._convert_annotation(
-                                items[2]
-                            )
-                else:
-                    new_memo.return_annotation = self._convert_annotation(
-                        return_annotation
-                    )
-
-        if isinstance(node, AsyncFunctionDef):
-            new_memo.is_async = True
-
-        yield
-        self._memo = old_memo
-
-    def _get_import(self, module: str, name: str) -> Name:
-        memo = self._memo if self._target_path else self._module_memo
-        return memo.get_import(module, name)
-
-    @overload
-    def _convert_annotation(self, annotation: None) -> None: ...
-
-    @overload
-    def _convert_annotation(self, annotation: expr) -> expr: ...
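As an aside on the version-dependent branch in visit_Name above: on Python < 3.9, subscripted builtins are swapped for their typing aliases via type_substitutions. A self-contained toy analogue of that substitution (not the vendored class itself), runnable on any recent Python:

import ast

SUBS = {"dict": "Dict", "list": "List", "tuple": "Tuple",
        "set": "Set", "frozenset": "FrozenSet"}

class BuiltinToTyping(ast.NodeTransformer):
    # Replace bare builtin names with the equivalent typing attribute,
    # preserving source locations as the vendored code does.
    def visit_Name(self, node: ast.Name) -> ast.AST:
        if node.id in SUBS:
            replacement = ast.Attribute(
                value=ast.Name(id="typing", ctx=ast.Load()),
                attr=SUBS[node.id],
                ctx=ast.Load(),
            )
            return ast.copy_location(replacement, node)
        return node

tree = ast.parse("dict[str, list[int]]", mode="eval")
print(ast.unparse(BuiltinToTyping().visit(tree)))
# -> typing.Dict[str, typing.List[int]]

The real transformer additionally records each substituted name so that insert_imports() can emit the matching "from typing import ..." statements.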
-
-    def _convert_annotation(self, annotation: expr | None) -> expr | None:
-        if annotation is None:
-            return None
-
-        # Convert PEP 604 unions (x | y) and generic built-in collections where
-        # necessary, and undo forward references
-        new_annotation = cast(expr, AnnotationTransformer(self).visit(annotation))
-        if isinstance(new_annotation, expr):
-            new_annotation = ast.copy_location(new_annotation, annotation)
-
-        # Store names used in the annotation
-        names = {node.id for node in walk(new_annotation) if isinstance(node, Name)}
-        self.names_used_in_annotations.update(names)
-
-        return new_annotation
-
-    def visit_Name(self, node: Name) -> Name:
-        self._memo.local_names.add(node.id)
-        return node
-
-    def visit_Module(self, node: Module) -> Module:
-        self._module_memo = self._memo = TransformMemo(node, None, ())
-        self.generic_visit(node)
-        self._module_memo.insert_imports(node)
-
-        fix_missing_locations(node)
-        return node
-
-    def visit_Import(self, node: Import) -> Import:
-        for name in node.names:
-            self._memo.local_names.add(name.asname or name.name)
-            self._memo.imported_names[name.asname or name.name] = name.name
-
-        return node
-
-    def visit_ImportFrom(self, node: ImportFrom) -> ImportFrom:
-        for name in node.names:
-            if name.name != "*":
-                alias = name.asname or name.name
-                self._memo.local_names.add(alias)
-                self._memo.imported_names[alias] = f"{node.module}.{name.name}"
-
-        return node
-
-    def visit_ClassDef(self, node: ClassDef) -> ClassDef | None:
-        self._memo.local_names.add(node.name)
-
-        # Eliminate top level classes not belonging to the target path
-        if (
-            self._target_path is not None
-            and not self._memo.path
-            and node.name != self._target_path[0]
-        ):
-            return None
-
-        with self._use_memo(node):
-            for decorator in node.decorator_list.copy():
-                if self._memo.name_matches(decorator, "typeguard.typechecked"):
-                    # Remove the decorator to prevent duplicate instrumentation
-                    node.decorator_list.remove(decorator)
-
-                    # Store any configuration overrides
-                    if isinstance(decorator, Call) and decorator.keywords:
-                        self._memo.configuration_overrides.update(
-                            {kw.arg: kw.value for kw in decorator.keywords if kw.arg}
-                        )
-
-            self.generic_visit(node)
-            return node
-
-    def visit_FunctionDef(
-        self, node: FunctionDef | AsyncFunctionDef
-    ) -> FunctionDef | AsyncFunctionDef | None:
-        """
-        Injects type checks for function arguments, and for a return of None if the
-        function is annotated to return something other than Any or None, and the body
-        ends without an explicit "return".
- - """ - self._memo.local_names.add(node.name) - - # Eliminate top level functions not belonging to the target path - if ( - self._target_path is not None - and not self._memo.path - and node.name != self._target_path[0] - ): - return None - - # Skip instrumentation if we're instrumenting the whole module and the function - # contains either @no_type_check or @typeguard_ignore - if self._target_path is None: - for decorator in node.decorator_list: - if self._memo.name_matches(decorator, *ignore_decorators): - return node - - with self._use_memo(node): - arg_annotations: dict[str, Any] = {} - if self._target_path is None or self._memo.path == self._target_path: - # Find line number we're supposed to match against - if node.decorator_list: - first_lineno = node.decorator_list[0].lineno - else: - first_lineno = node.lineno - - for decorator in node.decorator_list.copy(): - if self._memo.name_matches(decorator, "typing.overload"): - # Remove overloads entirely - return None - elif self._memo.name_matches(decorator, "typeguard.typechecked"): - # Remove the decorator to prevent duplicate instrumentation - node.decorator_list.remove(decorator) - - # Store any configuration overrides - if isinstance(decorator, Call) and decorator.keywords: - self._memo.configuration_overrides = { - kw.arg: kw.value for kw in decorator.keywords if kw.arg - } - - if self.target_lineno == first_lineno: - assert self.target_node is None - self.target_node = node - if node.decorator_list: - self.target_lineno = node.decorator_list[0].lineno - else: - self.target_lineno = node.lineno - - all_args = node.args.args + node.args.kwonlyargs + node.args.posonlyargs - - # Ensure that any type shadowed by the positional or keyword-only - # argument names are ignored in this function - for arg in all_args: - self._memo.ignored_names.add(arg.arg) - - # Ensure that any type shadowed by the variable positional argument name - # (e.g. "args" in *args) is ignored this function - if node.args.vararg: - self._memo.ignored_names.add(node.args.vararg.arg) - - # Ensure that any type shadowed by the variable keywrod argument name - # (e.g. 
"kwargs" in *kwargs) is ignored this function - if node.args.kwarg: - self._memo.ignored_names.add(node.args.kwarg.arg) - - for arg in all_args: - annotation = self._convert_annotation(deepcopy(arg.annotation)) - if annotation: - arg_annotations[arg.arg] = annotation - - if node.args.vararg: - annotation_ = self._convert_annotation(node.args.vararg.annotation) - if annotation_: - if sys.version_info >= (3, 9): - container = Name("tuple", ctx=Load()) - else: - container = self._get_import("typing", "Tuple") - - subscript_slice: Tuple | Index = Tuple( - [ - annotation_, - Constant(Ellipsis), - ], - ctx=Load(), - ) - if sys.version_info < (3, 9): - subscript_slice = Index(subscript_slice, ctx=Load()) - - arg_annotations[node.args.vararg.arg] = Subscript( - container, subscript_slice, ctx=Load() - ) - - if node.args.kwarg: - annotation_ = self._convert_annotation(node.args.kwarg.annotation) - if annotation_: - if sys.version_info >= (3, 9): - container = Name("dict", ctx=Load()) - else: - container = self._get_import("typing", "Dict") - - subscript_slice = Tuple( - [ - Name("str", ctx=Load()), - annotation_, - ], - ctx=Load(), - ) - if sys.version_info < (3, 9): - subscript_slice = Index(subscript_slice, ctx=Load()) - - arg_annotations[node.args.kwarg.arg] = Subscript( - container, subscript_slice, ctx=Load() - ) - - if arg_annotations: - self._memo.variable_annotations.update(arg_annotations) - - self.generic_visit(node) - - if arg_annotations: - annotations_dict = Dict( - keys=[Constant(key) for key in arg_annotations.keys()], - values=[ - Tuple([Name(key, ctx=Load()), annotation], ctx=Load()) - for key, annotation in arg_annotations.items() - ], - ) - func_name = self._get_import( - "typeguard._functions", "check_argument_types" - ) - args = [ - self._memo.joined_path, - annotations_dict, - self._memo.get_memo_name(), - ] - node.body.insert( - self._memo.code_inject_index, Expr(Call(func_name, args, [])) - ) - - # Add a checked "return None" to the end if there's no explicit return - # Skip if the return annotation is None or Any - if ( - self._memo.return_annotation - and (not self._memo.is_async or not self._memo.has_yield_expressions) - and not isinstance(node.body[-1], Return) - and ( - not isinstance(self._memo.return_annotation, Constant) - or self._memo.return_annotation.value is not None - ) - ): - func_name = self._get_import( - "typeguard._functions", "check_return_type" - ) - return_node = Return( - Call( - func_name, - [ - self._memo.joined_path, - Constant(None), - self._memo.return_annotation, - self._memo.get_memo_name(), - ], - [], - ) - ) - - # Replace a placeholder "pass" at the end - if isinstance(node.body[-1], Pass): - copy_location(return_node, node.body[-1]) - del node.body[-1] - - node.body.append(return_node) - - # Insert code to create the call memo, if it was ever needed for this - # function - if self._memo.memo_var_name: - memo_kwargs: dict[str, Any] = {} - if self._memo.parent and isinstance(self._memo.parent.node, ClassDef): - for decorator in node.decorator_list: - if ( - isinstance(decorator, Name) - and decorator.id == "staticmethod" - ): - break - elif ( - isinstance(decorator, Name) - and decorator.id == "classmethod" - ): - memo_kwargs["self_type"] = Name( - id=node.args.args[0].arg, ctx=Load() - ) - break - else: - if node.args.args: - if node.name == "__new__": - memo_kwargs["self_type"] = Name( - id=node.args.args[0].arg, ctx=Load() - ) - else: - memo_kwargs["self_type"] = Attribute( - Name(id=node.args.args[0].arg, ctx=Load()), - "__class__", - 
-
-                # Construct the function reference
-                # Nested functions get special treatment: the function name is added
-                # to free variables (and the closure of the resulting function)
-                names: list[str] = [node.name]
-                memo = self._memo.parent
-                while memo:
-                    if isinstance(memo.node, (FunctionDef, AsyncFunctionDef)):
-                        # This is a nested function. Use the function name as-is.
-                        del names[:-1]
-                        break
-                    elif not isinstance(memo.node, ClassDef):
-                        break
-
-                    names.insert(0, memo.node.name)
-                    memo = memo.parent
-
-                config_keywords = self._memo.get_config_keywords()
-                if config_keywords:
-                    memo_kwargs["config"] = Call(
-                        self._get_import("dataclasses", "replace"),
-                        [self._get_import("typeguard._config", "global_config")],
-                        config_keywords,
-                    )
-
-                self._memo.memo_var_name.id = self._memo.get_unused_name("memo")
-                memo_store_name = Name(id=self._memo.memo_var_name.id, ctx=Store())
-                globals_call = Call(Name(id="globals", ctx=Load()), [], [])
-                locals_call = Call(Name(id="locals", ctx=Load()), [], [])
-                memo_expr = Call(
-                    self._get_import("typeguard", "TypeCheckMemo"),
-                    [globals_call, locals_call],
-                    [keyword(key, value) for key, value in memo_kwargs.items()],
-                )
-                node.body.insert(
-                    self._memo.code_inject_index,
-                    Assign([memo_store_name], memo_expr),
-                )
-
-            self._memo.insert_imports(node)
-
-            # Special case the __new__() method to create a local alias from the
-            # class name to the first argument (usually "cls")
-            if (
-                isinstance(node, FunctionDef)
-                and node.args
-                and self._memo.parent is not None
-                and isinstance(self._memo.parent.node, ClassDef)
-                and node.name == "__new__"
-            ):
-                first_args_expr = Name(node.args.args[0].arg, ctx=Load())
-                cls_name = Name(self._memo.parent.node.name, ctx=Store())
-                node.body.insert(
-                    self._memo.code_inject_index,
-                    Assign([cls_name], first_args_expr),
-                )
-
-            # Remove any placeholder "pass" at the end
-            if isinstance(node.body[-1], Pass):
-                del node.body[-1]
-
-        return node
-
-    def visit_AsyncFunctionDef(
-        self, node: AsyncFunctionDef
-    ) -> FunctionDef | AsyncFunctionDef | None:
-        return self.visit_FunctionDef(node)
-
-    def visit_Return(self, node: Return) -> Return:
-        """This injects type checks into "return" statements."""
-        self.generic_visit(node)
-        if (
-            self._memo.return_annotation
-            and self._memo.should_instrument
-            and not self._memo.is_ignored_name(self._memo.return_annotation)
-        ):
-            func_name = self._get_import("typeguard._functions", "check_return_type")
-            old_node = node
-            retval = old_node.value or Constant(None)
-            node = Return(
-                Call(
-                    func_name,
-                    [
-                        self._memo.joined_path,
-                        retval,
-                        self._memo.return_annotation,
-                        self._memo.get_memo_name(),
-                    ],
-                    [],
-                )
-            )
-            copy_location(node, old_node)
-
-        return node
-
-    def visit_Yield(self, node: Yield) -> Yield | Call:
-        """
-        This injects type checks into "yield" expressions, checking both the yielded
-        value and the value sent back to the generator, when appropriate.
- - """ - self._memo.has_yield_expressions = True - self.generic_visit(node) - - if ( - self._memo.yield_annotation - and self._memo.should_instrument - and not self._memo.is_ignored_name(self._memo.yield_annotation) - ): - func_name = self._get_import("typeguard._functions", "check_yield_type") - yieldval = node.value or Constant(None) - node.value = Call( - func_name, - [ - self._memo.joined_path, - yieldval, - self._memo.yield_annotation, - self._memo.get_memo_name(), - ], - [], - ) - - if ( - self._memo.send_annotation - and self._memo.should_instrument - and not self._memo.is_ignored_name(self._memo.send_annotation) - ): - func_name = self._get_import("typeguard._functions", "check_send_type") - old_node = node - call_node = Call( - func_name, - [ - self._memo.joined_path, - old_node, - self._memo.send_annotation, - self._memo.get_memo_name(), - ], - [], - ) - copy_location(call_node, old_node) - return call_node - - return node - - def visit_AnnAssign(self, node: AnnAssign) -> Any: - """ - This injects a type check into a local variable annotation-assignment within a - function body. - - """ - self.generic_visit(node) - - if ( - isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef)) - and node.annotation - and isinstance(node.target, Name) - ): - self._memo.ignored_names.add(node.target.id) - annotation = self._convert_annotation(deepcopy(node.annotation)) - if annotation: - self._memo.variable_annotations[node.target.id] = annotation - if node.value: - func_name = self._get_import( - "typeguard._functions", "check_variable_assignment" - ) - node.value = Call( - func_name, - [ - node.value, - Constant(node.target.id), - annotation, - self._memo.get_memo_name(), - ], - [], - ) - - return node - - def visit_Assign(self, node: Assign) -> Any: - """ - This injects a type check into a local variable assignment within a function - body. The variable must have been annotated earlier in the function body. 
- - """ - self.generic_visit(node) - - # Only instrument function-local assignments - if isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef)): - targets: list[dict[Constant, expr | None]] = [] - check_required = False - for target in node.targets: - elts: Sequence[expr] - if isinstance(target, Name): - elts = [target] - elif isinstance(target, Tuple): - elts = target.elts - else: - continue - - annotations_: dict[Constant, expr | None] = {} - for exp in elts: - prefix = "" - if isinstance(exp, Starred): - exp = exp.value - prefix = "*" - - if isinstance(exp, Name): - self._memo.ignored_names.add(exp.id) - name = prefix + exp.id - annotation = self._memo.variable_annotations.get(exp.id) - if annotation: - annotations_[Constant(name)] = annotation - check_required = True - else: - annotations_[Constant(name)] = None - - targets.append(annotations_) - - if check_required: - # Replace missing annotations with typing.Any - for item in targets: - for key, expression in item.items(): - if expression is None: - item[key] = self._get_import("typing", "Any") - - if len(targets) == 1 and len(targets[0]) == 1: - func_name = self._get_import( - "typeguard._functions", "check_variable_assignment" - ) - target_varname = next(iter(targets[0])) - node.value = Call( - func_name, - [ - node.value, - target_varname, - targets[0][target_varname], - self._memo.get_memo_name(), - ], - [], - ) - elif targets: - func_name = self._get_import( - "typeguard._functions", "check_multi_variable_assignment" - ) - targets_arg = List( - [ - Dict(keys=list(target), values=list(target.values())) - for target in targets - ], - ctx=Load(), - ) - node.value = Call( - func_name, - [node.value, targets_arg, self._memo.get_memo_name()], - [], - ) - - return node - - def visit_NamedExpr(self, node: NamedExpr) -> Any: - """This injects a type check into an assignment expression (a := foo()).""" - self.generic_visit(node) - - # Only instrument function-local assignments - if isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef)) and isinstance( - node.target, Name - ): - self._memo.ignored_names.add(node.target.id) - - # Bail out if no matching annotation is found - annotation = self._memo.variable_annotations.get(node.target.id) - if annotation is None: - return node - - func_name = self._get_import( - "typeguard._functions", "check_variable_assignment" - ) - node.value = Call( - func_name, - [ - node.value, - Constant(node.target.id), - annotation, - self._memo.get_memo_name(), - ], - [], - ) - - return node - - def visit_AugAssign(self, node: AugAssign) -> Any: - """ - This injects a type check into an augmented assignment expression (a += 1). - - """ - self.generic_visit(node) - - # Only instrument function-local assignments - if isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef)) and isinstance( - node.target, Name - ): - # Bail out if no matching annotation is found - annotation = self._memo.variable_annotations.get(node.target.id) - if annotation is None: - return node - - # Bail out if the operator is not found (newer Python version?) 
-            try:
-                operator_func_name = aug_assign_functions[node.op.__class__]
-            except KeyError:
-                return node
-
-            operator_func = self._get_import("operator", operator_func_name)
-            operator_call = Call(
-                operator_func, [Name(node.target.id, ctx=Load()), node.value], []
-            )
-            check_call = Call(
-                self._get_import("typeguard._functions", "check_variable_assignment"),
-                [
-                    operator_call,
-                    Constant(node.target.id),
-                    annotation,
-                    self._memo.get_memo_name(),
-                ],
-                [],
-            )
-            return Assign(targets=[node.target], value=check_call)
-
-        return node
-
-    def visit_If(self, node: If) -> Any:
-        """
-        This blocks names from being collected from a module-level
-        "if typing.TYPE_CHECKING:" block, so that they won't be type checked.
-
-        """
-        self.generic_visit(node)
-
-        if (
-            self._memo is self._module_memo
-            and isinstance(node.test, Name)
-            and self._memo.name_matches(node.test, "typing.TYPE_CHECKING")
-        ):
-            collector = NameCollector()
-            collector.visit(node)
-            self._memo.ignored_names.update(collector.names)
-
-        return node
diff --git a/setuptools/_vendor/typeguard/_union_transformer.py b/setuptools/_vendor/typeguard/_union_transformer.py
deleted file mode 100644
index 19617e6af5..0000000000
--- a/setuptools/_vendor/typeguard/_union_transformer.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""
-Transforms lazily evaluated PEP 604 unions into typing.Unions, for compatibility with
-Python versions older than 3.10.
-"""
-
-from __future__ import annotations
-
-from ast import (
-    BinOp,
-    BitOr,
-    Index,
-    Load,
-    Name,
-    NodeTransformer,
-    Subscript,
-    fix_missing_locations,
-    parse,
-)
-from ast import Tuple as ASTTuple
-from types import CodeType
-from typing import Any, Dict, FrozenSet, List, Set, Tuple, Union
-
-type_substitutions = {
-    "dict": Dict,
-    "list": List,
-    "tuple": Tuple,
-    "set": Set,
-    "frozenset": FrozenSet,
-    "Union": Union,
-}
-
-
-class UnionTransformer(NodeTransformer):
-    def __init__(self, union_name: Name | None = None):
-        self.union_name = union_name or Name(id="Union", ctx=Load())
-
-    def visit_BinOp(self, node: BinOp) -> Any:
-        self.generic_visit(node)
-        if isinstance(node.op, BitOr):
-            return Subscript(
-                value=self.union_name,
-                slice=Index(
-                    ASTTuple(elts=[node.left, node.right], ctx=Load()), ctx=Load()
-                ),
-                ctx=Load(),
-            )
-
-        return node
-
-
-def compile_type_hint(hint: str) -> CodeType:
-    parsed = parse(hint, "<string>", "eval")
-    UnionTransformer().visit(parsed)
-    fix_missing_locations(parsed)
-    return compile(parsed, "<string>", "eval", flags=0)
diff --git a/setuptools/_vendor/typeguard/_utils.py b/setuptools/_vendor/typeguard/_utils.py
deleted file mode 100644
index 9bcc8417f8..0000000000
--- a/setuptools/_vendor/typeguard/_utils.py
+++ /dev/null
@@ -1,173 +0,0 @@
-from __future__ import annotations
-
-import inspect
-import sys
-from importlib import import_module
-from inspect import currentframe
-from types import CodeType, FrameType, FunctionType
-from typing import TYPE_CHECKING, Any, Callable, ForwardRef, Union, cast, final
-from weakref import WeakValueDictionary
-
-if TYPE_CHECKING:
-    from ._memo import TypeCheckMemo
-
-if sys.version_info >= (3, 13):
-    from typing import get_args, get_origin
-
-    def evaluate_forwardref(forwardref: ForwardRef, memo: TypeCheckMemo) -> Any:
-        return forwardref._evaluate(
-            memo.globals, memo.locals, type_params=(), recursive_guard=frozenset()
-        )
-
-elif sys.version_info >= (3, 10):
-    from typing import get_args, get_origin
-
-    def evaluate_forwardref(forwardref: ForwardRef, memo: TypeCheckMemo) -> Any:
-        return forwardref._evaluate(
-            memo.globals,
memo.locals, recursive_guard=frozenset() - ) - -else: - from typing_extensions import get_args, get_origin - - evaluate_extra_args: tuple[frozenset[Any], ...] = ( - (frozenset(),) if sys.version_info >= (3, 9) else () - ) - - def evaluate_forwardref(forwardref: ForwardRef, memo: TypeCheckMemo) -> Any: - from ._union_transformer import compile_type_hint, type_substitutions - - if not forwardref.__forward_evaluated__: - forwardref.__forward_code__ = compile_type_hint(forwardref.__forward_arg__) - - try: - return forwardref._evaluate(memo.globals, memo.locals, *evaluate_extra_args) - except NameError: - if sys.version_info < (3, 10): - # Try again, with the type substitutions (list -> List etc.) in place - new_globals = memo.globals.copy() - new_globals.setdefault("Union", Union) - if sys.version_info < (3, 9): - new_globals.update(type_substitutions) - - return forwardref._evaluate( - new_globals, memo.locals or new_globals, *evaluate_extra_args - ) - - raise - - -_functions_map: WeakValueDictionary[CodeType, FunctionType] = WeakValueDictionary() - - -def get_type_name(type_: Any) -> str: - name: str - for attrname in "__name__", "_name", "__forward_arg__": - candidate = getattr(type_, attrname, None) - if isinstance(candidate, str): - name = candidate - break - else: - origin = get_origin(type_) - candidate = getattr(origin, "_name", None) - if candidate is None: - candidate = type_.__class__.__name__.strip("_") - - if isinstance(candidate, str): - name = candidate - else: - return "(unknown)" - - args = get_args(type_) - if args: - if name == "Literal": - formatted_args = ", ".join(repr(arg) for arg in args) - else: - formatted_args = ", ".join(get_type_name(arg) for arg in args) - - name += f"[{formatted_args}]" - - module = getattr(type_, "__module__", None) - if module and module not in (None, "typing", "typing_extensions", "builtins"): - name = module + "." + name - - return name - - -def qualified_name(obj: Any, *, add_class_prefix: bool = False) -> str: - """ - Return the qualified name (e.g. package.module.Type) for the given object. - - Builtins and types from the :mod:`typing` package get special treatment by having - the module name stripped from the generated name. - - """ - if obj is None: - return "None" - elif inspect.isclass(obj): - prefix = "class " if add_class_prefix else "" - type_ = obj - else: - prefix = "" - type_ = type(obj) - - module = type_.__module__ - qualname = type_.__qualname__ - name = qualname if module in ("typing", "builtins") else f"{module}.{qualname}" - return prefix + name - - -def function_name(func: Callable[..., Any]) -> str: - """ - Return the qualified name of the given function. - - Builtins and types from the :mod:`typing` package get special treatment by having - the module name stripped from the generated name. 
- - """ - # For partial functions and objects with __call__ defined, __qualname__ does not - # exist - module = getattr(func, "__module__", "") - qualname = (module + ".") if module not in ("builtins", "") else "" - return qualname + getattr(func, "__qualname__", repr(func)) - - -def resolve_reference(reference: str) -> Any: - modulename, varname = reference.partition(":")[::2] - if not modulename or not varname: - raise ValueError(f"{reference!r} is not a module:varname reference") - - obj = import_module(modulename) - for attr in varname.split("."): - obj = getattr(obj, attr) - - return obj - - -def is_method_of(obj: object, cls: type) -> bool: - return ( - inspect.isfunction(obj) - and obj.__module__ == cls.__module__ - and obj.__qualname__.startswith(cls.__qualname__ + ".") - ) - - -def get_stacklevel() -> int: - level = 1 - frame = cast(FrameType, currentframe()).f_back - while frame and frame.f_globals.get("__name__", "").startswith("typeguard."): - level += 1 - frame = frame.f_back - - return level - - -@final -class Unset: - __slots__ = () - - def __repr__(self) -> str: - return "" - - -unset = Unset() diff --git a/setuptools/_vendor/typing_extensions-4.12.2.dist-info/INSTALLER b/setuptools/_vendor/typing_extensions-4.12.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/typing_extensions-4.12.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/typing_extensions-4.12.2.dist-info/LICENSE b/setuptools/_vendor/typing_extensions-4.12.2.dist-info/LICENSE deleted file mode 100644 index f26bcf4d2d..0000000000 --- a/setuptools/_vendor/typing_extensions-4.12.2.dist-info/LICENSE +++ /dev/null @@ -1,279 +0,0 @@ -A. HISTORY OF THE SOFTWARE -========================== - -Python was created in the early 1990s by Guido van Rossum at Stichting -Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands -as a successor of a language called ABC. Guido remains Python's -principal author, although it includes many contributions from others. - -In 1995, Guido continued his work on Python at the Corporation for -National Research Initiatives (CNRI, see https://www.cnri.reston.va.us) -in Reston, Virginia where he released several versions of the -software. - -In May 2000, Guido and the Python core development team moved to -BeOpen.com to form the BeOpen PythonLabs team. In October of the same -year, the PythonLabs team moved to Digital Creations, which became -Zope Corporation. In 2001, the Python Software Foundation (PSF, see -https://www.python.org/psf/) was formed, a non-profit organization -created specifically to own Python-related Intellectual Property. -Zope Corporation was a sponsoring member of the PSF. - -All Python releases are Open Source (see https://opensource.org for -the Open Source Definition). Historically, most, but not all, Python -releases have also been GPL-compatible; the table below summarizes -the various releases. - - Release Derived Year Owner GPL- - from compatible? (1) - - 0.9.0 thru 1.2 1991-1995 CWI yes - 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes - 1.6 1.5.2 2000 CNRI no - 2.0 1.6 2000 BeOpen.com no - 1.6.1 1.6 2001 CNRI yes (2) - 2.1 2.0+1.6.1 2001 PSF no - 2.0.1 2.0+1.6.1 2001 PSF yes - 2.1.1 2.1+2.0.1 2001 PSF yes - 2.1.2 2.1.1 2002 PSF yes - 2.1.3 2.1.2 2002 PSF yes - 2.2 and above 2.1.1 2001-now PSF yes - -Footnotes: - -(1) GPL-compatible doesn't mean that we're distributing Python under - the GPL. 
All Python licenses, unlike the GPL, let you distribute - a modified version without making your changes open source. The - GPL-compatible licenses make it possible to combine Python with - other software that is released under the GPL; the others don't. - -(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, - because its license has a choice of law clause. According to - CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 - is "not incompatible" with the GPL. - -Thanks to the many outside volunteers who have worked under Guido's -direction to make these releases possible. - - -B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON -=============================================================== - -Python software and documentation are licensed under the -Python Software Foundation License Version 2. - -Starting with Python 3.8.6, examples, recipes, and other code in -the documentation are dual licensed under the PSF License Version 2 -and the Zero-Clause BSD license. - -Some software incorporated into Python is under different licenses. -The licenses are listed with code falling under that license. - - -PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 --------------------------------------------- - -1. This LICENSE AGREEMENT is between the Python Software Foundation -("PSF"), and the Individual or Organization ("Licensee") accessing and -otherwise using this software ("Python") in source or binary form and -its associated documentation. - -2. Subject to the terms and conditions of this License Agreement, PSF hereby -grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, -analyze, test, perform and/or display publicly, prepare derivative works, -distribute, and otherwise use Python alone or in any derivative version, -provided, however, that PSF's License Agreement and PSF's notice of copyright, -i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation; -All Rights Reserved" are retained in Python alone or in any derivative version -prepared by Licensee. - -3. In the event Licensee prepares a derivative work that is based on -or incorporates Python or any part thereof, and wants to make -the derivative work available to others as provided herein, then -Licensee hereby agrees to include in any such work a brief summary of -the changes made to Python. - -4. PSF is making Python available to Licensee on an "AS IS" -basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, -OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -7. Nothing in this License Agreement shall be deemed to create any -relationship of agency, partnership, or joint venture between PSF and -Licensee. 
This License Agreement does not grant permission to use PSF -trademarks or trade name in a trademark sense to endorse or promote -products or services of Licensee, or any third party. - -8. By copying, installing or otherwise using Python, Licensee -agrees to be bound by the terms and conditions of this License -Agreement. - - -BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 -------------------------------------------- - -BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 - -1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an -office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the -Individual or Organization ("Licensee") accessing and otherwise using -this software in source or binary form and its associated -documentation ("the Software"). - -2. Subject to the terms and conditions of this BeOpen Python License -Agreement, BeOpen hereby grants Licensee a non-exclusive, -royalty-free, world-wide license to reproduce, analyze, test, perform -and/or display publicly, prepare derivative works, distribute, and -otherwise use the Software alone or in any derivative version, -provided, however, that the BeOpen Python License is retained in the -Software, alone or in any derivative version prepared by Licensee. - -3. BeOpen is making the Software available to Licensee on an "AS IS" -basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE -SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS -AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY -DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -5. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -6. This License Agreement shall be governed by and interpreted in all -respects by the law of the State of California, excluding conflict of -law provisions. Nothing in this License Agreement shall be deemed to -create any relationship of agency, partnership, or joint venture -between BeOpen and Licensee. This License Agreement does not grant -permission to use BeOpen trademarks or trade names in a trademark -sense to endorse or promote products or services of Licensee, or any -third party. As an exception, the "BeOpen Python" logos available at -http://www.pythonlabs.com/logos.html may be used according to the -permissions granted on that web page. - -7. By copying, installing or otherwise using the software, Licensee -agrees to be bound by the terms and conditions of this License -Agreement. - - -CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 ---------------------------------------- - -1. This LICENSE AGREEMENT is between the Corporation for National -Research Initiatives, having an office at 1895 Preston White Drive, -Reston, VA 20191 ("CNRI"), and the Individual or Organization -("Licensee") accessing and otherwise using Python 1.6.1 software in -source or binary form and its associated documentation. - -2. 
Subject to the terms and conditions of this License Agreement, CNRI -hereby grants Licensee a nonexclusive, royalty-free, world-wide -license to reproduce, analyze, test, perform and/or display publicly, -prepare derivative works, distribute, and otherwise use Python 1.6.1 -alone or in any derivative version, provided, however, that CNRI's -License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) -1995-2001 Corporation for National Research Initiatives; All Rights -Reserved" are retained in Python 1.6.1 alone or in any derivative -version prepared by Licensee. Alternately, in lieu of CNRI's License -Agreement, Licensee may substitute the following text (omitting the -quotes): "Python 1.6.1 is made available subject to the terms and -conditions in CNRI's License Agreement. This Agreement together with -Python 1.6.1 may be located on the internet using the following -unique, persistent identifier (known as a handle): 1895.22/1013. This -Agreement may also be obtained from a proxy server on the internet -using the following URL: http://hdl.handle.net/1895.22/1013". - -3. In the event Licensee prepares a derivative work that is based on -or incorporates Python 1.6.1 or any part thereof, and wants to make -the derivative work available to others as provided herein, then -Licensee hereby agrees to include in any such work a brief summary of -the changes made to Python 1.6.1. - -4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" -basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR -IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND -DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS -FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT -INFRINGE ANY THIRD PARTY RIGHTS. - -5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON -1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS -A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, -OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material -breach of its terms and conditions. - -7. This License Agreement shall be governed by the federal -intellectual property law of the United States, including without -limitation the federal copyright law, and, to the extent such -U.S. federal law does not apply, by the law of the Commonwealth of -Virginia, excluding Virginia's conflict of law provisions. -Notwithstanding the foregoing, with regard to derivative works based -on Python 1.6.1 that incorporate non-separable material that was -previously distributed under the GNU General Public License (GPL), the -law of the Commonwealth of Virginia shall govern this License -Agreement only as to issues arising under or with respect to -Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this -License Agreement shall be deemed to create any relationship of -agency, partnership, or joint venture between CNRI and Licensee. This -License Agreement does not grant permission to use CNRI trademarks or -trade name in a trademark sense to endorse or promote products or -services of Licensee, or any third party. - -8. By clicking on the "ACCEPT" button where indicated, or by copying, -installing or otherwise using Python 1.6.1, Licensee agrees to be -bound by the terms and conditions of this License Agreement. 
- - ACCEPT - - -CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 --------------------------------------------------- - -Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, -The Netherlands. All rights reserved. - -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose and without fee is hereby granted, -provided that the above copyright notice appear in all copies and that -both that copyright notice and this permission notice appear in -supporting documentation, and that the name of Stichting Mathematisch -Centrum or CWI not be used in advertising or publicity pertaining to -distribution of the software without specific, written prior -permission. - -STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO -THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE -FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION ----------------------------------------------------------------------- - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR -OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. 
diff --git a/setuptools/_vendor/typing_extensions-4.12.2.dist-info/METADATA b/setuptools/_vendor/typing_extensions-4.12.2.dist-info/METADATA deleted file mode 100644 index f15e2b3877..0000000000 --- a/setuptools/_vendor/typing_extensions-4.12.2.dist-info/METADATA +++ /dev/null @@ -1,67 +0,0 @@ -Metadata-Version: 2.1 -Name: typing_extensions -Version: 4.12.2 -Summary: Backported and Experimental Type Hints for Python 3.8+ -Keywords: annotations,backport,checker,checking,function,hinting,hints,type,typechecking,typehinting,typehints,typing -Author-email: "Guido van Rossum, Jukka Lehtosalo, Łukasz Langa, Michael Lee" -Requires-Python: >=3.8 -Description-Content-Type: text/markdown -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Console -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: Python Software Foundation License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3.9 -Classifier: Programming Language :: Python :: 3.10 -Classifier: Programming Language :: Python :: 3.11 -Classifier: Programming Language :: Python :: 3.12 -Classifier: Programming Language :: Python :: 3.13 -Classifier: Topic :: Software Development -Project-URL: Bug Tracker, https://github.com/python/typing_extensions/issues -Project-URL: Changes, https://github.com/python/typing_extensions/blob/main/CHANGELOG.md -Project-URL: Documentation, https://typing-extensions.readthedocs.io/ -Project-URL: Home, https://github.com/python/typing_extensions -Project-URL: Q & A, https://github.com/python/typing/discussions -Project-URL: Repository, https://github.com/python/typing_extensions - -# Typing Extensions - -[![Chat at https://gitter.im/python/typing](https://badges.gitter.im/python/typing.svg)](https://gitter.im/python/typing) - -[Documentation](https://typing-extensions.readthedocs.io/en/latest/#) – -[PyPI](https://pypi.org/project/typing-extensions/) - -## Overview - -The `typing_extensions` module serves two related purposes: - -- Enable use of new type system features on older Python versions. For example, - `typing.TypeGuard` is new in Python 3.10, but `typing_extensions` allows - users on previous Python versions to use it too. -- Enable experimentation with new type system PEPs before they are accepted and - added to the `typing` module. - -`typing_extensions` is treated specially by static type checkers such as -mypy and pyright. Objects defined in `typing_extensions` are treated the same -way as equivalent forms in `typing`. - -`typing_extensions` uses -[Semantic Versioning](https://semver.org/). The -major version will be incremented only for backwards-incompatible changes. -Therefore, it's safe to depend -on `typing_extensions` like this: `typing_extensions >=x.y, <(x+1)`, -where `x.y` is the first version that includes all features you need. - -## Included items - -See [the documentation](https://typing-extensions.readthedocs.io/en/latest/#) for a -complete listing of module contents. - -## Contributing - -See [CONTRIBUTING.md](https://github.com/python/typing_extensions/blob/main/CONTRIBUTING.md) -for how to contribute to `typing_extensions`. 
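The METADATA deleted above documents typing_extensions' semantic-versioning guarantee: depend on the first version that includes the features you need and cap below the next major release. A minimal sketch of that consumption pattern (the TypeGuard example and the Python 3.10 cutoff come from the README text above; the requirement line itself is illustrative, not part of this diff):

# Illustrative sketch, not part of the vendored files.
# requirements.txt, per the README's pinning guidance:
#     typing_extensions >= 4.12, < 5
import sys

# Prefer the stdlib form once the feature lands there; fall back to the
# backport on older interpreters (TypeGuard entered typing in Python 3.10).
if sys.version_info >= (3, 10):
    from typing import TypeGuard
else:
    from typing_extensions import TypeGuard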
- diff --git a/setuptools/_vendor/typing_extensions-4.12.2.dist-info/RECORD b/setuptools/_vendor/typing_extensions-4.12.2.dist-info/RECORD deleted file mode 100644 index bc7b45334d..0000000000 --- a/setuptools/_vendor/typing_extensions-4.12.2.dist-info/RECORD +++ /dev/null @@ -1,7 +0,0 @@ -__pycache__/typing_extensions.cpython-312.pyc,, -typing_extensions-4.12.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -typing_extensions-4.12.2.dist-info/LICENSE,sha256=Oy-B_iHRgcSZxZolbI4ZaEVdZonSaaqFNzv7avQdo78,13936 -typing_extensions-4.12.2.dist-info/METADATA,sha256=BeUQIa8cnYbrjWx-N8TOznM9UGW5Gm2DicVpDtRA8W0,3018 -typing_extensions-4.12.2.dist-info/RECORD,, -typing_extensions-4.12.2.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81 -typing_extensions.py,sha256=gwekpyG9DVG3lxWKX4ni8u7nk3We5slG98mA9F3DJQw,134451 diff --git a/setuptools/_vendor/typing_extensions.py b/setuptools/_vendor/typing_extensions.py deleted file mode 100644 index dec429ca87..0000000000 --- a/setuptools/_vendor/typing_extensions.py +++ /dev/null @@ -1,3641 +0,0 @@ -import abc -import collections -import collections.abc -import contextlib -import functools -import inspect -import operator -import sys -import types as _types -import typing -import warnings - -__all__ = [ - # Super-special typing primitives. - 'Any', - 'ClassVar', - 'Concatenate', - 'Final', - 'LiteralString', - 'ParamSpec', - 'ParamSpecArgs', - 'ParamSpecKwargs', - 'Self', - 'Type', - 'TypeVar', - 'TypeVarTuple', - 'Unpack', - - # ABCs (from collections.abc). - 'Awaitable', - 'AsyncIterator', - 'AsyncIterable', - 'Coroutine', - 'AsyncGenerator', - 'AsyncContextManager', - 'Buffer', - 'ChainMap', - - # Concrete collection types. - 'ContextManager', - 'Counter', - 'Deque', - 'DefaultDict', - 'NamedTuple', - 'OrderedDict', - 'TypedDict', - - # Structural checks, a.k.a. protocols. - 'SupportsAbs', - 'SupportsBytes', - 'SupportsComplex', - 'SupportsFloat', - 'SupportsIndex', - 'SupportsInt', - 'SupportsRound', - - # One-off things. - 'Annotated', - 'assert_never', - 'assert_type', - 'clear_overloads', - 'dataclass_transform', - 'deprecated', - 'Doc', - 'get_overloads', - 'final', - 'get_args', - 'get_origin', - 'get_original_bases', - 'get_protocol_members', - 'get_type_hints', - 'IntVar', - 'is_protocol', - 'is_typeddict', - 'Literal', - 'NewType', - 'overload', - 'override', - 'Protocol', - 'reveal_type', - 'runtime', - 'runtime_checkable', - 'Text', - 'TypeAlias', - 'TypeAliasType', - 'TypeGuard', - 'TypeIs', - 'TYPE_CHECKING', - 'Never', - 'NoReturn', - 'ReadOnly', - 'Required', - 'NotRequired', - - # Pure aliases, have always been in typing - 'AbstractSet', - 'AnyStr', - 'BinaryIO', - 'Callable', - 'Collection', - 'Container', - 'Dict', - 'ForwardRef', - 'FrozenSet', - 'Generator', - 'Generic', - 'Hashable', - 'IO', - 'ItemsView', - 'Iterable', - 'Iterator', - 'KeysView', - 'List', - 'Mapping', - 'MappingView', - 'Match', - 'MutableMapping', - 'MutableSequence', - 'MutableSet', - 'NoDefault', - 'Optional', - 'Pattern', - 'Reversible', - 'Sequence', - 'Set', - 'Sized', - 'TextIO', - 'Tuple', - 'Union', - 'ValuesView', - 'cast', - 'no_type_check', - 'no_type_check_decorator', -] - -# for backward compatibility -PEP_560 = True -GenericMeta = type -_PEP_696_IMPLEMENTED = sys.version_info >= (3, 13, 0, "beta") - -# The functions below are modified copies of typing internal helpers. -# They are needed by _ProtocolMeta and they provide support for PEP 646. 
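The module being deleted here applies one recurring shape throughout: re-export a typing member when the running interpreter already provides it, otherwise define a compatible backport behind a hasattr() or sys.version_info guard. A minimal sketch of that pattern, using a hypothetical SomeFeature name as a stand-in (not a real typing member):

# Sketch of the backport pattern used throughout the deleted module;
# "SomeFeature" is hypothetical, standing in for members like get_overloads.
import typing

if hasattr(typing, "SomeFeature"):
    SomeFeature = typing.SomeFeature  # stdlib already provides it: re-export
else:
    class SomeFeature:  # older Python: supply a compatible runtime fallback
        """Minimal stand-in; type checkers treat both forms identically."""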
- - class _Sentinel: - def __repr__(self): - return "&lt;sentinel&gt;" - - _marker = _Sentinel() - - if sys.version_info >= (3, 10): - def _should_collect_from_parameters(t): - return isinstance( - t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType) - ) -elif sys.version_info >= (3, 9): - def _should_collect_from_parameters(t): - return isinstance(t, (typing._GenericAlias, _types.GenericAlias)) -else: - def _should_collect_from_parameters(t): - return isinstance(t, typing._GenericAlias) and not t._special - - -NoReturn = typing.NoReturn - -# Some unconstrained type variables. These are used by the container types. -# (These are not for export.) -T = typing.TypeVar('T') # Any type. -KT = typing.TypeVar('KT') # Key type. -VT = typing.TypeVar('VT') # Value type. -T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. -T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. - - -if sys.version_info >= (3, 11): - from typing import Any -else: - - class _AnyMeta(type): - def __instancecheck__(self, obj): - if self is Any: - raise TypeError("typing_extensions.Any cannot be used with isinstance()") - return super().__instancecheck__(obj) - - def __repr__(self): - if self is Any: - return "typing_extensions.Any" - return super().__repr__() - - class Any(metaclass=_AnyMeta): - """Special type indicating an unconstrained type. - - Any is compatible with every type. - - Any assumed to have all methods. - - All values assumed to be instances of Any. - Note that all the above statements are true from the point of view of - static type checkers. At runtime, Any should not be used with instance - checks. - """ - def __new__(cls, *args, **kwargs): - if cls is Any: - raise TypeError("Any cannot be instantiated") - return super().__new__(cls, *args, **kwargs) - - -ClassVar = typing.ClassVar - - -class _ExtensionsSpecialForm(typing._SpecialForm, _root=True): - def __repr__(self): - return 'typing_extensions.' + self._name - - -Final = typing.Final - -if sys.version_info >= (3, 11): - final = typing.final -else: - # @final exists in 3.8+, but we backport it for all versions - # before 3.11 to keep support for the __final__ attribute. - # See https://bugs.python.org/issue46342 - def final(f): - """This decorator can be used to indicate to type checkers that - the decorated method cannot be overridden, and decorated class - cannot be subclassed. For example: - - class Base: - @final - def done(self) -> None: - ... - class Sub(Base): - def done(self) -> None: # Error reported by type checker - ... - @final - class Leaf: - ... - class Other(Leaf): # Error reported by type checker - ... - - There is no runtime checking of these properties. The decorator - sets the ``__final__`` attribute to ``True`` on the decorated object - to allow runtime introspection. - """ - try: - f.__final__ = True - except (AttributeError, TypeError): - # Skip the attribute silently if it is not writable. - # AttributeError happens if the object has __slots__ or a - # read-only property, TypeError if it's a builtin class.
- pass - return f - - -def IntVar(name): - return typing.TypeVar(name) - - -# A Literal bug was fixed in 3.11.0, 3.10.1 and 3.9.8 -if sys.version_info >= (3, 10, 1): - Literal = typing.Literal -else: - def _flatten_literal_params(parameters): - """An internal helper for Literal creation: flatten Literals among parameters""" - params = [] - for p in parameters: - if isinstance(p, _LiteralGenericAlias): - params.extend(p.__args__) - else: - params.append(p) - return tuple(params) - - def _value_and_type_iter(params): - for p in params: - yield p, type(p) - - class _LiteralGenericAlias(typing._GenericAlias, _root=True): - def __eq__(self, other): - if not isinstance(other, _LiteralGenericAlias): - return NotImplemented - these_args_deduped = set(_value_and_type_iter(self.__args__)) - other_args_deduped = set(_value_and_type_iter(other.__args__)) - return these_args_deduped == other_args_deduped - - def __hash__(self): - return hash(frozenset(_value_and_type_iter(self.__args__))) - - class _LiteralForm(_ExtensionsSpecialForm, _root=True): - def __init__(self, doc: str): - self._name = 'Literal' - self._doc = self.__doc__ = doc - - def __getitem__(self, parameters): - if not isinstance(parameters, tuple): - parameters = (parameters,) - - parameters = _flatten_literal_params(parameters) - - val_type_pairs = list(_value_and_type_iter(parameters)) - try: - deduped_pairs = set(val_type_pairs) - except TypeError: - # unhashable parameters - pass - else: - # similar logic to typing._deduplicate on Python 3.9+ - if len(deduped_pairs) < len(val_type_pairs): - new_parameters = [] - for pair in val_type_pairs: - if pair in deduped_pairs: - new_parameters.append(pair[0]) - deduped_pairs.remove(pair) - assert not deduped_pairs, deduped_pairs - parameters = tuple(new_parameters) - - return _LiteralGenericAlias(self, parameters) - - Literal = _LiteralForm(doc="""\ - A type that can be used to indicate to type checkers - that the corresponding value has a value literally equivalent - to the provided parameter. For example: - - var: Literal[4] = 4 - - The type checker understands that 'var' is literally equal to - the value 4 and no other value. - - Literal[...] cannot be subclassed. There is no runtime - checking verifying that the parameter is actually a value - instead of a type.""") - - -_overload_dummy = typing._overload_dummy - - -if hasattr(typing, "get_overloads"): # 3.11+ - overload = typing.overload - get_overloads = typing.get_overloads - clear_overloads = typing.clear_overloads -else: - # {module: {qualname: {firstlineno: func}}} - _overload_registry = collections.defaultdict( - functools.partial(collections.defaultdict, dict) - ) - - def overload(func): - """Decorator for overloaded functions/methods. - - In a stub file, place two or more stub definitions for the same - function in a row, each decorated with @overload. For example: - - @overload - def utf8(value: None) -> None: ... - @overload - def utf8(value: bytes) -> bytes: ... - @overload - def utf8(value: str) -> bytes: ... - - In a non-stub file (i.e. a regular .py file), do the same but - follow it with an implementation. The implementation should *not* - be decorated with @overload. For example: - - @overload - def utf8(value: None) -> None: ... - @overload - def utf8(value: bytes) -> bytes: ... - @overload - def utf8(value: str) -> bytes: ... - def utf8(value): - # implementation goes here - - The overloads for a function can be retrieved at runtime using the - get_overloads() function. 
- """ - # classmethod and staticmethod - f = getattr(func, "__func__", func) - try: - _overload_registry[f.__module__][f.__qualname__][ - f.__code__.co_firstlineno - ] = func - except AttributeError: - # Not a normal function; ignore. - pass - return _overload_dummy - - def get_overloads(func): - """Return all defined overloads for *func* as a sequence.""" - # classmethod and staticmethod - f = getattr(func, "__func__", func) - if f.__module__ not in _overload_registry: - return [] - mod_dict = _overload_registry[f.__module__] - if f.__qualname__ not in mod_dict: - return [] - return list(mod_dict[f.__qualname__].values()) - - def clear_overloads(): - """Clear all overloads in the registry.""" - _overload_registry.clear() - - -# This is not a real generic class. Don't use outside annotations. -Type = typing.Type - -# Various ABCs mimicking those in collections.abc. -# A few are simply re-exported for completeness. -Awaitable = typing.Awaitable -Coroutine = typing.Coroutine -AsyncIterable = typing.AsyncIterable -AsyncIterator = typing.AsyncIterator -Deque = typing.Deque -DefaultDict = typing.DefaultDict -OrderedDict = typing.OrderedDict -Counter = typing.Counter -ChainMap = typing.ChainMap -Text = typing.Text -TYPE_CHECKING = typing.TYPE_CHECKING - - -if sys.version_info >= (3, 13, 0, "beta"): - from typing import AsyncContextManager, AsyncGenerator, ContextManager, Generator -else: - def _is_dunder(attr): - return attr.startswith('__') and attr.endswith('__') - - # Python <3.9 doesn't have typing._SpecialGenericAlias - _special_generic_alias_base = getattr( - typing, "_SpecialGenericAlias", typing._GenericAlias - ) - - class _SpecialGenericAlias(_special_generic_alias_base, _root=True): - def __init__(self, origin, nparams, *, inst=True, name=None, defaults=()): - if _special_generic_alias_base is typing._GenericAlias: - # Python <3.9 - self.__origin__ = origin - self._nparams = nparams - super().__init__(origin, nparams, special=True, inst=inst, name=name) - else: - # Python >= 3.9 - super().__init__(origin, nparams, inst=inst, name=name) - self._defaults = defaults - - def __setattr__(self, attr, val): - allowed_attrs = {'_name', '_inst', '_nparams', '_defaults'} - if _special_generic_alias_base is typing._GenericAlias: - # Python <3.9 - allowed_attrs.add("__origin__") - if _is_dunder(attr) or attr in allowed_attrs: - object.__setattr__(self, attr, val) - else: - setattr(self.__origin__, attr, val) - - @typing._tp_cache - def __getitem__(self, params): - if not isinstance(params, tuple): - params = (params,) - msg = "Parameters to generic types must be types." 
- params = tuple(typing._type_check(p, msg) for p in params) - if ( - self._defaults - and len(params) < self._nparams - and len(params) + len(self._defaults) >= self._nparams - ): - params = (*params, *self._defaults[len(params) - self._nparams:]) - actual_len = len(params) - - if actual_len != self._nparams: - if self._defaults: - expected = f"at least {self._nparams - len(self._defaults)}" - else: - expected = str(self._nparams) - if not self._nparams: - raise TypeError(f"{self} is not a generic class") - raise TypeError( - f"Too {'many' if actual_len > self._nparams else 'few'}" - f" arguments for {self};" - f" actual {actual_len}, expected {expected}" - ) - return self.copy_with(params) - - _NoneType = type(None) - Generator = _SpecialGenericAlias( - collections.abc.Generator, 3, defaults=(_NoneType, _NoneType) - ) - AsyncGenerator = _SpecialGenericAlias( - collections.abc.AsyncGenerator, 2, defaults=(_NoneType,) - ) - ContextManager = _SpecialGenericAlias( - contextlib.AbstractContextManager, - 2, - name="ContextManager", - defaults=(typing.Optional[bool],) - ) - AsyncContextManager = _SpecialGenericAlias( - contextlib.AbstractAsyncContextManager, - 2, - name="AsyncContextManager", - defaults=(typing.Optional[bool],) - ) - - -_PROTO_ALLOWLIST = { - 'collections.abc': [ - 'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable', - 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', 'Buffer', - ], - 'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'], - 'typing_extensions': ['Buffer'], -} - - -_EXCLUDED_ATTRS = frozenset(typing.EXCLUDED_ATTRIBUTES) | { - "__match_args__", "__protocol_attrs__", "__non_callable_proto_members__", - "__final__", -} - - -def _get_protocol_attrs(cls): - attrs = set() - for base in cls.__mro__[:-1]: # without object - if base.__name__ in {'Protocol', 'Generic'}: - continue - annotations = getattr(base, '__annotations__', {}) - for attr in (*base.__dict__, *annotations): - if (not attr.startswith('_abc_') and attr not in _EXCLUDED_ATTRS): - attrs.add(attr) - return attrs - - -def _caller(depth=2): - try: - return sys._getframe(depth).f_globals.get('__name__', '__main__') - except (AttributeError, ValueError): # For platforms without _getframe() - return None - - -# `__match_args__` attribute was removed from protocol members in 3.13, -# we want to backport this change to older Python versions. -if sys.version_info >= (3, 13): - Protocol = typing.Protocol -else: - def _allow_reckless_class_checks(depth=3): - """Allow instance and class checks for special stdlib modules. - The abc and functools modules indiscriminately call isinstance() and - issubclass() on the whole MRO of a user class, which may contain protocols. - """ - return _caller(depth) in {'abc', 'functools', None} - - def _no_init(self, *args, **kwargs): - if type(self)._is_protocol: - raise TypeError('Protocols cannot be instantiated') - - def _type_check_issubclass_arg_1(arg): - """Raise TypeError if `arg` is not an instance of `type` - in `issubclass(arg, &lt;protocol&gt;)`. - - In most cases, this is verified by type.__subclasscheck__. - Checking it again unnecessarily would slow down issubclass() checks, - so, we don't perform this check unless we absolutely have to. - - For various error paths, however, - we want to ensure that *this* error message is shown to the user - where relevant, rather than a typing.py-specific error message. - """ - if not isinstance(arg, type): - # Same error message as for issubclass(1, int).
- raise TypeError('issubclass() arg 1 must be a class') - - # Inheriting from typing._ProtocolMeta isn't actually desirable, - # but is necessary to allow typing.Protocol and typing_extensions.Protocol - # to mix without getting TypeErrors about "metaclass conflict" - class _ProtocolMeta(type(typing.Protocol)): - # This metaclass is somewhat unfortunate, - # but is necessary for several reasons... - # - # NOTE: DO NOT call super() in any methods in this class - # That would call the methods on typing._ProtocolMeta on Python 3.8-3.11 - # and those are slow - def __new__(mcls, name, bases, namespace, **kwargs): - if name == "Protocol" and len(bases) < 2: - pass - elif {Protocol, typing.Protocol} & set(bases): - for base in bases: - if not ( - base in {object, typing.Generic, Protocol, typing.Protocol} - or base.__name__ in _PROTO_ALLOWLIST.get(base.__module__, []) - or is_protocol(base) - ): - raise TypeError( - f"Protocols can only inherit from other protocols, " - f"got {base!r}" - ) - return abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs) - - def __init__(cls, *args, **kwargs): - abc.ABCMeta.__init__(cls, *args, **kwargs) - if getattr(cls, "_is_protocol", False): - cls.__protocol_attrs__ = _get_protocol_attrs(cls) - - def __subclasscheck__(cls, other): - if cls is Protocol: - return type.__subclasscheck__(cls, other) - if ( - getattr(cls, '_is_protocol', False) - and not _allow_reckless_class_checks() - ): - if not getattr(cls, '_is_runtime_protocol', False): - _type_check_issubclass_arg_1(other) - raise TypeError( - "Instance and class checks can only be used with " - "@runtime_checkable protocols" - ) - if ( - # this attribute is set by @runtime_checkable: - cls.__non_callable_proto_members__ - and cls.__dict__.get("__subclasshook__") is _proto_hook - ): - _type_check_issubclass_arg_1(other) - non_method_attrs = sorted(cls.__non_callable_proto_members__) - raise TypeError( - "Protocols with non-method members don't support issubclass()." - f" Non-method members: {str(non_method_attrs)[1:-1]}." - ) - return abc.ABCMeta.__subclasscheck__(cls, other) - - def __instancecheck__(cls, instance): - # We need this method for situations where attributes are - # assigned in __init__. - if cls is Protocol: - return type.__instancecheck__(cls, instance) - if not getattr(cls, "_is_protocol", False): - # i.e., it's a concrete subclass of a protocol - return abc.ABCMeta.__instancecheck__(cls, instance) - - if ( - not getattr(cls, '_is_runtime_protocol', False) and - not _allow_reckless_class_checks() - ): - raise TypeError("Instance and class checks can only be used with" - " @runtime_checkable protocols") - - if abc.ABCMeta.__instancecheck__(cls, instance): - return True - - for attr in cls.__protocol_attrs__: - try: - val = inspect.getattr_static(instance, attr) - except AttributeError: - break - # this attribute is set by @runtime_checkable: - if val is None and attr not in cls.__non_callable_proto_members__: - break - else: - return True - - return False - - def __eq__(cls, other): - # Hack so that typing.Generic.__class_getitem__ - # treats typing_extensions.Protocol - # as equivalent to typing.Protocol - if abc.ABCMeta.__eq__(cls, other) is True: - return True - return cls is Protocol and other is typing.Protocol - - # This has to be defined, or the abc-module cache - # complains about classes with this metaclass being unhashable, - # if we define only __eq__! 
- def __hash__(cls) -> int: - return type.__hash__(cls) - - @classmethod - def _proto_hook(cls, other): - if not cls.__dict__.get('_is_protocol', False): - return NotImplemented - - for attr in cls.__protocol_attrs__: - for base in other.__mro__: - # Check if the members appears in the class dictionary... - if attr in base.__dict__: - if base.__dict__[attr] is None: - return NotImplemented - break - - # ...or in annotations, if it is a sub-protocol. - annotations = getattr(base, '__annotations__', {}) - if ( - isinstance(annotations, collections.abc.Mapping) - and attr in annotations - and is_protocol(other) - ): - break - else: - return NotImplemented - return True - - class Protocol(typing.Generic, metaclass=_ProtocolMeta): - __doc__ = typing.Protocol.__doc__ - __slots__ = () - _is_protocol = True - _is_runtime_protocol = False - - def __init_subclass__(cls, *args, **kwargs): - super().__init_subclass__(*args, **kwargs) - - # Determine if this is a protocol or a concrete subclass. - if not cls.__dict__.get('_is_protocol', False): - cls._is_protocol = any(b is Protocol for b in cls.__bases__) - - # Set (or override) the protocol subclass hook. - if '__subclasshook__' not in cls.__dict__: - cls.__subclasshook__ = _proto_hook - - # Prohibit instantiation for protocol classes - if cls._is_protocol and cls.__init__ is Protocol.__init__: - cls.__init__ = _no_init - - -if sys.version_info >= (3, 13): - runtime_checkable = typing.runtime_checkable -else: - def runtime_checkable(cls): - """Mark a protocol class as a runtime protocol. - - Such protocol can be used with isinstance() and issubclass(). - Raise TypeError if applied to a non-protocol class. - This allows a simple-minded structural check very similar to - one trick ponies in collections.abc such as Iterable. - - For example:: - - @runtime_checkable - class Closable(Protocol): - def close(self): ... - - assert isinstance(open('/some/file'), Closable) - - Warning: this will check only the presence of the required methods, - not their type signatures! - """ - if not issubclass(cls, typing.Generic) or not getattr(cls, '_is_protocol', False): - raise TypeError(f'@runtime_checkable can be only applied to protocol classes,' - f' got {cls!r}') - cls._is_runtime_protocol = True - - # typing.Protocol classes on <=3.11 break if we execute this block, - # because typing.Protocol classes on <=3.11 don't have a - # `__protocol_attrs__` attribute, and this block relies on the - # `__protocol_attrs__` attribute. Meanwhile, typing.Protocol classes on 3.12.2+ - # break if we *don't* execute this block, because *they* assume that all - # protocol classes have a `__non_callable_proto_members__` attribute - # (which this block sets) - if isinstance(cls, _ProtocolMeta) or sys.version_info >= (3, 12, 2): - # PEP 544 prohibits using issubclass() - # with protocols that have non-method members. - # See gh-113320 for why we compute this attribute here, - # rather than in `_ProtocolMeta.__init__` - cls.__non_callable_proto_members__ = set() - for attr in cls.__protocol_attrs__: - try: - is_callable = callable(getattr(cls, attr, None)) - except Exception as e: - raise TypeError( - f"Failed to determine whether protocol member {attr!r} " - "is a method member" - ) from e - else: - if not is_callable: - cls.__non_callable_proto_members__.add(attr) - - return cls - - -# The "runtime" alias exists for backwards compatibility. 
-runtime = runtime_checkable - - -# Our version of runtime-checkable protocols is faster on Python 3.8-3.11 -if sys.version_info >= (3, 12): - SupportsInt = typing.SupportsInt - SupportsFloat = typing.SupportsFloat - SupportsComplex = typing.SupportsComplex - SupportsBytes = typing.SupportsBytes - SupportsIndex = typing.SupportsIndex - SupportsAbs = typing.SupportsAbs - SupportsRound = typing.SupportsRound -else: - @runtime_checkable - class SupportsInt(Protocol): - """An ABC with one abstract method __int__.""" - __slots__ = () - - @abc.abstractmethod - def __int__(self) -> int: - pass - - @runtime_checkable - class SupportsFloat(Protocol): - """An ABC with one abstract method __float__.""" - __slots__ = () - - @abc.abstractmethod - def __float__(self) -> float: - pass - - @runtime_checkable - class SupportsComplex(Protocol): - """An ABC with one abstract method __complex__.""" - __slots__ = () - - @abc.abstractmethod - def __complex__(self) -> complex: - pass - - @runtime_checkable - class SupportsBytes(Protocol): - """An ABC with one abstract method __bytes__.""" - __slots__ = () - - @abc.abstractmethod - def __bytes__(self) -> bytes: - pass - - @runtime_checkable - class SupportsIndex(Protocol): - __slots__ = () - - @abc.abstractmethod - def __index__(self) -> int: - pass - - @runtime_checkable - class SupportsAbs(Protocol[T_co]): - """ - An ABC with one abstract method __abs__ that is covariant in its return type. - """ - __slots__ = () - - @abc.abstractmethod - def __abs__(self) -> T_co: - pass - - @runtime_checkable - class SupportsRound(Protocol[T_co]): - """ - An ABC with one abstract method __round__ that is covariant in its return type. - """ - __slots__ = () - - @abc.abstractmethod - def __round__(self, ndigits: int = 0) -> T_co: - pass - - -def _ensure_subclassable(mro_entries): - def inner(func): - if sys.implementation.name == "pypy" and sys.version_info < (3, 9): - cls_dict = { - "__call__": staticmethod(func), - "__mro_entries__": staticmethod(mro_entries) - } - t = type(func.__name__, (), cls_dict) - return functools.update_wrapper(t(), func) - else: - func.__mro_entries__ = mro_entries - return func - return inner - - -# Update this to something like >=3.13.0b1 if and when -# PEP 728 is implemented in CPython -_PEP_728_IMPLEMENTED = False - -if _PEP_728_IMPLEMENTED: - # The standard library TypedDict in Python 3.8 does not store runtime information - # about which (if any) keys are optional. See https://bugs.python.org/issue38834 - # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" - # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059 - # The standard library TypedDict below Python 3.11 does not store runtime - # information about optional and required keys when using Required or NotRequired. - # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11. - # Aaaand on 3.12 we add __orig_bases__ to TypedDict - # to enable better runtime introspection. - # On 3.13 we deprecate some odd ways of creating TypedDicts. - # Also on 3.13, PEP 705 adds the ReadOnly[] qualifier. - # PEP 728 (still pending) makes more changes. 
- TypedDict = typing.TypedDict - _TypedDictMeta = typing._TypedDictMeta - is_typeddict = typing.is_typeddict -else: - # 3.10.0 and later - _TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters - - def _get_typeddict_qualifiers(annotation_type): - while True: - annotation_origin = get_origin(annotation_type) - if annotation_origin is Annotated: - annotation_args = get_args(annotation_type) - if annotation_args: - annotation_type = annotation_args[0] - else: - break - elif annotation_origin is Required: - yield Required - annotation_type, = get_args(annotation_type) - elif annotation_origin is NotRequired: - yield NotRequired - annotation_type, = get_args(annotation_type) - elif annotation_origin is ReadOnly: - yield ReadOnly - annotation_type, = get_args(annotation_type) - else: - break - - class _TypedDictMeta(type): - def __new__(cls, name, bases, ns, *, total=True, closed=False): - """Create new typed dict class object. - - This method is called when TypedDict is subclassed, - or when TypedDict is instantiated. This way - TypedDict supports all three syntax forms described in its docstring. - Subclasses and instances of TypedDict return actual dictionaries. - """ - for base in bases: - if type(base) is not _TypedDictMeta and base is not typing.Generic: - raise TypeError('cannot inherit from both a TypedDict type ' - 'and a non-TypedDict base class') - - if any(issubclass(b, typing.Generic) for b in bases): - generic_base = (typing.Generic,) - else: - generic_base = () - - # typing.py generally doesn't let you inherit from plain Generic, unless - # the name of the class happens to be "Protocol" - tp_dict = type.__new__(_TypedDictMeta, "Protocol", (*generic_base, dict), ns) - tp_dict.__name__ = name - if tp_dict.__qualname__ == "Protocol": - tp_dict.__qualname__ = name - - if not hasattr(tp_dict, '__orig_bases__'): - tp_dict.__orig_bases__ = bases - - annotations = {} - if "__annotations__" in ns: - own_annotations = ns["__annotations__"] - elif "__annotate__" in ns: - # TODO: Use inspect.VALUE here, and make the annotations lazily evaluated - own_annotations = ns["__annotate__"](1) - else: - own_annotations = {} - msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" - if _TAKES_MODULE: - own_annotations = { - n: typing._type_check(tp, msg, module=tp_dict.__module__) - for n, tp in own_annotations.items() - } - else: - own_annotations = { - n: typing._type_check(tp, msg) - for n, tp in own_annotations.items() - } - required_keys = set() - optional_keys = set() - readonly_keys = set() - mutable_keys = set() - extra_items_type = None - - for base in bases: - base_dict = base.__dict__ - - annotations.update(base_dict.get('__annotations__', {})) - required_keys.update(base_dict.get('__required_keys__', ())) - optional_keys.update(base_dict.get('__optional_keys__', ())) - readonly_keys.update(base_dict.get('__readonly_keys__', ())) - mutable_keys.update(base_dict.get('__mutable_keys__', ())) - base_extra_items_type = base_dict.get('__extra_items__', None) - if base_extra_items_type is not None: - extra_items_type = base_extra_items_type - - if closed and extra_items_type is None: - extra_items_type = Never - if closed and "__extra_items__" in own_annotations: - annotation_type = own_annotations.pop("__extra_items__") - qualifiers = set(_get_typeddict_qualifiers(annotation_type)) - if Required in qualifiers: - raise TypeError( - "Special key __extra_items__ does not support " - "Required" - ) - if NotRequired in qualifiers: - raise TypeError( - "Special 
key __extra_items__ does not support " - "NotRequired" - ) - extra_items_type = annotation_type - - annotations.update(own_annotations) - for annotation_key, annotation_type in own_annotations.items(): - qualifiers = set(_get_typeddict_qualifiers(annotation_type)) - - if Required in qualifiers: - required_keys.add(annotation_key) - elif NotRequired in qualifiers: - optional_keys.add(annotation_key) - elif total: - required_keys.add(annotation_key) - else: - optional_keys.add(annotation_key) - if ReadOnly in qualifiers: - mutable_keys.discard(annotation_key) - readonly_keys.add(annotation_key) - else: - mutable_keys.add(annotation_key) - readonly_keys.discard(annotation_key) - - tp_dict.__annotations__ = annotations - tp_dict.__required_keys__ = frozenset(required_keys) - tp_dict.__optional_keys__ = frozenset(optional_keys) - tp_dict.__readonly_keys__ = frozenset(readonly_keys) - tp_dict.__mutable_keys__ = frozenset(mutable_keys) - if not hasattr(tp_dict, '__total__'): - tp_dict.__total__ = total - tp_dict.__closed__ = closed - tp_dict.__extra_items__ = extra_items_type - return tp_dict - - __call__ = dict # static method - - def __subclasscheck__(cls, other): - # Typed dicts are only for static structural subtyping. - raise TypeError('TypedDict does not support instance and class checks') - - __instancecheck__ = __subclasscheck__ - - _TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {}) - - @_ensure_subclassable(lambda bases: (_TypedDict,)) - def TypedDict(typename, fields=_marker, /, *, total=True, closed=False, **kwargs): - """A simple typed namespace. At runtime it is equivalent to a plain dict. - - TypedDict creates a dictionary type such that a type checker will expect all - instances to have a certain set of keys, where each key is - associated with a value of a consistent type. This expectation - is not checked at runtime. - - Usage:: - - class Point2D(TypedDict): - x: int - y: int - label: str - - a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK - b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check - - assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') - - The type info can be accessed via the Point2D.__annotations__ dict, and - the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. - TypedDict supports an additional equivalent form:: - - Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) - - By default, all keys must be present in a TypedDict. It is possible - to override this by specifying totality:: - - class Point2D(TypedDict, total=False): - x: int - y: int - - This means that a Point2D TypedDict can have any of the keys omitted. A type - checker is only expected to support a literal False or True as the value of - the total argument. True is the default, and makes all items defined in the - class body be required. - - The Required and NotRequired special forms can also be used to mark - individual keys as being required or not required:: - - class Point2D(TypedDict): - x: int # the "x" key must always be present (Required is the default) - y: NotRequired[int] # the "y" key can be omitted - - See PEP 655 for more details on Required and NotRequired. 
- """ - if fields is _marker or fields is None: - if fields is _marker: - deprecated_thing = "Failing to pass a value for the 'fields' parameter" - else: - deprecated_thing = "Passing `None` as the 'fields' parameter" - - example = f"`{typename} = TypedDict({typename!r}, {{}})`" - deprecation_msg = ( - f"{deprecated_thing} is deprecated and will be disallowed in " - "Python 3.15. To create a TypedDict class with 0 fields " - "using the functional syntax, pass an empty dictionary, e.g. " - ) + example + "." - warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2) - if closed is not False and closed is not True: - kwargs["closed"] = closed - closed = False - fields = kwargs - elif kwargs: - raise TypeError("TypedDict takes either a dict or keyword arguments," - " but not both") - if kwargs: - if sys.version_info >= (3, 13): - raise TypeError("TypedDict takes no keyword arguments") - warnings.warn( - "The kwargs-based syntax for TypedDict definitions is deprecated " - "in Python 3.11, will be removed in Python 3.13, and may not be " - "understood by third-party type checkers.", - DeprecationWarning, - stacklevel=2, - ) - - ns = {'__annotations__': dict(fields)} - module = _caller() - if module is not None: - # Setting correct module is necessary to make typed dict classes pickleable. - ns['__module__'] = module - - td = _TypedDictMeta(typename, (), ns, total=total, closed=closed) - td.__orig_bases__ = (TypedDict,) - return td - - if hasattr(typing, "_TypedDictMeta"): - _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta) - else: - _TYPEDDICT_TYPES = (_TypedDictMeta,) - - def is_typeddict(tp): - """Check if an annotation is a TypedDict class - - For example:: - class Film(TypedDict): - title: str - year: int - - is_typeddict(Film) # => True - is_typeddict(Union[list, str]) # => False - """ - # On 3.8, this would otherwise return True - if hasattr(typing, "TypedDict") and tp is typing.TypedDict: - return False - return isinstance(tp, _TYPEDDICT_TYPES) - - -if hasattr(typing, "assert_type"): - assert_type = typing.assert_type - -else: - def assert_type(val, typ, /): - """Assert (to the type checker) that the value is of the given type. - - When the type checker encounters a call to assert_type(), it - emits an error if the value is not of the specified type:: - - def greet(name: str) -> None: - assert_type(name, str) # ok - assert_type(name, int) # type checker error - - At runtime this returns the first argument unchanged and otherwise - does nothing. 
- """ - return val - - -if hasattr(typing, "ReadOnly"): # 3.13+ - get_type_hints = typing.get_type_hints -else: # <=3.13 - # replaces _strip_annotations() - def _strip_extras(t): - """Strips Annotated, Required and NotRequired from a given type.""" - if isinstance(t, _AnnotatedAlias): - return _strip_extras(t.__origin__) - if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired, ReadOnly): - return _strip_extras(t.__args__[0]) - if isinstance(t, typing._GenericAlias): - stripped_args = tuple(_strip_extras(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - return t.copy_with(stripped_args) - if hasattr(_types, "GenericAlias") and isinstance(t, _types.GenericAlias): - stripped_args = tuple(_strip_extras(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - return _types.GenericAlias(t.__origin__, stripped_args) - if hasattr(_types, "UnionType") and isinstance(t, _types.UnionType): - stripped_args = tuple(_strip_extras(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - return functools.reduce(operator.or_, stripped_args) - - return t - - def get_type_hints(obj, globalns=None, localns=None, include_extras=False): - """Return type hints for an object. - - This is often the same as obj.__annotations__, but it handles - forward references encoded as string literals, adds Optional[t] if a - default value equal to None is set and recursively replaces all - 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T' - (unless 'include_extras=True'). - - The argument may be a module, class, method, or function. The annotations - are returned as a dictionary. For classes, annotations include also - inherited members. - - TypeError is raised if the argument is not of a type that can contain - annotations, and an empty dictionary is returned if no annotations are - present. - - BEWARE -- the behavior of globalns and localns is counterintuitive - (unless you are familiar with how eval() and exec() work). The - search order is locals first, then globals. - - - If no dict arguments are passed, an attempt is made to use the - globals from obj (or the respective module's globals for classes), - and these are also used as the locals. If the object does not appear - to have globals, an empty dictionary is used. - - - If one dict argument is passed, it is used for both globals and - locals. - - - If two dict arguments are passed, they specify globals and - locals, respectively. - """ - if hasattr(typing, "Annotated"): # 3.9+ - hint = typing.get_type_hints( - obj, globalns=globalns, localns=localns, include_extras=True - ) - else: # 3.8 - hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) - if include_extras: - return hint - return {k: _strip_extras(t) for k, t in hint.items()} - - -# Python 3.9+ has PEP 593 (Annotated) -if hasattr(typing, 'Annotated'): - Annotated = typing.Annotated - # Not exported and not a public API, but needed for get_origin() and get_args() - # to work. - _AnnotatedAlias = typing._AnnotatedAlias -# 3.8 -else: - class _AnnotatedAlias(typing._GenericAlias, _root=True): - """Runtime representation of an annotated type. - - At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' - with extra annotations. The alias behaves like a normal typing alias, - instantiating is the same as instantiating the underlying type, binding - it to types is also the same. 
- """ - def __init__(self, origin, metadata): - if isinstance(origin, _AnnotatedAlias): - metadata = origin.__metadata__ + metadata - origin = origin.__origin__ - super().__init__(origin, origin) - self.__metadata__ = metadata - - def copy_with(self, params): - assert len(params) == 1 - new_type = params[0] - return _AnnotatedAlias(new_type, self.__metadata__) - - def __repr__(self): - return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, " - f"{', '.join(repr(a) for a in self.__metadata__)}]") - - def __reduce__(self): - return operator.getitem, ( - Annotated, (self.__origin__, *self.__metadata__) - ) - - def __eq__(self, other): - if not isinstance(other, _AnnotatedAlias): - return NotImplemented - if self.__origin__ != other.__origin__: - return False - return self.__metadata__ == other.__metadata__ - - def __hash__(self): - return hash((self.__origin__, self.__metadata__)) - - class Annotated: - """Add context specific metadata to a type. - - Example: Annotated[int, runtime_check.Unsigned] indicates to the - hypothetical runtime_check module that this type is an unsigned int. - Every other consumer of this type can ignore this metadata and treat - this type as int. - - The first argument to Annotated must be a valid type (and will be in - the __origin__ field), the remaining arguments are kept as a tuple in - the __extra__ field. - - Details: - - - It's an error to call `Annotated` with less than two arguments. - - Nested Annotated are flattened:: - - Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] - - - Instantiating an annotated type is equivalent to instantiating the - underlying type:: - - Annotated[C, Ann1](5) == C(5) - - - Annotated can be used as a generic type alias:: - - Optimized = Annotated[T, runtime.Optimize()] - Optimized[int] == Annotated[int, runtime.Optimize()] - - OptimizedList = Annotated[List[T], runtime.Optimize()] - OptimizedList[int] == Annotated[List[int], runtime.Optimize()] - """ - - __slots__ = () - - def __new__(cls, *args, **kwargs): - raise TypeError("Type Annotated cannot be instantiated.") - - @typing._tp_cache - def __class_getitem__(cls, params): - if not isinstance(params, tuple) or len(params) < 2: - raise TypeError("Annotated[...] should be used " - "with at least two arguments (a type and an " - "annotation).") - allowed_special_forms = (ClassVar, Final) - if get_origin(params[0]) in allowed_special_forms: - origin = params[0] - else: - msg = "Annotated[t, ...]: t must be a type." - origin = typing._type_check(params[0], msg) - metadata = tuple(params[1:]) - return _AnnotatedAlias(origin, metadata) - - def __init_subclass__(cls, *args, **kwargs): - raise TypeError( - f"Cannot subclass {cls.__module__}.Annotated" - ) - -# Python 3.8 has get_origin() and get_args() but those implementations aren't -# Annotated-aware, so we can't use those. Python 3.9's versions don't support -# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do. -if sys.version_info[:2] >= (3, 10): - get_origin = typing.get_origin - get_args = typing.get_args -# 3.8-3.9 -else: - try: - # 3.9+ - from typing import _BaseGenericAlias - except ImportError: - _BaseGenericAlias = typing._GenericAlias - try: - # 3.9+ - from typing import GenericAlias as _typing_GenericAlias - except ImportError: - _typing_GenericAlias = typing._GenericAlias - - def get_origin(tp): - """Get the unsubscripted version of a type. - - This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar - and Annotated. 
Return None for unsupported types. Examples:: - - get_origin(Literal[42]) is Literal - get_origin(int) is None - get_origin(ClassVar[int]) is ClassVar - get_origin(Generic) is Generic - get_origin(Generic[T]) is Generic - get_origin(Union[T, int]) is Union - get_origin(List[Tuple[T, T]][int]) == list - get_origin(P.args) is P - """ - if isinstance(tp, _AnnotatedAlias): - return Annotated - if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias, - ParamSpecArgs, ParamSpecKwargs)): - return tp.__origin__ - if tp is typing.Generic: - return typing.Generic - return None - - def get_args(tp): - """Get type arguments with all substitutions performed. - - For unions, basic simplifications used by Union constructor are performed. - Examples:: - get_args(Dict[str, int]) == (str, int) - get_args(int) == () - get_args(Union[int, Union[T, int], str][int]) == (int, str) - get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) - get_args(Callable[[], T][int]) == ([], int) - """ - if isinstance(tp, _AnnotatedAlias): - return (tp.__origin__, *tp.__metadata__) - if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)): - if getattr(tp, "_special", False): - return () - res = tp.__args__ - if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis: - res = (list(res[:-1]), res[-1]) - return res - return () - - -# 3.10+ -if hasattr(typing, 'TypeAlias'): - TypeAlias = typing.TypeAlias -# 3.9 -elif sys.version_info[:2] >= (3, 9): - @_ExtensionsSpecialForm - def TypeAlias(self, parameters): - """Special marker indicating that an assignment should - be recognized as a proper type alias definition by type - checkers. - - For example:: - - Predicate: TypeAlias = Callable[..., bool] - - It's invalid when used anywhere except as in the example above. - """ - raise TypeError(f"{self} is not subscriptable") -# 3.8 -else: - TypeAlias = _ExtensionsSpecialForm( - 'TypeAlias', - doc="""Special marker indicating that an assignment should - be recognized as a proper type alias definition by type - checkers. 
- - For example:: - - Predicate: TypeAlias = Callable[..., bool] - - It's invalid when used anywhere except as in the example - above.""" - ) - - -if hasattr(typing, "NoDefault"): - NoDefault = typing.NoDefault -else: - class NoDefaultTypeMeta(type): - def __setattr__(cls, attr, value): - # TypeError is consistent with the behavior of NoneType - raise TypeError( - f"cannot set {attr!r} attribute of immutable type {cls.__name__!r}" - ) - - class NoDefaultType(metaclass=NoDefaultTypeMeta): - """The type of the NoDefault singleton.""" - - __slots__ = () - - def __new__(cls): - return globals().get("NoDefault") or object.__new__(cls) - - def __repr__(self): - return "typing_extensions.NoDefault" - - def __reduce__(self): - return "NoDefault" - - NoDefault = NoDefaultType() - del NoDefaultType, NoDefaultTypeMeta - - -def _set_default(type_param, default): - type_param.has_default = lambda: default is not NoDefault - type_param.__default__ = default - - -def _set_module(typevarlike): - # for pickling: - def_mod = _caller(depth=3) - if def_mod != 'typing_extensions': - typevarlike.__module__ = def_mod - - -class _DefaultMixin: - """Mixin for TypeVarLike defaults.""" - - __slots__ = () - __init__ = _set_default - - -# Classes using this metaclass must provide a _backported_typevarlike ClassVar -class _TypeVarLikeMeta(type): - def __instancecheck__(cls, __instance: Any) -> bool: - return isinstance(__instance, cls._backported_typevarlike) - - -if _PEP_696_IMPLEMENTED: - from typing import TypeVar -else: - # Add default and infer_variance parameters from PEP 696 and 695 - class TypeVar(metaclass=_TypeVarLikeMeta): - """Type variable.""" - - _backported_typevarlike = typing.TypeVar - - def __new__(cls, name, *constraints, bound=None, - covariant=False, contravariant=False, - default=NoDefault, infer_variance=False): - if hasattr(typing, "TypeAliasType"): - # PEP 695 implemented (3.12+), can pass infer_variance to typing.TypeVar - typevar = typing.TypeVar(name, *constraints, bound=bound, - covariant=covariant, contravariant=contravariant, - infer_variance=infer_variance) - else: - typevar = typing.TypeVar(name, *constraints, bound=bound, - covariant=covariant, contravariant=contravariant) - if infer_variance and (covariant or contravariant): - raise ValueError("Variance cannot be specified with infer_variance.") - typevar.__infer_variance__ = infer_variance - - _set_default(typevar, default) - _set_module(typevar) - - def _tvar_prepare_subst(alias, args): - if ( - typevar.has_default() - and alias.__parameters__.index(typevar) == len(args) - ): - args += (typevar.__default__,) - return args - - typevar.__typing_prepare_subst__ = _tvar_prepare_subst - return typevar - - def __init_subclass__(cls) -> None: - raise TypeError(f"type '{__name__}.TypeVar' is not an acceptable base type") - - -# Python 3.10+ has PEP 612 -if hasattr(typing, 'ParamSpecArgs'): - ParamSpecArgs = typing.ParamSpecArgs - ParamSpecKwargs = typing.ParamSpecKwargs -# 3.8-3.9 -else: - class _Immutable: - """Mixin to indicate that object should not be copied.""" - __slots__ = () - - def __copy__(self): - return self - - def __deepcopy__(self, memo): - return self - - class ParamSpecArgs(_Immutable): - """The args for a ParamSpec object. - - Given a ParamSpec object P, P.args is an instance of ParamSpecArgs. - - ParamSpecArgs objects have a reference back to their ParamSpec: - - P.args.__origin__ is P - - This type is meant for runtime introspection and has no special meaning to - static type checkers. 
- """ - def __init__(self, origin): - self.__origin__ = origin - - def __repr__(self): - return f"{self.__origin__.__name__}.args" - - def __eq__(self, other): - if not isinstance(other, ParamSpecArgs): - return NotImplemented - return self.__origin__ == other.__origin__ - - class ParamSpecKwargs(_Immutable): - """The kwargs for a ParamSpec object. - - Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs. - - ParamSpecKwargs objects have a reference back to their ParamSpec: - - P.kwargs.__origin__ is P - - This type is meant for runtime introspection and has no special meaning to - static type checkers. - """ - def __init__(self, origin): - self.__origin__ = origin - - def __repr__(self): - return f"{self.__origin__.__name__}.kwargs" - - def __eq__(self, other): - if not isinstance(other, ParamSpecKwargs): - return NotImplemented - return self.__origin__ == other.__origin__ - - -if _PEP_696_IMPLEMENTED: - from typing import ParamSpec - -# 3.10+ -elif hasattr(typing, 'ParamSpec'): - - # Add default parameter - PEP 696 - class ParamSpec(metaclass=_TypeVarLikeMeta): - """Parameter specification.""" - - _backported_typevarlike = typing.ParamSpec - - def __new__(cls, name, *, bound=None, - covariant=False, contravariant=False, - infer_variance=False, default=NoDefault): - if hasattr(typing, "TypeAliasType"): - # PEP 695 implemented, can pass infer_variance to typing.TypeVar - paramspec = typing.ParamSpec(name, bound=bound, - covariant=covariant, - contravariant=contravariant, - infer_variance=infer_variance) - else: - paramspec = typing.ParamSpec(name, bound=bound, - covariant=covariant, - contravariant=contravariant) - paramspec.__infer_variance__ = infer_variance - - _set_default(paramspec, default) - _set_module(paramspec) - - def _paramspec_prepare_subst(alias, args): - params = alias.__parameters__ - i = params.index(paramspec) - if i == len(args) and paramspec.has_default(): - args = [*args, paramspec.__default__] - if i >= len(args): - raise TypeError(f"Too few arguments for {alias}") - # Special case where Z[[int, str, bool]] == Z[int, str, bool] in PEP 612. - if len(params) == 1 and not typing._is_param_expr(args[0]): - assert i == 0 - args = (args,) - # Convert lists to tuples to help other libraries cache the results. - elif isinstance(args[i], list): - args = (*args[:i], tuple(args[i]), *args[i + 1:]) - return args - - paramspec.__typing_prepare_subst__ = _paramspec_prepare_subst - return paramspec - - def __init_subclass__(cls) -> None: - raise TypeError(f"type '{__name__}.ParamSpec' is not an acceptable base type") - -# 3.8-3.9 -else: - - # Inherits from list as a workaround for Callable checks in Python < 3.9.2. - class ParamSpec(list, _DefaultMixin): - """Parameter specification variable. - - Usage:: - - P = ParamSpec('P') - - Parameter specification variables exist primarily for the benefit of static - type checkers. They are used to forward the parameter types of one - callable to another callable, a pattern commonly found in higher order - functions and decorators. They are only valid when used in ``Concatenate``, - or s the first argument to ``Callable``. In Python 3.10 and higher, - they are also supported in user-defined Generics at runtime. - See class Generic for more information on generic types. 
An - example for annotating a decorator:: - - T = TypeVar('T') - P = ParamSpec('P') - - def add_logging(f: Callable[P, T]) -> Callable[P, T]: - '''A type-safe decorator to add logging to a function.''' - def inner(*args: P.args, **kwargs: P.kwargs) -> T: - logging.info(f'{f.__name__} was called') - return f(*args, **kwargs) - return inner - - @add_logging - def add_two(x: float, y: float) -> float: - '''Add two numbers together.''' - return x + y - - Parameter specification variables defined with covariant=True or - contravariant=True can be used to declare covariant or contravariant - generic types. These keyword arguments are valid, but their actual semantics - are yet to be decided. See PEP 612 for details. - - Parameter specification variables can be introspected. e.g.: - - P.__name__ == 'T' - P.__bound__ == None - P.__covariant__ == False - P.__contravariant__ == False - - Note that only parameter specification variables defined in global scope can - be pickled. - """ - - # Trick Generic __parameters__. - __class__ = typing.TypeVar - - @property - def args(self): - return ParamSpecArgs(self) - - @property - def kwargs(self): - return ParamSpecKwargs(self) - - def __init__(self, name, *, bound=None, covariant=False, contravariant=False, - infer_variance=False, default=NoDefault): - list.__init__(self, [self]) - self.__name__ = name - self.__covariant__ = bool(covariant) - self.__contravariant__ = bool(contravariant) - self.__infer_variance__ = bool(infer_variance) - if bound: - self.__bound__ = typing._type_check(bound, 'Bound must be a type.') - else: - self.__bound__ = None - _DefaultMixin.__init__(self, default) - - # for pickling: - def_mod = _caller() - if def_mod != 'typing_extensions': - self.__module__ = def_mod - - def __repr__(self): - if self.__infer_variance__: - prefix = '' - elif self.__covariant__: - prefix = '+' - elif self.__contravariant__: - prefix = '-' - else: - prefix = '~' - return prefix + self.__name__ - - def __hash__(self): - return object.__hash__(self) - - def __eq__(self, other): - return self is other - - def __reduce__(self): - return self.__name__ - - # Hack to get typing._type_check to pass. - def __call__(self, *args, **kwargs): - pass - - -# 3.8-3.9 -if not hasattr(typing, 'Concatenate'): - # Inherits from list as a workaround for Callable checks in Python < 3.9.2. - class _ConcatenateGenericAlias(list): - - # Trick Generic into looking into this for __parameters__. - __class__ = typing._GenericAlias - - # Flag in 3.8. - _special = False - - def __init__(self, origin, args): - super().__init__(args) - self.__origin__ = origin - self.__args__ = args - - def __repr__(self): - _type_repr = typing._type_repr - return (f'{_type_repr(self.__origin__)}' - f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]') - - def __hash__(self): - return hash((self.__origin__, self.__args__)) - - # Hack to get typing._type_check to pass in Generic. - def __call__(self, *args, **kwargs): - pass - - @property - def __parameters__(self): - return tuple( - tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec)) - ) - - -# 3.8-3.9 -@typing._tp_cache -def _concatenate_getitem(self, parameters): - if parameters == (): - raise TypeError("Cannot take a Concatenate of no types.") - if not isinstance(parameters, tuple): - parameters = (parameters,) - if not isinstance(parameters[-1], ParamSpec): - raise TypeError("The last parameter to Concatenate should be a " - "ParamSpec variable.") - msg = "Concatenate[arg, ...]: each arg must be a type." 
- parameters = tuple(typing._type_check(p, msg) for p in parameters) - return _ConcatenateGenericAlias(self, parameters) - - -# 3.10+ -if hasattr(typing, 'Concatenate'): - Concatenate = typing.Concatenate - _ConcatenateGenericAlias = typing._ConcatenateGenericAlias -# 3.9 -elif sys.version_info[:2] >= (3, 9): - @_ExtensionsSpecialForm - def Concatenate(self, parameters): - """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a - higher order function which adds, removes or transforms parameters of a - callable. - - For example:: - - Callable[Concatenate[int, P], int] - - See PEP 612 for detailed information. - """ - return _concatenate_getitem(self, parameters) -# 3.8 -else: - class _ConcatenateForm(_ExtensionsSpecialForm, _root=True): - def __getitem__(self, parameters): - return _concatenate_getitem(self, parameters) - - Concatenate = _ConcatenateForm( - 'Concatenate', - doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a - higher order function which adds, removes or transforms parameters of a - callable. - - For example:: - - Callable[Concatenate[int, P], int] - - See PEP 612 for detailed information. - """) - -# 3.10+ -if hasattr(typing, 'TypeGuard'): - TypeGuard = typing.TypeGuard -# 3.9 -elif sys.version_info[:2] >= (3, 9): - @_ExtensionsSpecialForm - def TypeGuard(self, parameters): - """Special typing form used to annotate the return type of a user-defined - type guard function. ``TypeGuard`` only accepts a single type argument. - At runtime, functions marked this way should return a boolean. - - ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static - type checkers to determine a more precise type of an expression within a - program's code flow. Usually type narrowing is done by analyzing - conditional code flow and applying the narrowing to a block of code. The - conditional expression here is sometimes referred to as a "type guard". - - Sometimes it would be convenient to use a user-defined boolean function - as a type guard. Such a function should use ``TypeGuard[...]`` as its - return type to alert static type checkers to this intention. - - Using ``-> TypeGuard`` tells the static type checker that for a given - function: - - 1. The return value is a boolean. - 2. If the return value is ``True``, the type of its argument - is the type inside ``TypeGuard``. - - For example:: - - def is_str(val: Union[str, float]): - # "isinstance" type guard - if isinstance(val, str): - # Type of ``val`` is narrowed to ``str`` - ... - else: - # Else, type of ``val`` is narrowed to ``float``. - ... - - Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower - form of ``TypeA`` (it can even be a wider form) and this may lead to - type-unsafe results. The main reason is to allow for things like - narrowing ``List[object]`` to ``List[str]`` even though the latter is not - a subtype of the former, since ``List`` is invariant. The responsibility of - writing type-safe type guards is left to the user. - - ``TypeGuard`` also works with type variables. For more information, see - PEP 647 (User-Defined Type Guards). 
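A short, self-contained sketch of the narrowing pattern the docstring above describes; ``is_str_list`` and ``upper_all`` are illustrative names, not part of the vendored module::

    from typing import List
    from typing_extensions import TypeGuard

    def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
        # True only when every element is a str
        return all(isinstance(x, str) for x in val)

    def upper_all(val: List[object]) -> List[str]:
        if is_str_list(val):
            return [x.upper() for x in val]  # checkers treat val as List[str] here
        return []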
-    """
-    item = typing._type_check(parameters, f'{self} accepts only a single type.')
-    return typing._GenericAlias(self, (item,))
-# 3.8
-else:
-    class _TypeGuardForm(_ExtensionsSpecialForm, _root=True):
-        def __getitem__(self, parameters):
-            item = typing._type_check(parameters,
-                                      f'{self._name} accepts only a single type')
-            return typing._GenericAlias(self, (item,))
-
-    TypeGuard = _TypeGuardForm(
-        'TypeGuard',
-        doc="""Special typing form used to annotate the return type of a user-defined
-        type guard function. ``TypeGuard`` only accepts a single type argument.
-        At runtime, functions marked this way should return a boolean.
-
-        ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
-        type checkers to determine a more precise type of an expression within a
-        program's code flow. Usually type narrowing is done by analyzing
-        conditional code flow and applying the narrowing to a block of code. The
-        conditional expression here is sometimes referred to as a "type guard".
-
-        Sometimes it would be convenient to use a user-defined boolean function
-        as a type guard. Such a function should use ``TypeGuard[...]`` as its
-        return type to alert static type checkers to this intention.
-
-        Using ``-> TypeGuard`` tells the static type checker that for a given
-        function:
-
-        1. The return value is a boolean.
-        2. If the return value is ``True``, the type of its argument
-           is the type inside ``TypeGuard``.
-
-        For example::
-
-            def is_str(val: Union[str, float]):
-                # "isinstance" type guard
-                if isinstance(val, str):
-                    # Type of ``val`` is narrowed to ``str``
-                    ...
-                else:
-                    # Else, type of ``val`` is narrowed to ``float``.
-                    ...
-
-        Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
-        form of ``TypeA`` (it can even be a wider form) and this may lead to
-        type-unsafe results. The main reason is to allow for things like
-        narrowing ``List[object]`` to ``List[str]`` even though the latter is not
-        a subtype of the former, since ``List`` is invariant. The responsibility of
-        writing type-safe type guards is left to the user.
-
-        ``TypeGuard`` also works with type variables. For more information, see
-        PEP 647 (User-Defined Type Guards).
-        """)
-
-# 3.13+
-if hasattr(typing, 'TypeIs'):
-    TypeIs = typing.TypeIs
-# 3.9
-elif sys.version_info[:2] >= (3, 9):
-    @_ExtensionsSpecialForm
-    def TypeIs(self, parameters):
-        """Special typing form used to annotate the return type of a user-defined
-        type narrower function. ``TypeIs`` only accepts a single type argument.
-        At runtime, functions marked this way should return a boolean.
-
-        ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static
-        type checkers to determine a more precise type of an expression within a
-        program's code flow. Usually type narrowing is done by analyzing
-        conditional code flow and applying the narrowing to a block of code. The
-        conditional expression here is sometimes referred to as a "type guard".
-
-        Sometimes it would be convenient to use a user-defined boolean function
-        as a type guard. Such a function should use ``TypeIs[...]`` as its
-        return type to alert static type checkers to this intention.
-
-        Using ``-> TypeIs`` tells the static type checker that for a given
-        function:
-
-        1. The return value is a boolean.
-        2. If the return value is ``True``, the type of its argument
-           is the intersection of the type inside ``TypeIs`` and the argument's
-           previously known type.
-
-        For example::
-
-            def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]:
-                return hasattr(val, '__await__')
-
-            def f(val: Union[int, Awaitable[int]]) -> int:
-                if is_awaitable(val):
-                    assert_type(val, Awaitable[int])
-                else:
-                    assert_type(val, int)
-
-        ``TypeIs`` also works with type variables. For more information, see
-        PEP 742 (Narrowing types with TypeIs).
-        """
-        item = typing._type_check(parameters, f'{self} accepts only a single type.')
-        return typing._GenericAlias(self, (item,))
-# 3.8
-else:
-    class _TypeIsForm(_ExtensionsSpecialForm, _root=True):
-        def __getitem__(self, parameters):
-            item = typing._type_check(parameters,
-                                      f'{self._name} accepts only a single type')
-            return typing._GenericAlias(self, (item,))
-
-    TypeIs = _TypeIsForm(
-        'TypeIs',
-        doc="""Special typing form used to annotate the return type of a user-defined
-        type narrower function. ``TypeIs`` only accepts a single type argument.
-        At runtime, functions marked this way should return a boolean.
-
-        ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static
-        type checkers to determine a more precise type of an expression within a
-        program's code flow. Usually type narrowing is done by analyzing
-        conditional code flow and applying the narrowing to a block of code. The
-        conditional expression here is sometimes referred to as a "type guard".
-
-        Sometimes it would be convenient to use a user-defined boolean function
-        as a type guard. Such a function should use ``TypeIs[...]`` as its
-        return type to alert static type checkers to this intention.
-
-        Using ``-> TypeIs`` tells the static type checker that for a given
-        function:
-
-        1. The return value is a boolean.
-        2. If the return value is ``True``, the type of its argument
-           is the intersection of the type inside ``TypeIs`` and the argument's
-           previously known type.
-
-        For example::
-
-            def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]:
-                return hasattr(val, '__await__')
-
-            def f(val: Union[int, Awaitable[int]]) -> int:
-                if is_awaitable(val):
-                    assert_type(val, Awaitable[int])
-                else:
-                    assert_type(val, int)
-
-        ``TypeIs`` also works with type variables. For more information, see
-        PEP 742 (Narrowing types with TypeIs).
-        """)
-
-
-# Vendored from cpython typing._SpecialForm
-class _SpecialForm(typing._Final, _root=True):
-    __slots__ = ('_name', '__doc__', '_getitem')
-
-    def __init__(self, getitem):
-        self._getitem = getitem
-        self._name = getitem.__name__
-        self.__doc__ = getitem.__doc__
-
-    def __getattr__(self, item):
-        if item in {'__name__', '__qualname__'}:
-            return self._name
-
-        raise AttributeError(item)
-
-    def __mro_entries__(self, bases):
-        raise TypeError(f"Cannot subclass {self!r}")
-
-    def __repr__(self):
-        return f'typing_extensions.{self._name}'
-
-    def __reduce__(self):
-        return self._name
-
-    def __call__(self, *args, **kwds):
-        raise TypeError(f"Cannot instantiate {self!r}")
-
-    def __or__(self, other):
-        return typing.Union[self, other]
-
-    def __ror__(self, other):
-        return typing.Union[other, self]
-
-    def __instancecheck__(self, obj):
-        raise TypeError(f"{self} cannot be used with isinstance()")
-
-    def __subclasscheck__(self, cls):
-        raise TypeError(f"{self} cannot be used with issubclass()")
-
-    @typing._tp_cache
-    def __getitem__(self, parameters):
-        return self._getitem(self, parameters)
-
-
-if hasattr(typing, "LiteralString"):  # 3.11+
-    LiteralString = typing.LiteralString
-else:
-    @_SpecialForm
-    def LiteralString(self, params):
-        """Represents an arbitrary literal string.
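The vendored ``_SpecialForm`` above routes ``__or__``/``__ror__`` through ``typing.Union``, so forms defined with it compose with PEP 604 union syntax even on interpreters that predate it. A minimal sketch, assuming ``typing_extensions`` is importable::

    import typing
    from typing_extensions import LiteralString

    # The | operator builds an ordinary typing.Union at runtime
    assert (LiteralString | None) == typing.Optional[LiteralString]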
- - Example:: - - from typing_extensions import LiteralString - - def query(sql: LiteralString) -> ...: - ... - - query("SELECT * FROM table") # ok - query(f"SELECT * FROM {input()}") # not ok - - See PEP 675 for details. - - """ - raise TypeError(f"{self} is not subscriptable") - - -if hasattr(typing, "Self"): # 3.11+ - Self = typing.Self -else: - @_SpecialForm - def Self(self, params): - """Used to spell the type of "self" in classes. - - Example:: - - from typing import Self - - class ReturnsSelf: - def parse(self, data: bytes) -> Self: - ... - return self - - """ - - raise TypeError(f"{self} is not subscriptable") - - -if hasattr(typing, "Never"): # 3.11+ - Never = typing.Never -else: - @_SpecialForm - def Never(self, params): - """The bottom type, a type that has no members. - - This can be used to define a function that should never be - called, or a function that never returns:: - - from typing_extensions import Never - - def never_call_me(arg: Never) -> None: - pass - - def int_or_str(arg: int | str) -> None: - never_call_me(arg) # type checker error - match arg: - case int(): - print("It's an int") - case str(): - print("It's a str") - case _: - never_call_me(arg) # ok, arg is of type Never - - """ - - raise TypeError(f"{self} is not subscriptable") - - -if hasattr(typing, 'Required'): # 3.11+ - Required = typing.Required - NotRequired = typing.NotRequired -elif sys.version_info[:2] >= (3, 9): # 3.9-3.10 - @_ExtensionsSpecialForm - def Required(self, parameters): - """A special typing construct to mark a key of a total=False TypedDict - as required. For example: - - class Movie(TypedDict, total=False): - title: Required[str] - year: int - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - - There is no runtime checking that a required key is actually provided - when instantiating a related TypedDict. - """ - item = typing._type_check(parameters, f'{self._name} accepts only a single type.') - return typing._GenericAlias(self, (item,)) - - @_ExtensionsSpecialForm - def NotRequired(self, parameters): - """A special typing construct to mark a key of a TypedDict as - potentially missing. For example: - - class Movie(TypedDict): - title: str - year: NotRequired[int] - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - """ - item = typing._type_check(parameters, f'{self._name} accepts only a single type.') - return typing._GenericAlias(self, (item,)) - -else: # 3.8 - class _RequiredForm(_ExtensionsSpecialForm, _root=True): - def __getitem__(self, parameters): - item = typing._type_check(parameters, - f'{self._name} accepts only a single type.') - return typing._GenericAlias(self, (item,)) - - Required = _RequiredForm( - 'Required', - doc="""A special typing construct to mark a key of a total=False TypedDict - as required. For example: - - class Movie(TypedDict, total=False): - title: Required[str] - year: int - - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - - There is no runtime checking that a required key is actually provided - when instantiating a related TypedDict. - """) - NotRequired = _RequiredForm( - 'NotRequired', - doc="""A special typing construct to mark a key of a TypedDict as - potentially missing. 
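A compact sketch of the two TypedDict markers documented above; ``Movie`` and ``Book`` are illustrative names, not part of the vendored module::

    from typing_extensions import NotRequired, Required, TypedDict

    class Movie(TypedDict, total=False):
        title: Required[str]    # must be present despite total=False
        year: int               # optional

    class Book(TypedDict):
        title: str              # required
        isbn: NotRequired[str]  # may be omitted despite totality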
For example:
-
-            class Movie(TypedDict):
-                title: str
-                year: NotRequired[int]
-
-            m = Movie(
-                title='The Matrix',  # typechecker error if key is omitted
-                year=1999,
-            )
-        """)
-
-
-if hasattr(typing, 'ReadOnly'):
-    ReadOnly = typing.ReadOnly
-elif sys.version_info[:2] >= (3, 9):  # 3.9-3.12
-    @_ExtensionsSpecialForm
-    def ReadOnly(self, parameters):
-        """A special typing construct to mark an item of a TypedDict as read-only.
-
-        For example:
-
-            class Movie(TypedDict):
-                title: ReadOnly[str]
-                year: int
-
-            def mutate_movie(m: Movie) -> None:
-                m["year"] = 1992  # allowed
-                m["title"] = "The Matrix"  # typechecker error
-
-        There is no runtime checking for this property.
-        """
-        item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
-        return typing._GenericAlias(self, (item,))
-
-else:  # 3.8
-    class _ReadOnlyForm(_ExtensionsSpecialForm, _root=True):
-        def __getitem__(self, parameters):
-            item = typing._type_check(parameters,
-                                      f'{self._name} accepts only a single type.')
-            return typing._GenericAlias(self, (item,))
-
-    ReadOnly = _ReadOnlyForm(
-        'ReadOnly',
-        doc="""A special typing construct to mark a key of a TypedDict as read-only.
-
-        For example:
-
-            class Movie(TypedDict):
-                title: ReadOnly[str]
-                year: int
-
-            def mutate_movie(m: Movie) -> None:
-                m["year"] = 1992  # allowed
-                m["title"] = "The Matrix"  # typechecker error
-
-        There is no runtime checking for this property.
-        """)
-
-
-_UNPACK_DOC = """\
-Type unpack operator.
-
-The type unpack operator takes the child types from some container type,
-such as `tuple[int, str]` or a `TypeVarTuple`, and 'pulls them out'. For
-example:
-
-  # For some generic class `Foo`:
-  Foo[Unpack[tuple[int, str]]]  # Equivalent to Foo[int, str]
-
-  Ts = TypeVarTuple('Ts')
-  # Specifies that `Bar` is generic in an arbitrary number of types.
-  # (Think of `Ts` as a tuple of an arbitrary number of individual
-  #  `TypeVar`s, which the `Unpack` is 'pulling out' directly into the
-  #  `Generic[]`.)
-  class Bar(Generic[Unpack[Ts]]): ...
-  Bar[int]  # Valid
-  Bar[int, str]  # Also valid
-
-From Python 3.11, this can also be done using the `*` operator:
-
-  Foo[*tuple[int, str]]
-  class Bar(Generic[*Ts]): ...
-
-The operator can also be used along with a `TypedDict` to annotate
-`**kwargs` in a function signature. For instance:
-
-  class Movie(TypedDict):
-    name: str
-    year: int
-
-  # This function expects two keyword arguments - *name* of type `str` and
-  # *year* of type `int`.
-  def foo(**kwargs: Unpack[Movie]): ...
-
-Note that there is only some runtime checking of this operator. Not
-everything the runtime allows may be accepted by static type checkers.
-
-For more information, see PEP 646 and PEP 692.
-"""
-
-
-if sys.version_info >= (3, 12):  # PEP 692 changed the repr of Unpack[]
-    Unpack = typing.Unpack
-
-    def _is_unpack(obj):
-        return get_origin(obj) is Unpack
-
-elif sys.version_info[:2] >= (3, 9):  # 3.9+
-    class _UnpackSpecialForm(_ExtensionsSpecialForm, _root=True):
-        def __init__(self, getitem):
-            super().__init__(getitem)
-            self.__doc__ = _UNPACK_DOC
-
-    class _UnpackAlias(typing._GenericAlias, _root=True):
-        __class__ = typing.TypeVar
-
-        @property
-        def __typing_unpacked_tuple_args__(self):
-            assert self.__origin__ is Unpack
-            assert len(self.__args__) == 1
-            arg, = self.__args__
-            if isinstance(arg, (typing._GenericAlias, _types.GenericAlias)):
-                if arg.__origin__ is not tuple:
-                    raise TypeError("Unpack[...]
must be used with a tuple type") - return arg.__args__ - return None - - @_UnpackSpecialForm - def Unpack(self, parameters): - item = typing._type_check(parameters, f'{self._name} accepts only a single type.') - return _UnpackAlias(self, (item,)) - - def _is_unpack(obj): - return isinstance(obj, _UnpackAlias) - -else: # 3.8 - class _UnpackAlias(typing._GenericAlias, _root=True): - __class__ = typing.TypeVar - - class _UnpackForm(_ExtensionsSpecialForm, _root=True): - def __getitem__(self, parameters): - item = typing._type_check(parameters, - f'{self._name} accepts only a single type.') - return _UnpackAlias(self, (item,)) - - Unpack = _UnpackForm('Unpack', doc=_UNPACK_DOC) - - def _is_unpack(obj): - return isinstance(obj, _UnpackAlias) - - -if _PEP_696_IMPLEMENTED: - from typing import TypeVarTuple - -elif hasattr(typing, "TypeVarTuple"): # 3.11+ - - def _unpack_args(*args): - newargs = [] - for arg in args: - subargs = getattr(arg, '__typing_unpacked_tuple_args__', None) - if subargs is not None and not (subargs and subargs[-1] is ...): - newargs.extend(subargs) - else: - newargs.append(arg) - return newargs - - # Add default parameter - PEP 696 - class TypeVarTuple(metaclass=_TypeVarLikeMeta): - """Type variable tuple.""" - - _backported_typevarlike = typing.TypeVarTuple - - def __new__(cls, name, *, default=NoDefault): - tvt = typing.TypeVarTuple(name) - _set_default(tvt, default) - _set_module(tvt) - - def _typevartuple_prepare_subst(alias, args): - params = alias.__parameters__ - typevartuple_index = params.index(tvt) - for param in params[typevartuple_index + 1:]: - if isinstance(param, TypeVarTuple): - raise TypeError( - f"More than one TypeVarTuple parameter in {alias}" - ) - - alen = len(args) - plen = len(params) - left = typevartuple_index - right = plen - typevartuple_index - 1 - var_tuple_index = None - fillarg = None - for k, arg in enumerate(args): - if not isinstance(arg, type): - subargs = getattr(arg, '__typing_unpacked_tuple_args__', None) - if subargs and len(subargs) == 2 and subargs[-1] is ...: - if var_tuple_index is not None: - raise TypeError( - "More than one unpacked " - "arbitrary-length tuple argument" - ) - var_tuple_index = k - fillarg = subargs[0] - if var_tuple_index is not None: - left = min(left, var_tuple_index) - right = min(right, alen - var_tuple_index - 1) - elif left + right > alen: - raise TypeError(f"Too few arguments for {alias};" - f" actual {alen}, expected at least {plen - 1}") - if left == alen - right and tvt.has_default(): - replacement = _unpack_args(tvt.__default__) - else: - replacement = args[left: alen - right] - - return ( - *args[:left], - *([fillarg] * (typevartuple_index - left)), - replacement, - *([fillarg] * (plen - right - left - typevartuple_index - 1)), - *args[alen - right:], - ) - - tvt.__typing_prepare_subst__ = _typevartuple_prepare_subst - return tvt - - def __init_subclass__(self, *args, **kwds): - raise TypeError("Cannot subclass special typing classes") - -else: # <=3.10 - class TypeVarTuple(_DefaultMixin): - """Type variable tuple. - - Usage:: - - Ts = TypeVarTuple('Ts') - - In the same way that a normal type variable is a stand-in for a single - type such as ``int``, a type variable *tuple* is a stand-in for a *tuple* - type such as ``Tuple[int, str]``. - - Type variable tuples can be used in ``Generic`` declarations. - Consider the following example:: - - class Array(Generic[*Ts]): ... - - The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``, - where ``T1`` and ``T2`` are type variables. 
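A minimal sketch of variadic generics with the backported ``Unpack``/``TypeVarTuple``; ``prepend`` is an illustrative name, and ``Unpack[Ts]`` can be spelled ``*Ts`` on 3.11+::

    from typing import Tuple
    from typing_extensions import TypeVarTuple, Unpack

    Ts = TypeVarTuple('Ts')

    def prepend(x: int, xs: Tuple[Unpack[Ts]]) -> Tuple[int, Unpack[Ts]]:
        # The tuple grows by one int on the left; Ts carries the rest
        return (x, *xs)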
To use these type variables - as type parameters of ``Array``, we must *unpack* the type variable tuple using - the star operator: ``*Ts``. The signature of ``Array`` then behaves - as if we had simply written ``class Array(Generic[T1, T2]): ...``. - In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows - us to parameterise the class with an *arbitrary* number of type parameters. - - Type variable tuples can be used anywhere a normal ``TypeVar`` can. - This includes class definitions, as shown above, as well as function - signatures and variable annotations:: - - class Array(Generic[*Ts]): - - def __init__(self, shape: Tuple[*Ts]): - self._shape: Tuple[*Ts] = shape - - def get_shape(self) -> Tuple[*Ts]: - return self._shape - - shape = (Height(480), Width(640)) - x: Array[Height, Width] = Array(shape) - y = abs(x) # Inferred type is Array[Height, Width] - z = x + x # ... is Array[Height, Width] - x.get_shape() # ... is tuple[Height, Width] - - """ - - # Trick Generic __parameters__. - __class__ = typing.TypeVar - - def __iter__(self): - yield self.__unpacked__ - - def __init__(self, name, *, default=NoDefault): - self.__name__ = name - _DefaultMixin.__init__(self, default) - - # for pickling: - def_mod = _caller() - if def_mod != 'typing_extensions': - self.__module__ = def_mod - - self.__unpacked__ = Unpack[self] - - def __repr__(self): - return self.__name__ - - def __hash__(self): - return object.__hash__(self) - - def __eq__(self, other): - return self is other - - def __reduce__(self): - return self.__name__ - - def __init_subclass__(self, *args, **kwds): - if '_root' not in kwds: - raise TypeError("Cannot subclass special typing classes") - - -if hasattr(typing, "reveal_type"): # 3.11+ - reveal_type = typing.reveal_type -else: # <=3.10 - def reveal_type(obj: T, /) -> T: - """Reveal the inferred type of a variable. - - When a static type checker encounters a call to ``reveal_type()``, - it will emit the inferred type of the argument:: - - x: int = 1 - reveal_type(x) - - Running a static type checker (e.g., ``mypy``) on this example - will produce output similar to 'Revealed type is "builtins.int"'. - - At runtime, the function prints the runtime type of the - argument and returns it unchanged. - - """ - print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr) - return obj - - -if hasattr(typing, "_ASSERT_NEVER_REPR_MAX_LENGTH"): # 3.11+ - _ASSERT_NEVER_REPR_MAX_LENGTH = typing._ASSERT_NEVER_REPR_MAX_LENGTH -else: # <=3.10 - _ASSERT_NEVER_REPR_MAX_LENGTH = 100 - - -if hasattr(typing, "assert_never"): # 3.11+ - assert_never = typing.assert_never -else: # <=3.10 - def assert_never(arg: Never, /) -> Never: - """Assert to the type checker that a line of code is unreachable. - - Example:: - - def int_or_str(arg: int | str) -> None: - match arg: - case int(): - print("It's an int") - case str(): - print("It's a str") - case _: - assert_never(arg) - - If a type checker finds that a call to assert_never() is - reachable, it will emit an error. - - At runtime, this throws an exception when called. - - """ - value = repr(arg) - if len(value) > _ASSERT_NEVER_REPR_MAX_LENGTH: - value = value[:_ASSERT_NEVER_REPR_MAX_LENGTH] + '...' 
- raise AssertionError(f"Expected code to be unreachable, but got: {value}") - - -if sys.version_info >= (3, 12): # 3.12+ - # dataclass_transform exists in 3.11 but lacks the frozen_default parameter - dataclass_transform = typing.dataclass_transform -else: # <=3.11 - def dataclass_transform( - *, - eq_default: bool = True, - order_default: bool = False, - kw_only_default: bool = False, - frozen_default: bool = False, - field_specifiers: typing.Tuple[ - typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]], - ... - ] = (), - **kwargs: typing.Any, - ) -> typing.Callable[[T], T]: - """Decorator that marks a function, class, or metaclass as providing - dataclass-like behavior. - - Example: - - from typing_extensions import dataclass_transform - - _T = TypeVar("_T") - - # Used on a decorator function - @dataclass_transform() - def create_model(cls: type[_T]) -> type[_T]: - ... - return cls - - @create_model - class CustomerModel: - id: int - name: str - - # Used on a base class - @dataclass_transform() - class ModelBase: ... - - class CustomerModel(ModelBase): - id: int - name: str - - # Used on a metaclass - @dataclass_transform() - class ModelMeta(type): ... - - class ModelBase(metaclass=ModelMeta): ... - - class CustomerModel(ModelBase): - id: int - name: str - - Each of the ``CustomerModel`` classes defined in this example will now - behave similarly to a dataclass created with the ``@dataclasses.dataclass`` - decorator. For example, the type checker will synthesize an ``__init__`` - method. - - The arguments to this decorator can be used to customize this behavior: - - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be - True or False if it is omitted by the caller. - - ``order_default`` indicates whether the ``order`` parameter is - assumed to be True or False if it is omitted by the caller. - - ``kw_only_default`` indicates whether the ``kw_only`` parameter is - assumed to be True or False if it is omitted by the caller. - - ``frozen_default`` indicates whether the ``frozen`` parameter is - assumed to be True or False if it is omitted by the caller. - - ``field_specifiers`` specifies a static list of supported classes - or functions that describe fields, similar to ``dataclasses.field()``. - - At runtime, this decorator records its arguments in the - ``__dataclass_transform__`` attribute on the decorated object. - - See PEP 681 for details. - - """ - def decorator(cls_or_fn): - cls_or_fn.__dataclass_transform__ = { - "eq_default": eq_default, - "order_default": order_default, - "kw_only_default": kw_only_default, - "frozen_default": frozen_default, - "field_specifiers": field_specifiers, - "kwargs": kwargs, - } - return cls_or_fn - return decorator - - -if hasattr(typing, "override"): # 3.12+ - override = typing.override -else: # <=3.11 - _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any]) - - def override(arg: _F, /) -> _F: - """Indicate that a method is intended to override a method in a base class. - - Usage: - - class Base: - def method(self) -> None: - pass - - class Child(Base): - @override - def method(self) -> None: - super().method() - - When this decorator is applied to a method, the type checker will - validate that it overrides a method with the same name on a base class. - This helps prevent bugs that may occur when a base class is changed - without an equivalent change to a child class. - - There is no runtime checking of these properties. 
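Because the fallback above records its arguments verbatim, the effect of ``dataclass_transform`` is easy to observe at runtime. A sketch (``create_model`` is an illustrative name)::

    from typing_extensions import dataclass_transform

    @dataclass_transform(kw_only_default=True)
    def create_model(cls):
        return cls

    # The decorator only records its configuration for introspection:
    assert create_model.__dataclass_transform__["kw_only_default"] is True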
The decorator - sets the ``__override__`` attribute to ``True`` on the decorated object - to allow runtime introspection. - - See PEP 698 for details. - - """ - try: - arg.__override__ = True - except (AttributeError, TypeError): - # Skip the attribute silently if it is not writable. - # AttributeError happens if the object has __slots__ or a - # read-only property, TypeError if it's a builtin class. - pass - return arg - - -if hasattr(warnings, "deprecated"): - deprecated = warnings.deprecated -else: - _T = typing.TypeVar("_T") - - class deprecated: - """Indicate that a class, function or overload is deprecated. - - When this decorator is applied to an object, the type checker - will generate a diagnostic on usage of the deprecated object. - - Usage: - - @deprecated("Use B instead") - class A: - pass - - @deprecated("Use g instead") - def f(): - pass - - @overload - @deprecated("int support is deprecated") - def g(x: int) -> int: ... - @overload - def g(x: str) -> int: ... - - The warning specified by *category* will be emitted at runtime - on use of deprecated objects. For functions, that happens on calls; - for classes, on instantiation and on creation of subclasses. - If the *category* is ``None``, no warning is emitted at runtime. - The *stacklevel* determines where the - warning is emitted. If it is ``1`` (the default), the warning - is emitted at the direct caller of the deprecated object; if it - is higher, it is emitted further up the stack. - Static type checker behavior is not affected by the *category* - and *stacklevel* arguments. - - The deprecation message passed to the decorator is saved in the - ``__deprecated__`` attribute on the decorated object. - If applied to an overload, the decorator - must be after the ``@overload`` decorator for the attribute to - exist on the overload as returned by ``get_overloads()``. - - See PEP 702 for details. - - """ - def __init__( - self, - message: str, - /, - *, - category: typing.Optional[typing.Type[Warning]] = DeprecationWarning, - stacklevel: int = 1, - ) -> None: - if not isinstance(message, str): - raise TypeError( - "Expected an object of type str for 'message', not " - f"{type(message).__name__!r}" - ) - self.message = message - self.category = category - self.stacklevel = stacklevel - - def __call__(self, arg: _T, /) -> _T: - # Make sure the inner functions created below don't - # retain a reference to self. - msg = self.message - category = self.category - stacklevel = self.stacklevel - if category is None: - arg.__deprecated__ = msg - return arg - elif isinstance(arg, type): - import functools - from types import MethodType - - original_new = arg.__new__ - - @functools.wraps(original_new) - def __new__(cls, *args, **kwargs): - if cls is arg: - warnings.warn(msg, category=category, stacklevel=stacklevel + 1) - if original_new is not object.__new__: - return original_new(cls, *args, **kwargs) - # Mirrors a similar check in object.__new__. 
- elif cls.__init__ is object.__init__ and (args or kwargs): - raise TypeError(f"{cls.__name__}() takes no arguments") - else: - return original_new(cls) - - arg.__new__ = staticmethod(__new__) - - original_init_subclass = arg.__init_subclass__ - # We need slightly different behavior if __init_subclass__ - # is a bound method (likely if it was implemented in Python) - if isinstance(original_init_subclass, MethodType): - original_init_subclass = original_init_subclass.__func__ - - @functools.wraps(original_init_subclass) - def __init_subclass__(*args, **kwargs): - warnings.warn(msg, category=category, stacklevel=stacklevel + 1) - return original_init_subclass(*args, **kwargs) - - arg.__init_subclass__ = classmethod(__init_subclass__) - # Or otherwise, which likely means it's a builtin such as - # object's implementation of __init_subclass__. - else: - @functools.wraps(original_init_subclass) - def __init_subclass__(*args, **kwargs): - warnings.warn(msg, category=category, stacklevel=stacklevel + 1) - return original_init_subclass(*args, **kwargs) - - arg.__init_subclass__ = __init_subclass__ - - arg.__deprecated__ = __new__.__deprecated__ = msg - __init_subclass__.__deprecated__ = msg - return arg - elif callable(arg): - import functools - - @functools.wraps(arg) - def wrapper(*args, **kwargs): - warnings.warn(msg, category=category, stacklevel=stacklevel + 1) - return arg(*args, **kwargs) - - arg.__deprecated__ = wrapper.__deprecated__ = msg - return wrapper - else: - raise TypeError( - "@deprecated decorator with non-None category must be applied to " - f"a class or callable, not {arg!r}" - ) - - -# We have to do some monkey patching to deal with the dual nature of -# Unpack/TypeVarTuple: -# - We want Unpack to be a kind of TypeVar so it gets accepted in -# Generic[Unpack[Ts]] -# - We want it to *not* be treated as a TypeVar for the purposes of -# counting generic parameters, so that when we subscript a generic, -# the runtime doesn't try to substitute the Unpack with the subscripted type. -if not hasattr(typing, "TypeVarTuple"): - def _check_generic(cls, parameters, elen=_marker): - """Check correct count for parameters of a generic cls (internal helper). - - This gives a nice error message in case of count mismatch. - """ - if not elen: - raise TypeError(f"{cls} is not a generic class") - if elen is _marker: - if not hasattr(cls, "__parameters__") or not cls.__parameters__: - raise TypeError(f"{cls} is not a generic class") - elen = len(cls.__parameters__) - alen = len(parameters) - if alen != elen: - expect_val = elen - if hasattr(cls, "__parameters__"): - parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] - num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters) - if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples): - return - - # deal with TypeVarLike defaults - # required TypeVarLikes cannot appear after a defaulted one. 
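Runtime behaviour of the ``@deprecated`` fallback above, sketched; ``old_func`` and the message are illustrative::

    import warnings
    from typing_extensions import deprecated

    @deprecated("Use new_func() instead")
    def old_func():
        return 42

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        old_func()

    assert old_func.__deprecated__ == "Use new_func() instead"
    assert issubclass(caught[0].category, DeprecationWarning)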
- if alen < elen: - # since we validate TypeVarLike default in _collect_type_vars - # or _collect_parameters we can safely check parameters[alen] - if ( - getattr(parameters[alen], '__default__', NoDefault) - is not NoDefault - ): - return - - num_default_tv = sum(getattr(p, '__default__', NoDefault) - is not NoDefault for p in parameters) - - elen -= num_default_tv - - expect_val = f"at least {elen}" - - things = "arguments" if sys.version_info >= (3, 10) else "parameters" - raise TypeError(f"Too {'many' if alen > elen else 'few'} {things}" - f" for {cls}; actual {alen}, expected {expect_val}") -else: - # Python 3.11+ - - def _check_generic(cls, parameters, elen): - """Check correct count for parameters of a generic cls (internal helper). - - This gives a nice error message in case of count mismatch. - """ - if not elen: - raise TypeError(f"{cls} is not a generic class") - alen = len(parameters) - if alen != elen: - expect_val = elen - if hasattr(cls, "__parameters__"): - parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] - - # deal with TypeVarLike defaults - # required TypeVarLikes cannot appear after a defaulted one. - if alen < elen: - # since we validate TypeVarLike default in _collect_type_vars - # or _collect_parameters we can safely check parameters[alen] - if ( - getattr(parameters[alen], '__default__', NoDefault) - is not NoDefault - ): - return - - num_default_tv = sum(getattr(p, '__default__', NoDefault) - is not NoDefault for p in parameters) - - elen -= num_default_tv - - expect_val = f"at least {elen}" - - raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments" - f" for {cls}; actual {alen}, expected {expect_val}") - -if not _PEP_696_IMPLEMENTED: - typing._check_generic = _check_generic - - -def _has_generic_or_protocol_as_origin() -> bool: - try: - frame = sys._getframe(2) - # - Catch AttributeError: not all Python implementations have sys._getframe() - # - Catch ValueError: maybe we're called from an unexpected module - # and the call stack isn't deep enough - except (AttributeError, ValueError): - return False # err on the side of leniency - else: - # If we somehow get invoked from outside typing.py, - # also err on the side of leniency - if frame.f_globals.get("__name__") != "typing": - return False - origin = frame.f_locals.get("origin") - # Cannot use "in" because origin may be an object with a buggy __eq__ that - # throws an error. - return origin is typing.Generic or origin is Protocol or origin is typing.Protocol - - -_TYPEVARTUPLE_TYPES = {TypeVarTuple, getattr(typing, "TypeVarTuple", None)} - - -def _is_unpacked_typevartuple(x) -> bool: - if get_origin(x) is not Unpack: - return False - args = get_args(x) - return ( - bool(args) - and len(args) == 1 - and type(args[0]) in _TYPEVARTUPLE_TYPES - ) - - -# Python 3.11+ _collect_type_vars was renamed to _collect_parameters -if hasattr(typing, '_collect_type_vars'): - def _collect_type_vars(types, typevar_types=None): - """Collect all type variable contained in types in order of - first appearance (lexicographic order). 
For example:: - - _collect_type_vars((T, List[S, T])) == (T, S) - """ - if typevar_types is None: - typevar_types = typing.TypeVar - tvars = [] - - # A required TypeVarLike cannot appear after a TypeVarLike with a default - # if it was a direct call to `Generic[]` or `Protocol[]` - enforce_default_ordering = _has_generic_or_protocol_as_origin() - default_encountered = False - - # Also, a TypeVarLike with a default cannot appear after a TypeVarTuple - type_var_tuple_encountered = False - - for t in types: - if _is_unpacked_typevartuple(t): - type_var_tuple_encountered = True - elif isinstance(t, typevar_types) and t not in tvars: - if enforce_default_ordering: - has_default = getattr(t, '__default__', NoDefault) is not NoDefault - if has_default: - if type_var_tuple_encountered: - raise TypeError('Type parameter with a default' - ' follows TypeVarTuple') - default_encountered = True - elif default_encountered: - raise TypeError(f'Type parameter {t!r} without a default' - ' follows type parameter with a default') - - tvars.append(t) - if _should_collect_from_parameters(t): - tvars.extend([t for t in t.__parameters__ if t not in tvars]) - return tuple(tvars) - - typing._collect_type_vars = _collect_type_vars -else: - def _collect_parameters(args): - """Collect all type variables and parameter specifications in args - in order of first appearance (lexicographic order). - - For example:: - - assert _collect_parameters((T, Callable[P, T])) == (T, P) - """ - parameters = [] - - # A required TypeVarLike cannot appear after a TypeVarLike with default - # if it was a direct call to `Generic[]` or `Protocol[]` - enforce_default_ordering = _has_generic_or_protocol_as_origin() - default_encountered = False - - # Also, a TypeVarLike with a default cannot appear after a TypeVarTuple - type_var_tuple_encountered = False - - for t in args: - if isinstance(t, type): - # We don't want __parameters__ descriptor of a bare Python class. - pass - elif isinstance(t, tuple): - # `t` might be a tuple, when `ParamSpec` is substituted with - # `[T, int]`, or `[int, *Ts]`, etc. - for x in t: - for collected in _collect_parameters([x]): - if collected not in parameters: - parameters.append(collected) - elif hasattr(t, '__typing_subst__'): - if t not in parameters: - if enforce_default_ordering: - has_default = ( - getattr(t, '__default__', NoDefault) is not NoDefault - ) - - if type_var_tuple_encountered and has_default: - raise TypeError('Type parameter with a default' - ' follows TypeVarTuple') - - if has_default: - default_encountered = True - elif default_encountered: - raise TypeError(f'Type parameter {t!r} without a default' - ' follows type parameter with a default') - - parameters.append(t) - else: - if _is_unpacked_typevartuple(t): - type_var_tuple_encountered = True - for x in getattr(t, '__parameters__', ()): - if x not in parameters: - parameters.append(x) - - return tuple(parameters) - - if not _PEP_696_IMPLEMENTED: - typing._collect_parameters = _collect_parameters - -# Backport typing.NamedTuple as it exists in Python 3.13. -# In 3.11, the ability to define generic `NamedTuple`s was supported. -# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8. 
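The ordering rule enforced by the collectors above can be seen directly; this sketch assumes the PEP 696 ``default=`` support provided by this module (``T`` and ``U`` are illustrative)::

    from typing import Generic
    from typing_extensions import TypeVar

    T = TypeVar('T')
    U = TypeVar('U', default=int)

    class Ok(Generic[T, U]):   # required parameter first: accepted
        pass

    # class Bad(Generic[U, T]): ...
    # would raise TypeError, because a type parameter without a default
    # may not follow one that has a default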
-# On 3.12, we added __orig_bases__ to call-based NamedTuples -# On 3.13, we deprecated kwargs-based NamedTuples -if sys.version_info >= (3, 13): - NamedTuple = typing.NamedTuple -else: - def _make_nmtuple(name, types, module, defaults=()): - fields = [n for n, t in types] - annotations = {n: typing._type_check(t, f"field {n} annotation must be a type") - for n, t in types} - nm_tpl = collections.namedtuple(name, fields, - defaults=defaults, module=module) - nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations - # The `_field_types` attribute was removed in 3.9; - # in earlier versions, it is the same as the `__annotations__` attribute - if sys.version_info < (3, 9): - nm_tpl._field_types = annotations - return nm_tpl - - _prohibited_namedtuple_fields = typing._prohibited - _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'}) - - class _NamedTupleMeta(type): - def __new__(cls, typename, bases, ns): - assert _NamedTuple in bases - for base in bases: - if base is not _NamedTuple and base is not typing.Generic: - raise TypeError( - 'can only inherit from a NamedTuple type and Generic') - bases = tuple(tuple if base is _NamedTuple else base for base in bases) - if "__annotations__" in ns: - types = ns["__annotations__"] - elif "__annotate__" in ns: - # TODO: Use inspect.VALUE here, and make the annotations lazily evaluated - types = ns["__annotate__"](1) - else: - types = {} - default_names = [] - for field_name in types: - if field_name in ns: - default_names.append(field_name) - elif default_names: - raise TypeError(f"Non-default namedtuple field {field_name} " - f"cannot follow default field" - f"{'s' if len(default_names) > 1 else ''} " - f"{', '.join(default_names)}") - nm_tpl = _make_nmtuple( - typename, types.items(), - defaults=[ns[n] for n in default_names], - module=ns['__module__'] - ) - nm_tpl.__bases__ = bases - if typing.Generic in bases: - if hasattr(typing, '_generic_class_getitem'): # 3.12+ - nm_tpl.__class_getitem__ = classmethod(typing._generic_class_getitem) - else: - class_getitem = typing.Generic.__class_getitem__.__func__ - nm_tpl.__class_getitem__ = classmethod(class_getitem) - # update from user namespace without overriding special namedtuple attributes - for key, val in ns.items(): - if key in _prohibited_namedtuple_fields: - raise AttributeError("Cannot overwrite NamedTuple attribute " + key) - elif key not in _special_namedtuple_fields: - if key not in nm_tpl._fields: - setattr(nm_tpl, key, ns[key]) - try: - set_name = type(val).__set_name__ - except AttributeError: - pass - else: - try: - set_name(val, nm_tpl, key) - except BaseException as e: - msg = ( - f"Error calling __set_name__ on {type(val).__name__!r} " - f"instance {key!r} in {typename!r}" - ) - # BaseException.add_note() existed on py311, - # but the __set_name__ machinery didn't start - # using add_note() until py312. - # Making sure exceptions are raised in the same way - # as in "normal" classes seems most important here. - if sys.version_info >= (3, 12): - e.add_note(msg) - raise - else: - raise RuntimeError(msg) from e - - if typing.Generic in bases: - nm_tpl.__init_subclass__() - return nm_tpl - - _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {}) - - def _namedtuple_mro_entries(bases): - assert NamedTuple in bases - return (_NamedTuple,) - - @_ensure_subclassable(_namedtuple_mro_entries) - def NamedTuple(typename, fields=_marker, /, **kwargs): - """Typed version of namedtuple. 
- - Usage:: - - class Employee(NamedTuple): - name: str - id: int - - This is equivalent to:: - - Employee = collections.namedtuple('Employee', ['name', 'id']) - - The resulting class has an extra __annotations__ attribute, giving a - dict that maps field names to types. (The field names are also in - the _fields attribute, which is part of the namedtuple API.) - An alternative equivalent functional syntax is also accepted:: - - Employee = NamedTuple('Employee', [('name', str), ('id', int)]) - """ - if fields is _marker: - if kwargs: - deprecated_thing = "Creating NamedTuple classes using keyword arguments" - deprecation_msg = ( - "{name} is deprecated and will be disallowed in Python {remove}. " - "Use the class-based or functional syntax instead." - ) - else: - deprecated_thing = "Failing to pass a value for the 'fields' parameter" - example = f"`{typename} = NamedTuple({typename!r}, [])`" - deprecation_msg = ( - "{name} is deprecated and will be disallowed in Python {remove}. " - "To create a NamedTuple class with 0 fields " - "using the functional syntax, " - "pass an empty list, e.g. " - ) + example + "." - elif fields is None: - if kwargs: - raise TypeError( - "Cannot pass `None` as the 'fields' parameter " - "and also specify fields using keyword arguments" - ) - else: - deprecated_thing = "Passing `None` as the 'fields' parameter" - example = f"`{typename} = NamedTuple({typename!r}, [])`" - deprecation_msg = ( - "{name} is deprecated and will be disallowed in Python {remove}. " - "To create a NamedTuple class with 0 fields " - "using the functional syntax, " - "pass an empty list, e.g. " - ) + example + "." - elif kwargs: - raise TypeError("Either list of fields or keywords" - " can be provided to NamedTuple, not both") - if fields is _marker or fields is None: - warnings.warn( - deprecation_msg.format(name=deprecated_thing, remove="3.15"), - DeprecationWarning, - stacklevel=2, - ) - fields = kwargs.items() - nt = _make_nmtuple(typename, fields, module=_caller()) - nt.__orig_bases__ = (NamedTuple,) - return nt - - -if hasattr(collections.abc, "Buffer"): - Buffer = collections.abc.Buffer -else: - class Buffer(abc.ABC): # noqa: B024 - """Base class for classes that implement the buffer protocol. - - The buffer protocol allows Python objects to expose a low-level - memory buffer interface. Before Python 3.12, it is not possible - to implement the buffer protocol in pure Python code, or even - to check whether a class implements the buffer protocol. In - Python 3.12 and higher, the ``__buffer__`` method allows access - to the buffer protocol from Python code, and the - ``collections.abc.Buffer`` ABC allows checking whether a class - implements the buffer protocol. - - To indicate support for the buffer protocol in earlier versions, - inherit from this ABC, either in a stub file or at runtime, - or use ABC registration. This ABC provides no methods, because - there is no Python-accessible methods shared by pre-3.12 buffer - classes. It is useful primarily for static checks. - - """ - - # As a courtesy, register the most common stdlib buffer classes. - Buffer.register(memoryview) - Buffer.register(bytearray) - Buffer.register(bytes) - - -# Backport of types.get_original_bases, available on 3.12+ in CPython -if hasattr(_types, "get_original_bases"): - get_original_bases = _types.get_original_bases -else: - def get_original_bases(cls, /): - """Return the class's "original" bases prior to modification by `__mro_entries__`. 
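Because the fallback registers the common stdlib buffer types, ``isinstance`` checks against ``Buffer`` work immediately; a small sketch::

    from typing_extensions import Buffer

    assert isinstance(b"abc", Buffer)             # bytes is registered above
    assert isinstance(bytearray(b"abc"), Buffer)
    assert isinstance(memoryview(b"abc"), Buffer)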
- - Examples:: - - from typing import TypeVar, Generic - from typing_extensions import NamedTuple, TypedDict - - T = TypeVar("T") - class Foo(Generic[T]): ... - class Bar(Foo[int], float): ... - class Baz(list[str]): ... - Eggs = NamedTuple("Eggs", [("a", int), ("b", str)]) - Spam = TypedDict("Spam", {"a": int, "b": str}) - - assert get_original_bases(Bar) == (Foo[int], float) - assert get_original_bases(Baz) == (list[str],) - assert get_original_bases(Eggs) == (NamedTuple,) - assert get_original_bases(Spam) == (TypedDict,) - assert get_original_bases(int) == (object,) - """ - try: - return cls.__dict__.get("__orig_bases__", cls.__bases__) - except AttributeError: - raise TypeError( - f'Expected an instance of type, not {type(cls).__name__!r}' - ) from None - - -# NewType is a class on Python 3.10+, making it pickleable -# The error message for subclassing instances of NewType was improved on 3.11+ -if sys.version_info >= (3, 11): - NewType = typing.NewType -else: - class NewType: - """NewType creates simple unique types with almost zero - runtime overhead. NewType(name, tp) is considered a subtype of tp - by static type checkers. At runtime, NewType(name, tp) returns - a dummy callable that simply returns its argument. Usage:: - UserId = NewType('UserId', int) - def name_by_id(user_id: UserId) -> str: - ... - UserId('user') # Fails type check - name_by_id(42) # Fails type check - name_by_id(UserId(42)) # OK - num = UserId(5) + 1 # type: int - """ - - def __call__(self, obj, /): - return obj - - def __init__(self, name, tp): - self.__qualname__ = name - if '.' in name: - name = name.rpartition('.')[-1] - self.__name__ = name - self.__supertype__ = tp - def_mod = _caller() - if def_mod != 'typing_extensions': - self.__module__ = def_mod - - def __mro_entries__(self, bases): - # We defined __mro_entries__ to get a better error message - # if a user attempts to subclass a NewType instance. bpo-46170 - supercls_name = self.__name__ - - class Dummy: - def __init_subclass__(cls): - subcls_name = cls.__name__ - raise TypeError( - f"Cannot subclass an instance of NewType. " - f"Perhaps you were looking for: " - f"`{subcls_name} = NewType({subcls_name!r}, {supercls_name})`" - ) - - return (Dummy,) - - def __repr__(self): - return f'{self.__module__}.{self.__qualname__}' - - def __reduce__(self): - return self.__qualname__ - - if sys.version_info >= (3, 10): - # PEP 604 methods - # It doesn't make sense to have these methods on Python <3.10 - - def __or__(self, other): - return typing.Union[self, other] - - def __ror__(self, other): - return typing.Union[other, self] - - -if hasattr(typing, "TypeAliasType"): - TypeAliasType = typing.TypeAliasType -else: - def _is_unionable(obj): - """Corresponds to is_unionable() in unionobject.c in CPython.""" - return obj is None or isinstance(obj, ( - type, - _types.GenericAlias, - _types.UnionType, - TypeAliasType, - )) - - class TypeAliasType: - """Create named, parameterized type aliases. - - This provides a backport of the new `type` statement in Python 3.12: - - type ListOrSet[T] = list[T] | set[T] - - is equivalent to: - - T = TypeVar("T") - ListOrSet = TypeAliasType("ListOrSet", list[T] | set[T], type_params=(T,)) - - The name ListOrSet can then be used as an alias for the type it refers to. - - The type_params argument should contain all the type parameters used - in the value of the type alias. If the alias is not generic, this - argument is omitted. 
- - Static type checkers should only support type aliases declared using - TypeAliasType that follow these rules: - - - The first argument (the name) must be a string literal. - - The TypeAliasType instance must be immediately assigned to a variable - of the same name. (For example, 'X = TypeAliasType("Y", int)' is invalid, - as is 'X, Y = TypeAliasType("X", int), TypeAliasType("Y", int)'). - - """ - - def __init__(self, name: str, value, *, type_params=()): - if not isinstance(name, str): - raise TypeError("TypeAliasType name must be a string") - self.__value__ = value - self.__type_params__ = type_params - - parameters = [] - for type_param in type_params: - if isinstance(type_param, TypeVarTuple): - parameters.extend(type_param) - else: - parameters.append(type_param) - self.__parameters__ = tuple(parameters) - def_mod = _caller() - if def_mod != 'typing_extensions': - self.__module__ = def_mod - # Setting this attribute closes the TypeAliasType from further modification - self.__name__ = name - - def __setattr__(self, name: str, value: object, /) -> None: - if hasattr(self, "__name__"): - self._raise_attribute_error(name) - super().__setattr__(name, value) - - def __delattr__(self, name: str, /) -> Never: - self._raise_attribute_error(name) - - def _raise_attribute_error(self, name: str) -> Never: - # Match the Python 3.12 error messages exactly - if name == "__name__": - raise AttributeError("readonly attribute") - elif name in {"__value__", "__type_params__", "__parameters__", "__module__"}: - raise AttributeError( - f"attribute '{name}' of 'typing.TypeAliasType' objects " - "is not writable" - ) - else: - raise AttributeError( - f"'typing.TypeAliasType' object has no attribute '{name}'" - ) - - def __repr__(self) -> str: - return self.__name__ - - def __getitem__(self, parameters): - if not isinstance(parameters, tuple): - parameters = (parameters,) - parameters = [ - typing._type_check( - item, f'Subscripting {self.__name__} requires a type.' - ) - for item in parameters - ] - return typing._GenericAlias(self, tuple(parameters)) - - def __reduce__(self): - return self.__name__ - - def __init_subclass__(cls, *args, **kwargs): - raise TypeError( - "type 'typing_extensions.TypeAliasType' is not an acceptable base type" - ) - - # The presence of this method convinces typing._type_check - # that TypeAliasTypes are types. - def __call__(self): - raise TypeError("Type alias is not callable") - - if sys.version_info >= (3, 10): - def __or__(self, right): - # For forward compatibility with 3.12, reject Unions - # that are not accepted by the built-in Union. - if not _is_unionable(right): - return NotImplemented - return typing.Union[self, right] - - def __ror__(self, left): - if not _is_unionable(left): - return NotImplemented - return typing.Union[left, self] - - -if hasattr(typing, "is_protocol"): - is_protocol = typing.is_protocol - get_protocol_members = typing.get_protocol_members -else: - def is_protocol(tp: type, /) -> bool: - """Return True if the given type is a Protocol. - - Example:: - - >>> from typing_extensions import Protocol, is_protocol - >>> class P(Protocol): - ... def a(self) -> str: ... - ... b: int - >>> is_protocol(P) - True - >>> is_protocol(int) - False - """ - return ( - isinstance(tp, type) - and getattr(tp, '_is_protocol', False) - and tp is not Protocol - and tp is not typing.Protocol - ) - - def get_protocol_members(tp: type, /) -> typing.FrozenSet[str]: - """Return the set of members defined in a Protocol. 
- - Example:: - - >>> from typing_extensions import Protocol, get_protocol_members - >>> class P(Protocol): - ... def a(self) -> str: ... - ... b: int - >>> get_protocol_members(P) - frozenset({'a', 'b'}) - - Raise a TypeError for arguments that are not Protocols. - """ - if not is_protocol(tp): - raise TypeError(f'{tp!r} is not a Protocol') - if hasattr(tp, '__protocol_attrs__'): - return frozenset(tp.__protocol_attrs__) - return frozenset(_get_protocol_attrs(tp)) - - -if hasattr(typing, "Doc"): - Doc = typing.Doc -else: - class Doc: - """Define the documentation of a type annotation using ``Annotated``, to be - used in class attributes, function and method parameters, return values, - and variables. - - The value should be a positional-only string literal to allow static tools - like editors and documentation generators to use it. - - This complements docstrings. - - The string value passed is available in the attribute ``documentation``. - - Example:: - - >>> from typing_extensions import Annotated, Doc - >>> def hi(to: Annotated[str, Doc("Who to say hi to")]) -> None: ... - """ - def __init__(self, documentation: str, /) -> None: - self.documentation = documentation - - def __repr__(self) -> str: - return f"Doc({self.documentation!r})" - - def __hash__(self) -> int: - return hash(self.documentation) - - def __eq__(self, other: object) -> bool: - if not isinstance(other, Doc): - return NotImplemented - return self.documentation == other.documentation - - -_CapsuleType = getattr(_types, "CapsuleType", None) - -if _CapsuleType is None: - try: - import _socket - except ImportError: - pass - else: - _CAPI = getattr(_socket, "CAPI", None) - if _CAPI is not None: - _CapsuleType = type(_CAPI) - -if _CapsuleType is not None: - CapsuleType = _CapsuleType - __all__.append("CapsuleType") - - -# Aliases for items that have always been in typing. 
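When the interpreter exposes a capsule (here discovered via ``_socket.CAPI``), the derived type matches other C-API capsules. A sketch, assuming a CPython build where the export exists::

    import datetime
    from typing_extensions import CapsuleType  # absent on builds without a capsule

    # datetime's C-API capsule is an instance of the same capsule type
    assert isinstance(datetime.datetime_CAPI, CapsuleType)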
-# Explicitly assign these (rather than using `from typing import *` at the top), -# so that we get a CI error if one of these is deleted from typing.py -# in a future version of Python -AbstractSet = typing.AbstractSet -AnyStr = typing.AnyStr -BinaryIO = typing.BinaryIO -Callable = typing.Callable -Collection = typing.Collection -Container = typing.Container -Dict = typing.Dict -ForwardRef = typing.ForwardRef -FrozenSet = typing.FrozenSet -Generic = typing.Generic -Hashable = typing.Hashable -IO = typing.IO -ItemsView = typing.ItemsView -Iterable = typing.Iterable -Iterator = typing.Iterator -KeysView = typing.KeysView -List = typing.List -Mapping = typing.Mapping -MappingView = typing.MappingView -Match = typing.Match -MutableMapping = typing.MutableMapping -MutableSequence = typing.MutableSequence -MutableSet = typing.MutableSet -Optional = typing.Optional -Pattern = typing.Pattern -Reversible = typing.Reversible -Sequence = typing.Sequence -Set = typing.Set -Sized = typing.Sized -TextIO = typing.TextIO -Tuple = typing.Tuple -Union = typing.Union -ValuesView = typing.ValuesView -cast = typing.cast -no_type_check = typing.no_type_check -no_type_check_decorator = typing.no_type_check_decorator diff --git a/setuptools/_vendor/wheel-0.45.1.dist-info/INSTALLER b/setuptools/_vendor/wheel-0.45.1.dist-info/INSTALLER index a1b589e38a..5c69047b2e 100644 --- a/setuptools/_vendor/wheel-0.45.1.dist-info/INSTALLER +++ b/setuptools/_vendor/wheel-0.45.1.dist-info/INSTALLER @@ -1 +1 @@ -pip +uv \ No newline at end of file diff --git a/setuptools/_vendor/wheel-0.45.1.dist-info/RECORD b/setuptools/_vendor/wheel-0.45.1.dist-info/RECORD index c1535b697f..698d51bf77 100644 --- a/setuptools/_vendor/wheel-0.45.1.dist-info/RECORD +++ b/setuptools/_vendor/wheel-0.45.1.dist-info/RECORD @@ -1,5 +1,5 @@ -../../bin/wheel,sha256=pBhV19bQIgjS-r541fG3kLU6QtcyKaKdQ2RE9YIzeiU,249 -wheel-0.45.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +bin/wheel,sha256=7ijYGJ2HkBAM2Ws5-L6nCHhN-DASp95DgxlQWfqY51A,333 +wheel-0.45.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 wheel-0.45.1.dist-info/LICENSE.txt,sha256=MMI2GGeRCPPo6h0qZYx8pBe9_IkcmO8aifpP8MmChlQ,1107 wheel-0.45.1.dist-info/METADATA,sha256=mKz84H7m7jsxJyzeIcTVORiTb0NPMV39KvOIYhGgmjA,2313 wheel-0.45.1.dist-info/RECORD,, @@ -8,24 +8,10 @@ wheel-0.45.1.dist-info/WHEEL,sha256=CpUCUxeHQbRN5UGRQHYRJorO5Af-Qy_fHMctcQ8DSGI, wheel-0.45.1.dist-info/entry_points.txt,sha256=rTY1BbkPHhkGMm4Q3F0pIzJBzW2kMxoG1oriffvGdA0,104 wheel/__init__.py,sha256=mrxMnvdXACur_LWegbUfh5g5ysWZrd63UJn890wvGNk,59 wheel/__main__.py,sha256=NkMUnuTCGcOkgY0IBLgBCVC_BGGcWORx2K8jYGS12UE,455 -wheel/__pycache__/__init__.cpython-311.pyc,, -wheel/__pycache__/__main__.cpython-311.pyc,, -wheel/__pycache__/_bdist_wheel.cpython-311.pyc,, -wheel/__pycache__/_setuptools_logging.cpython-311.pyc,, -wheel/__pycache__/bdist_wheel.cpython-311.pyc,, -wheel/__pycache__/macosx_libfile.cpython-311.pyc,, -wheel/__pycache__/metadata.cpython-311.pyc,, -wheel/__pycache__/util.cpython-311.pyc,, -wheel/__pycache__/wheelfile.cpython-311.pyc,, wheel/_bdist_wheel.py,sha256=UghCQjSH_pVfcZh6oRjzSw_TQhcf3anSx1OkiLSL82M,21694 wheel/_setuptools_logging.py,sha256=-5KC-lne0ilOUWIDfOkqapUWGMFZhuKYDIavIZiB5kM,781 wheel/bdist_wheel.py,sha256=tpf9WufiSO1RuEMg5oPhIfSG8DMziCZ_4muCKF69Cqo,1107 wheel/cli/__init__.py,sha256=Npq6_jKi03dhIcRnmbuFhwviVJxwO0tYEnEhWMv9cJo,4402 -wheel/cli/__pycache__/__init__.cpython-311.pyc,, -wheel/cli/__pycache__/convert.cpython-311.pyc,, 
-wheel/cli/__pycache__/pack.cpython-311.pyc,, -wheel/cli/__pycache__/tags.cpython-311.pyc,, -wheel/cli/__pycache__/unpack.cpython-311.pyc,, wheel/cli/convert.py,sha256=Bi0ntEXb9nTllCxWeTRQ4j-nPs3szWSEKipG_GgnMkQ,12634 wheel/cli/pack.py,sha256=CAFcHdBVulvsHYJlndKVO7KMI9JqBTZz5ii0PKxxCOs,3103 wheel/cli/tags.py,sha256=lHw-LaWrkS5Jy_qWcw-6pSjeNM6yAjDnqKI3E5JTTCU,4760 @@ -34,24 +20,10 @@ wheel/macosx_libfile.py,sha256=k1x7CE3LPtOVGqj6NXQ1nTGYVPaeRrhVzUG_KPq3zDs,16572 wheel/metadata.py,sha256=JC4p7jlQZu2bUTAQ2fevkqLjg_X6gnNyRhLn6OUO1tc,6171 wheel/util.py,sha256=aL7aibHwYUgfc8WlolL5tXdkV4DatbJxZHb1kwHFJAU,423 wheel/vendored/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -wheel/vendored/__pycache__/__init__.cpython-311.pyc,, wheel/vendored/packaging/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197 wheel/vendored/packaging/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174 wheel/vendored/packaging/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344 wheel/vendored/packaging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -wheel/vendored/packaging/__pycache__/__init__.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/_elffile.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/_manylinux.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/_musllinux.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/_parser.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/_structures.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/_tokenizer.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/markers.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/requirements.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/specifiers.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/tags.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/utils.cpython-311.pyc,, -wheel/vendored/packaging/__pycache__/version.cpython-311.pyc,, wheel/vendored/packaging/_elffile.py,sha256=hbmK8OD6Z7fY6hwinHEUcD1by7czkGiNYu7ShnFEk2k,3266 wheel/vendored/packaging/_manylinux.py,sha256=P7sdR5_7XBY09LVYYPhHmydMJIIwPXWsh4olk74Uuj4,9588 wheel/vendored/packaging/_musllinux.py,sha256=z1s8To2hQ0vpn_d-O2i5qxGwEK8WmGlLt3d_26V7NeY,2674 diff --git a/setuptools/_vendor/zipp-3.19.2.dist-info/INSTALLER b/setuptools/_vendor/zipp-3.19.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e38a..0000000000 --- a/setuptools/_vendor/zipp-3.19.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/setuptools/_vendor/zipp-3.19.2.dist-info/LICENSE b/setuptools/_vendor/zipp-3.19.2.dist-info/LICENSE deleted file mode 100644 index 1bb5a44356..0000000000 --- a/setuptools/_vendor/zipp-3.19.2.dist-info/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. diff --git a/setuptools/_vendor/zipp-3.19.2.dist-info/RECORD b/setuptools/_vendor/zipp-3.19.2.dist-info/RECORD deleted file mode 100644 index 77c02835d8..0000000000 --- a/setuptools/_vendor/zipp-3.19.2.dist-info/RECORD +++ /dev/null @@ -1,15 +0,0 @@ -zipp-3.19.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -zipp-3.19.2.dist-info/LICENSE,sha256=htoPAa6uRjSKPD1GUZXcHOzN55956HdppkuNoEsqR0E,1023 -zipp-3.19.2.dist-info/METADATA,sha256=UIrk_kMIHGSwsKKChYizqMw0MMZpPRZ2ZiVpQAsN_bE,3575 -zipp-3.19.2.dist-info/RECORD,, -zipp-3.19.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -zipp-3.19.2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 -zipp-3.19.2.dist-info/top_level.txt,sha256=iAbdoSHfaGqBfVb2XuR9JqSQHCoOsOtG6y9C_LSpqFw,5 -zipp/__init__.py,sha256=QuI1g00G4fRAcGt-HqbV0oWIkmSgedCGGYsHHYzNa8A,13412 -zipp/__pycache__/__init__.cpython-312.pyc,, -zipp/__pycache__/glob.cpython-312.pyc,, -zipp/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -zipp/compat/__pycache__/__init__.cpython-312.pyc,, -zipp/compat/__pycache__/py310.cpython-312.pyc,, -zipp/compat/py310.py,sha256=eZpkW0zRtunkhEh8jjX3gCGe22emoKCBJw72Zt4RkhA,219 -zipp/glob.py,sha256=etWpnfEoRyfUvrUsi6sTiGmErvPwe6HzY6pT8jg_lUI,3082 diff --git a/setuptools/_vendor/zipp-3.19.2.dist-info/WHEEL b/setuptools/_vendor/zipp-3.19.2.dist-info/WHEEL deleted file mode 100644 index bab98d6758..0000000000 --- a/setuptools/_vendor/zipp-3.19.2.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.43.0) -Root-Is-Purelib: true -Tag: py3-none-any - diff --git a/setuptools/_vendor/zipp-3.23.0.dist-info/INSTALLER b/setuptools/_vendor/zipp-3.23.0.dist-info/INSTALLER new file mode 100644 index 0000000000..5c69047b2e --- /dev/null +++ b/setuptools/_vendor/zipp-3.23.0.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/setuptools/_vendor/zipp-3.19.2.dist-info/METADATA b/setuptools/_vendor/zipp-3.23.0.dist-info/METADATA similarity index 60% rename from setuptools/_vendor/zipp-3.19.2.dist-info/METADATA rename to setuptools/_vendor/zipp-3.23.0.dist-info/METADATA index 1399281717..6420117987 100644 --- a/setuptools/_vendor/zipp-3.19.2.dist-info/METADATA +++ b/setuptools/_vendor/zipp-3.23.0.dist-info/METADATA @@ -1,38 +1,42 @@ -Metadata-Version: 2.1 +Metadata-Version: 2.4 Name: zipp -Version: 3.19.2 +Version: 3.23.0 Summary: Backport of pathlib-compatible object wrapper for zip files Author-email: "Jason R. 
Coombs" -Project-URL: Homepage, https://github.com/jaraco/zipp +License-Expression: MIT +Project-URL: Source, https://github.com/jaraco/zipp Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3 :: Only -Requires-Python: >=3.8 +Requires-Python: >=3.9 Description-Content-Type: text/x-rst License-File: LICENSE -Provides-Extra: doc -Requires-Dist: sphinx >=3.5 ; extra == 'doc' -Requires-Dist: jaraco.packaging >=9.3 ; extra == 'doc' -Requires-Dist: rst.linker >=1.9 ; extra == 'doc' -Requires-Dist: furo ; extra == 'doc' -Requires-Dist: sphinx-lint ; extra == 'doc' -Requires-Dist: jaraco.tidelift >=1.4 ; extra == 'doc' Provides-Extra: test -Requires-Dist: pytest !=8.1.*,>=6 ; extra == 'test' -Requires-Dist: pytest-checkdocs >=2.4 ; extra == 'test' -Requires-Dist: pytest-cov ; extra == 'test' -Requires-Dist: pytest-mypy ; extra == 'test' -Requires-Dist: pytest-enabler >=2.2 ; extra == 'test' -Requires-Dist: pytest-ruff >=0.2.1 ; extra == 'test' -Requires-Dist: jaraco.itertools ; extra == 'test' -Requires-Dist: jaraco.functools ; extra == 'test' -Requires-Dist: more-itertools ; extra == 'test' -Requires-Dist: big-O ; extra == 'test' -Requires-Dist: pytest-ignore-flaky ; extra == 'test' -Requires-Dist: jaraco.test ; extra == 'test' -Requires-Dist: importlib-resources ; (python_version < "3.9") and extra == 'test' +Requires-Dist: pytest!=8.1.*,>=6; extra == "test" +Requires-Dist: jaraco.itertools; extra == "test" +Requires-Dist: jaraco.functools; extra == "test" +Requires-Dist: more_itertools; extra == "test" +Requires-Dist: big-O; extra == "test" +Requires-Dist: pytest-ignore-flaky; extra == "test" +Requires-Dist: jaraco.test; extra == "test" +Provides-Extra: doc +Requires-Dist: sphinx>=3.5; extra == "doc" +Requires-Dist: jaraco.packaging>=9.3; extra == "doc" +Requires-Dist: rst.linker>=1.9; extra == "doc" +Requires-Dist: furo; extra == "doc" +Requires-Dist: sphinx-lint; extra == "doc" +Requires-Dist: jaraco.tidelift>=1.4; extra == "doc" +Provides-Extra: check +Requires-Dist: pytest-checkdocs>=2.4; extra == "check" +Requires-Dist: pytest-ruff>=0.2.1; sys_platform != "cygwin" and extra == "check" +Provides-Extra: cover +Requires-Dist: pytest-cov; extra == "cover" +Provides-Extra: enabler +Requires-Dist: pytest-enabler>=2.2; extra == "enabler" +Provides-Extra: type +Requires-Dist: pytest-mypy; extra == "type" +Dynamic: license-file .. image:: https://img.shields.io/pypi/v/zipp.svg :target: https://pypi.org/project/zipp @@ -43,14 +47,14 @@ Requires-Dist: importlib-resources ; (python_version < "3.9") and extra == 'test :target: https://github.com/jaraco/zipp/actions?query=workflow%3A%22tests%22 :alt: tests -.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json +.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json :target: https://github.com/astral-sh/ruff :alt: Ruff -.. .. image:: https://readthedocs.org/projects/PROJECT_RTD/badge/?version=latest -.. :target: https://PROJECT_RTD.readthedocs.io/en/latest/?badge=latest +.. image:: https://readthedocs.org/projects/zipp/badge/?version=latest +.. :target: https://zipp.readthedocs.io/en/latest/?badge=latest -.. image:: https://img.shields.io/badge/skeleton-2024-informational +.. 
image:: https://img.shields.io/badge/skeleton-2025-informational :target: https://blog.jaraco.com/skeleton .. image:: https://tidelift.com/badges/package/pypi/zipp diff --git a/setuptools/_vendor/zipp-3.23.0.dist-info/RECORD b/setuptools/_vendor/zipp-3.23.0.dist-info/RECORD new file mode 100644 index 0000000000..167e234a53 --- /dev/null +++ b/setuptools/_vendor/zipp-3.23.0.dist-info/RECORD @@ -0,0 +1,14 @@ +zipp-3.23.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +zipp-3.23.0.dist-info/METADATA,sha256=vdZ9TRbPC_O4k-fRjNPS13StuC837Zhbx3cMYHIms1s,3563 +zipp-3.23.0.dist-info/RECORD,, +zipp-3.23.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +zipp-3.23.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91 +zipp-3.23.0.dist-info/licenses/LICENSE,sha256=WlfLTbheKi3YjCkGKJCK3VfjRRRJ4KmnH9-zh3b9dZ0,1076 +zipp-3.23.0.dist-info/top_level.txt,sha256=iAbdoSHfaGqBfVb2XuR9JqSQHCoOsOtG6y9C_LSpqFw,5 +zipp/__init__.py,sha256=ieXh9GIMdABjKRX_JUJtP9k5wdBLK4Mt5X4nszSkmYE,11976 +zipp/_functools.py,sha256=f6Kt9LxZ4TE-cY1lJVdXSId3memSXmH9IdgMbU-_x2k,575 +zipp/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +zipp/compat/overlay.py,sha256=oEIGAnbr8yGjuKTrVSO2ByewPui71uppbX18BLnYTKE,783 +zipp/compat/py310.py,sha256=S7i6N9mToEn3asNb2ILyjnzvITOXrATD_J4emjyBbDU,256 +zipp/compat/py313.py,sha256=RndvDNtuY7H2D9ecnnzcPBMZ8mZc42gmXD_IwQAXXAE,654 +zipp/glob.py,sha256=DLV9LBsDxA6YVW82e3-tkoNrus1h4R-j3BR6VqS0AzE,3382 diff --git a/setuptools/_vendor/typeguard/py.typed b/setuptools/_vendor/zipp-3.23.0.dist-info/REQUESTED similarity index 100% rename from setuptools/_vendor/typeguard/py.typed rename to setuptools/_vendor/zipp-3.23.0.dist-info/REQUESTED diff --git a/setuptools/_vendor/zipp-3.23.0.dist-info/WHEEL b/setuptools/_vendor/zipp-3.23.0.dist-info/WHEEL new file mode 100644 index 0000000000..e7fa31b6f3 --- /dev/null +++ b/setuptools/_vendor/zipp-3.23.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/setuptools/_vendor/zipp-3.23.0.dist-info/licenses/LICENSE b/setuptools/_vendor/zipp-3.23.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000..f60bd57201 --- /dev/null +++ b/setuptools/_vendor/zipp-3.23.0.dist-info/licenses/LICENSE @@ -0,0 +1,18 @@ +MIT License + +Copyright (c) 2025 + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the +following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO +EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +USE OR OTHER DEALINGS IN THE SOFTWARE. 
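The RECORD rows above follow the wheel/PEP 376 convention: each entry is path,sha256=<digest>,size, where the digest is the urlsafe-base64 encoding of the file's SHA-256 hash with the trailing '=' padding stripped. A minimal sketch of how such a digest is derived (record_hash is a hypothetical helper for illustration, not part of this patch):

    import base64
    import hashlib

    def record_hash(data: bytes) -> str:
        # urlsafe base64 of the SHA-256 digest, '=' padding removed (PEP 376 style)
        digest = hashlib.sha256(data).digest()
        return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

    # The rewritten INSTALLER files contain just the two bytes b'uv' (no trailing
    # newline), which matches the size-2 entries with the 5hhM4Q4m... digest
    # recorded throughout this patch.
    print(record_hash(b'uv'))
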
diff --git a/setuptools/_vendor/zipp-3.19.2.dist-info/top_level.txt b/setuptools/_vendor/zipp-3.23.0.dist-info/top_level.txt similarity index 100% rename from setuptools/_vendor/zipp-3.19.2.dist-info/top_level.txt rename to setuptools/_vendor/zipp-3.23.0.dist-info/top_level.txt diff --git a/setuptools/_vendor/zipp/__init__.py b/setuptools/_vendor/zipp/__init__.py index d65297b835..ed5b214632 100644 --- a/setuptools/_vendor/zipp/__init__.py +++ b/setuptools/_vendor/zipp/__init__.py @@ -1,17 +1,26 @@ +""" +A Path-like interface for zipfiles. + +This codebase is shared between zipfile.Path in the stdlib +and zipp in PyPI. See +https://github.com/python/importlib_metadata/wiki/Development-Methodology +for more detail. +""" + +import functools import io -import posixpath -import zipfile import itertools -import contextlib import pathlib +import posixpath import re import stat import sys +import zipfile +from ._functools import save_method_args from .compat.py310 import text_encoding from .glob import Translator - __all__ = ['Path'] @@ -37,7 +46,7 @@ def _parents(path): def _ancestry(path): """ Given a path with elements separated by - posixpath.sep, generate all elements of that path + posixpath.sep, generate all elements of that path. >>> list(_ancestry('b/d')) ['b/d', 'b'] @@ -49,9 +58,14 @@ def _ancestry(path): ['b'] >>> list(_ancestry('')) [] + + Multiple separators are treated like a single. + + >>> list(_ancestry('//b//d///f//')) + ['//b//d///f', '//b//d', '//b'] """ path = path.rstrip(posixpath.sep) - while path and path != posixpath.sep: + while path.rstrip(posixpath.sep): yield path path, tail = posixpath.split(path) @@ -73,82 +87,19 @@ class InitializedState: Mix-in to save the initialization state for pickling. """ + @save_method_args def __init__(self, *args, **kwargs): - self.__args = args - self.__kwargs = kwargs super().__init__(*args, **kwargs) def __getstate__(self): - return self.__args, self.__kwargs + return self._saved___init__.args, self._saved___init__.kwargs def __setstate__(self, state): args, kwargs = state super().__init__(*args, **kwargs) -class SanitizedNames: - """ - ZipFile mix-in to ensure names are sanitized. - """ - - def namelist(self): - return list(map(self._sanitize, super().namelist())) - - @staticmethod - def _sanitize(name): - r""" - Ensure a relative path with posix separators and no dot names. - - Modeled after - https://github.com/python/cpython/blob/bcc1be39cb1d04ad9fc0bd1b9193d3972835a57c/Lib/zipfile/__init__.py#L1799-L1813 - but provides consistent cross-platform behavior. - - >>> san = SanitizedNames._sanitize - >>> san('/foo/bar') - 'foo/bar' - >>> san('//foo.txt') - 'foo.txt' - >>> san('foo/.././bar.txt') - 'foo/bar.txt' - >>> san('foo../.bar.txt') - 'foo../.bar.txt' - >>> san('\\foo\\bar.txt') - 'foo/bar.txt' - >>> san('D:\\foo.txt') - 'D/foo.txt' - >>> san('\\\\server\\share\\file.txt') - 'server/share/file.txt' - >>> san('\\\\?\\GLOBALROOT\\Volume3') - '?/GLOBALROOT/Volume3' - >>> san('\\\\.\\PhysicalDrive1\\root') - 'PhysicalDrive1/root' - - Retain any trailing slash. - >>> san('abc/') - 'abc/' - - Raises a ValueError if the result is empty. - >>> san('../..') - Traceback (most recent call last): - ... - ValueError: Empty filename - """ - - def allowed(part): - return part and part not in {'..', '.'} - - # Remove the drive letter. 
- # Don't use ntpath.splitdrive, because that also strips UNC paths - bare = re.sub('^([A-Z]):', r'\1', name, flags=re.IGNORECASE) - clean = bare.replace('\\', '/') - parts = clean.split('/') - joined = '/'.join(filter(allowed, parts)) - if not joined: - raise ValueError("Empty filename") - return joined + '/' * name.endswith('/') - - -class CompleteDirs(InitializedState, SanitizedNames, zipfile.ZipFile): +class CompleteDirs(InitializedState, zipfile.ZipFile): """ A ZipFile subclass that ensures that implied directories are always included in the namelist. @@ -230,22 +181,27 @@ class FastLookup(CompleteDirs): """ def namelist(self): - with contextlib.suppress(AttributeError): - return self.__names - self.__names = super().namelist() - return self.__names + return self._namelist + + @functools.cached_property + def _namelist(self): + return super().namelist() def _name_set(self): - with contextlib.suppress(AttributeError): - return self.__lookup - self.__lookup = super()._name_set() - return self.__lookup + return self._name_set_prop + + @functools.cached_property + def _name_set_prop(self): + return super()._name_set() def _extract_text_encoding(encoding=None, *args, **kwargs): # compute stack level so that the caller of the caller sees any warning. is_pypy = sys.implementation.name == 'pypy' - stack_level = 3 + is_pypy + # PyPy no longer special cased after 7.3.19 (or maybe 7.3.18) + # See jaraco/zipp#143 + is_old_pypy = is_pypy and sys.pypy_version_info < (7, 3, 19) + stack_level = 3 + is_old_pypy return text_encoding(encoding, stack_level), args, kwargs @@ -329,7 +285,7 @@ class Path: >>> str(path.parent) 'mem' - If the zipfile has no filename, such attributes are not + If the zipfile has no filename, such attributes are not valid and accessing them will raise an Exception. >>> zf.filename = None @@ -388,7 +344,7 @@ def open(self, mode='r', *args, pwd=None, **kwargs): if self.is_dir(): raise IsADirectoryError(self) zip_mode = mode[0] - if not self.exists() and zip_mode == 'r': + if zip_mode == 'r' and not self.exists(): raise FileNotFoundError(self) stream = self.root.open(self.at, zip_mode, pwd=pwd) if 'b' in mode: @@ -400,7 +356,7 @@ def open(self, mode='r', *args, pwd=None, **kwargs): return io.TextIOWrapper(stream, encoding, *args, **kwargs) def _base(self): - return pathlib.PurePosixPath(self.at or self.root.filename) + return pathlib.PurePosixPath(self.at) if self.at else self.filename @property def name(self): @@ -470,8 +426,7 @@ def glob(self, pattern): prefix = re.escape(self.at) tr = Translator(seps='/') matches = re.compile(prefix + tr.translate(pattern)).fullmatch - names = (data.filename for data in self.root.filelist) - return map(self._next, filter(matches, names)) + return map(self._next, filter(matches, self.root.namelist())) def rglob(self, pattern): return self.glob(f'**/{pattern}') diff --git a/setuptools/_vendor/zipp/_functools.py b/setuptools/_vendor/zipp/_functools.py new file mode 100644 index 0000000000..7390be2187 --- /dev/null +++ b/setuptools/_vendor/zipp/_functools.py @@ -0,0 +1,20 @@ +import collections +import functools + + +# from jaraco.functools 4.0.2 +def save_method_args(method): + """ + Wrap a method such that when it is called, the args and kwargs are + saved on the method.
+ """ + args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') # noqa: PYI024 + + @functools.wraps(method) + def wrapper(self, /, *args, **kwargs): + attr_name = '_saved_' + method.__name__ + attr = args_and_kwargs(args, kwargs) + setattr(self, attr_name, attr) + return method(self, *args, **kwargs) + + return wrapper diff --git a/setuptools/_vendor/zipp/compat/overlay.py b/setuptools/_vendor/zipp/compat/overlay.py new file mode 100644 index 0000000000..5a97ee7cd8 --- /dev/null +++ b/setuptools/_vendor/zipp/compat/overlay.py @@ -0,0 +1,37 @@ +""" +Expose zipp.Path as .zipfile.Path. + +Includes everything else in ``zipfile`` to match future usage. Just +use: + +>>> from zipp.compat.overlay import zipfile + +in place of ``import zipfile``. + +Relative imports are supported too. + +>>> from zipp.compat.overlay.zipfile import ZipInfo + +The ``zipfile`` object added to ``sys.modules`` needs to be +hashable (#126). + +>>> _ = hash(sys.modules['zipp.compat.overlay.zipfile']) +""" + +import importlib +import sys +import types + +import zipp + + +class HashableNamespace(types.SimpleNamespace): + def __hash__(self): + return hash(tuple(vars(self))) + + +zipfile = HashableNamespace(**vars(importlib.import_module('zipfile'))) +zipfile.Path = zipp.Path +zipfile._path = zipp + +sys.modules[__name__ + '.zipfile'] = zipfile # type: ignore[assignment] diff --git a/setuptools/_vendor/zipp/compat/py310.py b/setuptools/_vendor/zipp/compat/py310.py index d5ca53e037..e1e7ec2290 100644 --- a/setuptools/_vendor/zipp/compat/py310.py +++ b/setuptools/_vendor/zipp/compat/py310.py @@ -1,5 +1,5 @@ -import sys import io +import sys def _text_encoding(encoding, stacklevel=2, /): # pragma: no cover @@ -7,5 +7,7 @@ def _text_encoding(encoding, stacklevel=2, /): # pragma: no cover text_encoding = ( - io.text_encoding if sys.version_info > (3, 10) else _text_encoding # type: ignore + io.text_encoding # type: ignore[unused-ignore, attr-defined] + if sys.version_info > (3, 10) + else _text_encoding ) diff --git a/setuptools/_vendor/zipp/compat/py313.py b/setuptools/_vendor/zipp/compat/py313.py new file mode 100644 index 0000000000..ae45869055 --- /dev/null +++ b/setuptools/_vendor/zipp/compat/py313.py @@ -0,0 +1,34 @@ +import functools +import sys + + +# from jaraco.functools 4.1 +def identity(x): + return x + + +# from jaraco.functools 4.1 +def apply(transform): + def wrap(func): + return functools.wraps(func)(compose(transform, func)) + + return wrap + + +# from jaraco.functools 4.1 +def compose(*funcs): + def compose_two(f1, f2): + return lambda *args, **kwargs: f1(f2(*args, **kwargs)) + + return functools.reduce(compose_two, funcs) + + +def replace(pattern): + r""" + >>> replace(r'foo\z') + 'foo\\Z' + """ + return pattern[:-2] + pattern[-2:].replace(r'\z', r'\Z') + + +legacy_end_marker = apply(replace) if sys.version_info < (3, 14) else identity diff --git a/setuptools/_vendor/zipp/glob.py b/setuptools/_vendor/zipp/glob.py index 69c41d77c3..1b4ffb3318 100644 --- a/setuptools/_vendor/zipp/glob.py +++ b/setuptools/_vendor/zipp/glob.py @@ -1,6 +1,7 @@ import os import re +from .compat.py313 import legacy_end_marker _default_seps = os.sep + str(os.altsep) * bool(os.altsep) @@ -28,8 +29,9 @@ def translate(self, pattern): """ Given a glob pattern, produce a regex that matches it. """ - return self.extend(self.translate_core(pattern)) + return self.extend(self.match_dirs(self.translate_core(pattern))) + @legacy_end_marker def extend(self, pattern): r""" Extend regex for pattern-wide concerns. 
@@ -37,9 +39,17 @@ def extend(self, pattern): Apply '(?s:)' to create a non-matching group that matches newlines (valid on Unix). - Append '\Z' to imply fullmatch even when match is used. + Append '\z' to imply fullmatch even when match is used. """ - return rf'(?s:{pattern})\Z' + return rf'(?s:{pattern})\z' + + def match_dirs(self, pattern): + """ + Ensure that zipfile.Path directory names are matched. + + zipfile.Path directory names always end in a slash. + """ + return rf'{pattern}[/]?' def translate_core(self, pattern): r"""
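Two glob changes land together above: translate() now routes the core pattern through match_dirs(), which appends an optional trailing '/' so that directory members (whose names always end in a slash) are matched, and extend() now anchors with \z, with the @legacy_end_marker decorator rewriting that back to \Z on interpreters older than 3.14, where re predates support for \z. A rough model of the combined behavior, assuming the Translator shown in the diff (extend_pattern is an illustrative stand-in, not the vendored code):

    import re

    def extend_pattern(core: str) -> str:
        # (?s:) lets '.' span newlines; '[/]?' admits a trailing slash for
        # directory entries; '\Z' anchors at end-of-string (the portable
        # spelling of the '\z' emitted on Python 3.14+).
        return rf'(?s:{core}[/]?)\Z'

    pat = re.compile(extend_pattern('docs'))
    assert pat.match('docs')            # plain member name
    assert pat.match('docs/')           # directory entry still matches
    assert not pat.match('docs/conf.py')
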